blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46d8832c988bf4eb6ff75712e08950beea18fb75 | 615bc7e8d365212288732a43a6a937537185954e | /10classes.py | 3461968fdbde85967204f62415625898eaa1d392 | [] | no_license | KnightsTiger/tratsgnittegnohtypetisnilarulp | 2f846c5a740f1f378273f7125a2d41f7d30f95d6 | 21f639efc21d64d145e95262dc70ba067d38aca5 | refs/heads/master | 2021-05-26T01:44:05.210790 | 2020-04-09T15:59:14 | 2020-04-09T15:59:14 | 254,004,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | #As a convention a class is starting from a capital letter
class Studnets:  # NOTE(review): name is a typo for "Students"; kept so existing references still work
    """Minimal class demo: the print runs once, when the class body is executed at import time."""
    print("done")
##saman = Studnets()
#--------------------------------------------------------------------#
#Adding a method to a class
# calsses = []
# class NewStudents:
# def addClass(self,name,count): #Without self keyword this will not work
# newClass = {"name":name,"Count":count}
# calsses.append(newClass)
# student = NewStudents()
# student.addClass("Java",10)
# print(calsses)
#--------------------------------------------------------------------#
#Adding constructor
# calsses = []
# class NewStudents:
# def __init__(self,name,count): #Without self keyword this will not work
# newClass = {"name":name,"Count":count}
# calsses.append(newClass)
# student = NewStudents("Java",10)
#--------------------------------------------------------------------#
#overriding str method
# calsses = []
# class NewStudents:
# def __init__(self,name,count): #Without self keyword this will not work
# newClass = {"name":name,"Count":count}
# calsses.append(newClass)
# def __str__(self):
# return "Students class"
# student = NewStudents("Java",10)
# print(student) # here you can find the overrided variable
#--------------------------------------------------------------------#
# class JavaClass:
# courseName = "Java" #This is a class attribute. So from outside this can access
# def setCount(self,numberOfStudents):
# self.numberOfStudents = numberOfStudents # numberOfStudents can access from anywhere in the program
# def getCount(self):
# return self.numberOfStudents
# print(JavaClass.courseName)
# myClass = JavaClass()
# myClass.setCount(10)
# print(myClass.getCount())
#--------------------------------------------------------------------#
# Inheritance
class JavaClass:
    """Inheritance demo base class: one class attribute plus setter/getter methods."""
    courseName = "Java" #This is a class attribute. So from outside this can access
    def setCount(self,numberOfStudents):
        """Store the student count on the instance."""
        self.numberOfStudents = numberOfStudents # numberOfStudents can access from anywhere in the program
    def getCount(self):
        """NOTE(review): prints 'hi' and implicitly returns None (no return statement)."""
        print("hi")
class courses(JavaClass):
    """Subclass demo: overrides courseName and __str__, inherits set/getCount."""
    courseName = "Advance"
    def __str__(self):
        # NOTE(review): text says "Super Class" although this is the subclass — likely intentional demo quirk
        return "Super Class"
# Demo driver: exercise the base class first, then the subclass.
print(JavaClass.courseName)
myClass = JavaClass()
myClass.setCount(10)
print(myClass.getCount())  # prints "hi" (from getCount) followed by None (its return value)
myClassMain = courses()
print(myClassMain.courseName)  # subclass override: "Advance"
print(myClassMain.getCount())  # inherited getCount: "hi" then None again
print(myClassMain) | [
"tocaldera6734@gmail.com"
] | tocaldera6734@gmail.com |
710bcc0fb5dcc70b3aacdae1595043478681cdb2 | 02560440f9f91e583fe98d80ab11e18aa6c7a525 | /apps/usuarios/migrations/0003_usuario_correo.py | ad084ca9dc73a72604d08e401a8af1a08f618f45 | [] | no_license | eduardogpg/wamadeusV1 | a36c89176543e638486009620c5131f46743edbc | 82d93293dc6afc95a6661f727162f4055ab83a43 | refs/heads/master | 2020-12-28T01:57:47.831689 | 2015-01-08T05:14:25 | 2015-01-08T05:14:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the ``correo`` (e-mail) column to the ``usuario`` table."""

    dependencies = [
        ('usuarios', '0002_auto_20141215_1710'),
    ]

    operations = [
        migrations.AddField(
            model_name='usuario',
            name='correo',
            field=models.EmailField(default=' ', max_length=50),
            # One-off default (' ') is only used to back-fill existing rows.
            preserve_default=False,
        ),
    ]
| [
"eduardo78d@gmail.com"
] | eduardo78d@gmail.com |
004ffb4848cb44c224fac37051f30378f5af9281 | 4bbb118943bbd8a1296ab96ebf092c9d26232c20 | /neuroballad/neuroballad_execute.py | 496f4dd1126161ba9aa38f38c9670f019cda0b25 | [
"BSD-3-Clause"
] | permissive | TK-21st/Neuroballad | 66e0baf07fa9f0bcd3118a24e5b466f8dbbf289f | 6d4800e969c35b0f2d64897db24b734a9daaa160 | refs/heads/master | 2020-09-02T23:08:04.598245 | 2019-05-19T17:27:41 | 2019-05-19T17:27:41 | 219,328,262 | 0 | 0 | BSD-3-Clause | 2019-11-03T16:12:28 | 2019-11-03T16:12:28 | null | UTF-8 | Python | false | false | 2,633 | py | import numpy as np
import h5py
import networkx as nx
import argparse
import itertools
import random
import pickle
from neurokernel.LPU.InputProcessors.StepInputProcessor import StepInputProcessor
from neurokernel.LPU.InputProcessors.FileInputProcessor import FileInputProcessor
from neurokernel.tools.logging import setup_logger
from neurokernel.LPU.LPU import LPU
def main():
    """Run a single-LPU Neurokernel simulation of the exported NeuroBallad model.

    Loads the circuit from ``neuroballad_temp_model.gexf.gz``, the run and
    record settings from two pickle files, streams input from an HDF5 file,
    and writes recorded outputs to ``neuroballad_temp_model_output.h5``.
    """
    import neurokernel.mpi_relaunch
    import neurokernel.core_gpu as core
    (comp_dict, conns) = LPU.lpu_parser('neuroballad_temp_model.gexf.gz')
    with open('run_parameters.pickle', 'rb') as f:
        run_parameters = pickle.load(f)
    with open('record_parameters.pickle', 'rb') as f:
        record_parameters = pickle.load(f)
    # Defaults below are immediately overwritten by the pickled values.
    dur = 1.0
    dt = 1e-4
    dur = run_parameters[0]  # simulation duration (seconds, presumably — TODO confirm units)
    dt = run_parameters[1]   # integration time step
    fl_input_processor = FileInputProcessor('neuroballad_temp_model_input.h5')
    from neurokernel.LPU.OutputProcessors.FileOutputProcessor import FileOutputProcessor
    output_processor = FileOutputProcessor(record_parameters, 'neuroballad_temp_model_output.h5', sample_interval=1)
    #Parse extra arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', default=False,
                        dest='debug', action='store_true',
                        help='Write connectivity structures and inter-LPU routed data in debug folder')
    parser.add_argument('-l', '--log', default='file', type=str,
                        help='Log output to screen [file, screen, both, or none; default:none]')
    parser.add_argument('-r', '--time_sync', default=False, action='store_true',
                        help='Time data reception throughput [default: False]')
    parser.add_argument('-g', '--gpu_dev', default=[0, 1], type=int, nargs='+',
                        help='GPU device numbers [default: 0 1]')
    parser.add_argument('-d', '--disconnect', default=False, action='store_true',
                        help='Run with disconnected LPUs [default: False]')
    args = parser.parse_args()
    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)
    man = core.Manager()
    # NOTE(review): uses gpu_dev[1], i.e. the SECOND listed GPU — confirm intended.
    man.add(LPU, 'lpu', dt, comp_dict, conns,
            input_processors = [fl_input_processor ],
            output_processors = [output_processor], device=args.gpu_dev[1],
            debug=True)
    steps = int(dur/dt)
    man.spawn()
    man.start(steps = steps)
    man.wait()

if __name__=='__main__':
    main()
| [
"mkt2126@columbia.edu"
] | mkt2126@columbia.edu |
22af5780ca6318f0076646d9a994de340505c2f5 | 5dbb3c65e945a366b2d5c7a4c44fe21c6dfc7b47 | /matrix_addition.py | 508c26721ec762ba3530edec605629d39dc8ccf9 | [] | no_license | kblasi/portfolio | b1e5eb5ea2ad60d939aeb5d4a64c8674d15b1397 | 7b4b671f45717854ee22db5a2fbe6246df234336 | refs/heads/main | 2023-04-26T11:56:01.111130 | 2021-05-17T03:12:37 | 2021-05-17T03:12:37 | 368,036,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | '''
We'll take two matrices with the same dimensions, add them, and return the result
'''
#do it this way or use range function?
def matrix_addition(a, b):
    """Return the element-wise sum of two equally-sized 2-D matrices.

    Builds brand-new result rows, so neither input matrix is mutated.
    (The original seeded the result with the shallow ``a.copy()``, whose
    rows are the *same* list objects as ``a``'s rows — summing in place
    therefore clobbered ``a`` as a side effect.)
    """
    return [[x + y for x, y in zip(row_a, row_b)]
            for row_a, row_b in zip(a, b)]
def matrix_addition2(a, b):
    """Element-wise sum of two same-shape matrices (explicit-loop variant).

    Fixed to accumulate into a fresh list instead of writing through the
    shallow ``a.copy()``, whose shared row lists caused ``a`` itself to be
    mutated by the addition.
    """
    sum_of_matrices = []
    for row_a, row_b in zip(a, b):
        sum_of_matrices.append([x + y for x, y in zip(row_a, row_b)])
    return sum_of_matrices
#while loop matrices (unessecarily hard)
def matrix_addition3(a, b):
    """Element-wise matrix sum — index/while variant of matrix_addition.

    Fixed the shallow-copy aliasing of the original: sums are written into
    newly created rows, so the input matrix ``a`` is left untouched.
    """
    sum_of_matrices = []
    i = 0
    while i < len(a):
        row = []
        j = 0
        while j < len(a[i]):
            row.append(a[i][j] + b[i][j])
            j += 1
        sum_of_matrices.append(row)
        i += 1
    return sum_of_matrices
"noreply@github.com"
] | noreply@github.com |
99d1df035fe01289d72bae11c68d2a404070e8f0 | 3e8561909eea121ce68fd824d8000975b6e09cf5 | /orders/migrations/0001_initial.py | f329065de7593cd2310682c78f8fcbb0a642c89b | [] | no_license | SebastianMeler/django-shop | c7d70d4a23c11afec5a0f802e5c94cf8154bc3d1 | 04412cf0a1f783a6951b2d41be63fcc16ca16bf7 | refs/heads/master | 2023-09-01T02:36:27.877618 | 2023-08-22T10:57:17 | 2023-08-22T10:57:17 | 248,446,058 | 0 | 0 | null | 2023-08-22T10:57:19 | 2020-03-19T08:13:11 | Python | UTF-8 | Python | false | false | 1,797 | py | # Generated by Django 3.0.4 on 2020-03-20 08:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the orders app.

    Creates ``Order`` (customer/shipping details plus paid flag) and
    ``OrderItem`` (links an Order to a ``shop.Product`` with a price
    snapshot and a quantity).
    """

    initial = True

    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=250)),
                ('postal_code', models.CharField(max_length=20)),
                ('city', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('paid', models.BooleanField(default=False)),
            ],
            options={
                # Newest orders first by default.
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Price is snapshotted per line item, independent of later product price changes.
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='shop.Product')),
            ],
        ),
    ]
"sebastianissimus@gmail.com"
] | sebastianissimus@gmail.com |
3a59b6324f48032a8c58f34957ffbed79c1fcb08 | 72f2f37c3c33e5bc02ec6c707a7c858d7990db3a | /examples/tour_examples/driverjs_maps_tour.py | 33fb342608c1c2cd08a48da9c5a1aab3f8ac71a0 | [
"MIT"
] | permissive | matthewxuda/SeleniumBase | 190e4917dec8c731f17fd9d6a1247f8c17086d0c | efd282a860206dad81d0d4e61a472138eb04328d | refs/heads/master | 2023-09-01T09:17:57.608760 | 2021-10-21T02:48:32 | 2021-10-21T02:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,129 | py | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
    """SeleniumBase example: build, export, and play a DriverJS guided tour
    over the Google Maps UI."""

    def test_create_tour(self):
        """Open Google Maps, define a multi-step tour, export it to JS, then play it."""
        self.open("https://www.google.com/maps/@42.3591234,-71.0915634,15z")
        # Wait for the key UI landmarks before attaching tour steps to them.
        self.wait_for_element("#searchboxinput", timeout=20)
        self.wait_for_element("#minimap", timeout=20)
        self.wait_for_element("#zoom", timeout=20)
        # Create a website tour using the DriverJS library
        # Same as: self.create_driverjs_tour()
        self.create_tour(theme="driverjs")
        self.add_tour_step(
            "🗺️ Welcome to Google Maps 🗺️",
            "html",
            title="✅ SeleniumBase Tours 🌎",
        )
        self.add_tour_step(
            "You can type a location into this Search box.", "#searchboxinput"
        )
        self.add_tour_step(
            "Then click here to view it on the map.",
            "#searchbox-searchbutton",
            alignment="bottom",
        )
        self.add_tour_step(
            "Or click here to get driving directions.",
            "#searchbox-directions",
            alignment="bottom",
        )
        self.add_tour_step(
            "Use this button to get a Satellite view.",
            "div.widget-minimap-shim",
            alignment="right",
        )
        self.add_tour_step(
            "Click here to zoom in.", "#widget-zoom-in", alignment="left"
        )
        self.add_tour_step(
            "Or click here to zoom out.", "#widget-zoom-out", alignment="left"
        )
        self.add_tour_step(
            "Use the Menu button for more options.",
            ".searchbox-hamburger-container",
            alignment="right",
        )
        self.add_tour_step(
            "Or click here to see more Google apps.",
            '[title="Google apps"]',
            alignment="left",
        )
        self.add_tour_step(
            "Thanks for using SeleniumBase Tours",
            "html",
            title="🚃 End of Guided Tour 🚃",
        )
        self.export_tour()  # The default name for exports is "my_tour.js"
        self.play_tour(interval=0)  # If interval > 0, autoplay after N seconds
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
4418a4347de2932ba4812bb390ac0ef4e1b3efdb | c2f995670ed2ed2033d255a6ab96093ec7a55a1d | /scheduleweb/authorization/admin.py | b23d5692ef70f0c812ccbe545bb8c90f1bbf4c9c | [] | no_license | edvegas/scheduleapp | 14cd8d2d28c0adb8bab470fb1b730f1f4ea76abd | 7dbc55ba79eb7b26ee795c5aef567d985d12e96d | refs/heads/master | 2020-04-22T15:02:01.580961 | 2019-04-18T12:40:46 | 2019-04-18T12:40:46 | 170,464,380 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from django.contrib import admin
from .models import Profile
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
class ProfileInline(admin.StackedInline):
    """Edit the related Profile inline on the User admin page."""
    model = Profile
    can_delete = False  # a User must always keep its Profile
    verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
    """Django's stock User admin, extended with the inline Profile editor."""
    inlines = (ProfileInline,)
# Swap Django's default User admin for the Profile-aware UserAdmin.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| [
"edbulatov@gmail.com"
] | edbulatov@gmail.com |
1102f5d0aa67daf57769bc61b420c4daa5a19e6d | 5ed5f99610de14cab056018cd46f8305a15097da | /part4/while-loop.py | e5f64ebc0ec32a3002c35d79b42f8e11d13d2f6b | [] | no_license | shazam2064/complete-python-developer | 04288f07e8901cb881bb8c58fd1fd690015e5803 | c5746e61307e29c387450b089d8d1156f886ce3e | refs/heads/main | 2023-05-06T07:40:26.752274 | 2021-06-02T16:05:32 | 2021-06-02T16:05:32 | 370,096,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | # x = 0
#
# while x < 10000000000000000:
# x += 1
# print(x)
# else:
# print("print x is not less than 10")
# x = 0
#
# while x < 10:
# x += 3
# print(x)
# else:
# print("print x is not less than 10")
# Demonstrate `continue` in a while/else loop: print multiples of 3 below 13,
# skipping 6. The original tested `x == 6` BEFORE incrementing, so once x
# reached 6 the loop spun forever without ever advancing.
x = 0

while x < 10:
    x += 3  # advance first, so `continue` cannot stall the loop
    if x == 6:
        continue  # skip printing 6; x has already moved on
    print(x)
else:
    # while/else: runs because the loop ended by its condition, not a break
    print("print x is not less than 10")
| [
"gabriel.salomon.2007@gmail.com"
] | gabriel.salomon.2007@gmail.com |
48067e4ceef655c896f3a35b0571079df7c10a52 | 97a4d29863d1ce96f366554fdd985c3ce580bb5d | /061.py | f14890c43a1e22228adf9d4732a5d4ba2c6c44f6 | [] | no_license | Everfighting/Python-Algorithms | 5c3a102fed3a29858f3112d657c69e077efc7e28 | 235e9b4c66602035be39a8d3b3ad9cf016aebbb9 | refs/heads/master | 2021-01-20T22:19:18.902687 | 2018-03-02T05:38:27 | 2018-03-02T05:38:27 | 61,302,323 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
def pascal_triangle(rows=10):
    """Return the first *rows* rows of Pascal's triangle as lists of ints.

    Row edges are 1; each interior entry is the sum of the two entries
    directly above it. ``rows=0`` yields an empty list. Generalizes the
    original, which hard-coded a 10x10 working matrix.
    """
    triangle = []
    for i in range(rows):
        row = [1] * (i + 1)  # edges (and rows 0/1) are all 1s
        for j in range(1, i):
            row[j] = triangle[i - 1][j - 1] + triangle[i - 1][j]
        triangle.append(row)
    return triangle


if __name__ == '__main__':
    from sys import stdout
    # Same output shape as the original: values separated (and trailed) by a
    # space, one row per line (print() supplies the newline the Py2-style
    # bare `print` intended).
    for row in pascal_triangle(10):
        for value in row:
            stdout.write(str(value))
            stdout.write(' ')
        print()
print | [
"cbb903601682@163.com"
] | cbb903601682@163.com |
a5c232a38ca2baa4eef74f5b7a3ca714948d7295 | cb33631104dcd88c70f4654d91135ab0768459db | /mooc/apps/schools/adminx.py | 473820e4766180fcd9a38bbdad4f03b7ae2329e7 | [] | no_license | Shirlesha/DjangoMooc | 54956a4580e0f2b5d3ba2796f4794c912dd652f8 | db01795c97db55f18b69f22cd91a72cc03eaa312 | refs/heads/master | 2020-05-21T13:27:15.967861 | 2019-05-14T00:57:12 | 2019-05-14T00:57:12 | 186,072,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # encoding: utf-8
__author__ = 'Shirlesha'
__date__ = '2019/5/12 0009 08:02'
import xadmin
from .models import CityDict, SchoolName, Lecturer
class CityDictAdmin(object):
    """Admin options for the cities that schools belong to."""
    list_display = ['name', 'desc', 'add_time']
    search_fields = ['name', 'desc']
    list_filter = ['name', 'desc', 'add_time']
class SchoolNameAdmin(object):
    """Admin options for school/course information."""
    list_display = ['name', 'desc', 'category', 'add_time']
    search_fields = ['name', 'desc', 'category']
    # city__name / address are filterable but not shown in the list view.
    list_filter = ['name', 'desc', 'category', 'city__name', 'address', 'add_time']
class LecturerAdmin(object):
    """Admin options for lecturers."""
    list_display = ['name', 'school', 'work_years', 'work_company', 'work_position', 'add_time']
    search_fields = ['school', 'name', 'work_years', 'work_company', 'work_position']
    list_filter = ['school__name', 'name', 'work_years', 'work_company', 'work_position', 'add_time']
# Hook the admin option classes into xadmin.
xadmin.site.register(CityDict, CityDictAdmin)
xadmin.site.register(SchoolName, SchoolNameAdmin)
xadmin.site.register(Lecturer, LecturerAdmin)
| [
"1251671603@qq.com"
] | 1251671603@qq.com |
6c4181efcbbbf3de4db6c12f8f7c78a49723c80b | 10dc5606b8de20a51e5468d2224805b2b52c5158 | /wei/app7.py | 3a9dd976d41ff013880e1c74b4f5971bea468a8b | [] | no_license | stevecheen/crawler | 22e006d73b9864ad6ab5be2bc527501ccf7d4bab | 567463855771a2933958023f2c984337f78e1a2f | refs/heads/master | 2022-03-26T21:09:00.291721 | 2019-12-12T07:47:50 | 2019-12-12T07:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,484 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
In this example, we determine the event sender
object.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from time import sleep
import random
import time
import re
import sys
from PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication
from requests import Session
import json
from bs4 import BeautifulSoup
from pygame import mixer # Load the required library
import datetime
import base64
import traceback
import threading
import time
import os
deskPath=os.path.join(os.path.expanduser('~'), "Desktop")  # output files land on the user's desktop
# Multi-threading design (original note translated below):
#   1. Create worker threads, one dict-cleanup thread, and an alarm thread.
#   2. Globals describe the requests each thread must process.
#   3. Track the draw/term number.
'''
1、创建8个处理线程、一个清理字典的线程【清理】、告警线程
2、全局变量定义好每个线程需要处理的请求
3、期号判断
'''
# Globals
data_url = '%s/frommesg?__=frommesg&gameIndex=3&page=%s&t=%s'  # bet-list endpoint: base url, page number, token
url=''  # agent site base URL, decoded from D:/app/conf.txt in Example.genma()
# Per-thread work map: {threadName: [{termNo, pageList 'p1-p2-...', status 0=pending / 1=done}, ...]}
threadDic={}
# Alarm map: {termNo: 0 = not yet alerted, 1 = alerted}
alarmDic={}
# IDs of already-processed data rows
data_ids = []
# Current draw/term number
current_cicle = ''
# Set of term numbers — NOTE(review): never written anywhere visible in this file
tips = set()
p = 1
s = Session()  # shared requests session, seeded with Selenium cookies in genma()
t=''  # page token scraped from the logged-in page source ('autoTid')
threadNo=32  # worker-thread count (the translated note above says 8, but 32 are created)
max_data_id=0
class pullDataThread(threading.Thread):  # subclass of threading.Thread
    """Worker thread: runs the pullData() loop for one threadDic slot."""
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name  # key into the global threadDic work map
    def run(self):  # invoked automatically after .start()
        """Announce start/stop around the (normally infinite) worker loop."""
        print("Starting " + self.name)
        pullData(self.name)  # never returns in normal operation
        print("Exiting " + self.name)
def pullData(threadName):
    """Worker loop for one thread.

    Drains pending entries (status == 0) from threadDic[threadName]; for each
    entry, fetches every page listed in its 'pageList' ('p1-p2-...') from the
    agent site, buckets each bet code by type, and appends the codes to text
    files on the desktop:
      * codes containing '球' -> rewritten through a position mask, 任一 file
      * codes splitting on 'X' into 2 / 3 / 1 parts -> 任三 / 任二 / 任四 files
      * every code additionally goes into the 注单 ("all bets") file
    The entry is then marked status = 1 so the cleanup thread can drop it.
    """
    while True:
        try:
            if threadDic.get(threadName)!=None:
                if len(threadDic[threadName])==0:
                    continue
                l = threadDic[threadName]
                for e in l:
                    if e.get('status') == 0:
                        # if e.get('termNo')== current_cicle:
                        termNo = e.get('termNo')
                        pageListStr = e.get('pageList')
                        data_list = []  # 任四 ("pick four") bets
                        data_list1 = []  # 任一 ("pick one") bets
                        data_list2 = []  # 任二 ("pick two") bets
                        data_list3 = []  # 任三 ("pick three") bets
                        data_list_all = []  # every bet (original comment said 任三 — copy/paste slip)
                        for p in str.split(pageListStr,'-'):
                            # p = i
                            response = s.get(data_url % (str(url), str(p), str(t)))
                            jsonobj = json.loads(response.text)
                            for e1 in jsonobj['list']:
                                code = e1[3]
                                money = e1[5]
                                # data-id lives on a <span> inside the row's HTML cell
                                soup = BeautifulSoup(e1[0], "html.parser")
                                data_id = soup.span.attrs["data-id"]
                                # result.append('%s=%s,' % (str(code), str(money)))
                                # if data_id not in data_ids:
                                # data_ids.append(data_id)
                                if str(code).find('球')!=-1:
                                    # print(code[1])
                                    # print(code.split('>')[1].split('<')[0])
                                    code0 = code.split('>')[1].split('<')[0]
                                    # NOTE(review): the '二' mask 'X%XX' is missing the 's' after '%'
                                    # ('X%sXX'?) — '%' followed by 'X' raises ValueError when formatted.
                                    dic = {'一': '%sXXX', '二': 'X%XX', '三': 'XX%sX', '四': 'XXX%s'}
                                    codeIndex = code[1]
                                    codeStr = dic[codeIndex] % (str(code0))
                                    data_list1.append('%s=%s' % (str(codeStr), str(money)))
                                    data_list_all.append('%s=%s' % (str(codeStr), str(money)))
                                elif len(str(code).upper().split('X'))==2:
                                    data_list3.append('%s=%s' % (str(code), str(money)))
                                    data_list_all.append('%s=%s' % (str(code), str(money)))
                                elif len(str(code).upper().split('X')) == 3:
                                    data_list2.append('%s=%s' % (str(code), str(money)))
                                    data_list_all.append('%s=%s' % (str(code), str(money)))
                                elif len(str(code).upper().split('X')) == 1:
                                    data_list.append('%s=%s' % (str(code), str(money)))
                                    data_list_all.append('%s=%s' % (str(code), str(money)))
                                # else :
                                #     # print(code[1])
                                #     # print(code.split('>')[1].split('<')[0])
                                #     code0=code.split('>')[1].split('<')[0]
                                #     dic={'一':'%sXXX','二':'X%XX','三':'XX%sX','四':'XXX%s'}
                                #     codeIndex=code[1]
                                #     codeStr=dic[codeIndex]%(str(code0))
                                #     data_list1.append('%s=%s' % (str(codeStr), str(money)))
                                # print(format_time(), "新码:", threadName,p, data_id, code, money)
                        with open(os.path.join(deskPath,'注单.txt'), 'a+', encoding='utf-8') as f:
                            if len(data_list_all) > 0:
                                f.write(',' + ','.join(data_list_all))
                                # NOTE(review): logs len(data_list), not len(data_list_all)
                                print(format_time(), threadName,'写入', len(data_list))
                        with open(os.path.join(deskPath,'任四.txt'), 'a+', encoding='utf-8') as f:
                            if len(data_list) > 0:
                                f.write(',' + ','.join(data_list))
                                print(format_time(), threadName,'写入', len(data_list))
                        with open(os.path.join(deskPath,'任三.txt'), 'a+', encoding='utf-8') as f:
                            if len(data_list3) > 0:
                                f.write(',' + ','.join(data_list3))
                                print(format_time(), threadName,'写入', len(data_list3))
                        with open(os.path.join(deskPath,'任二.txt'), 'a+', encoding='utf-8') as f:
                            if len(data_list2) > 0:
                                f.write(',' + ','.join(data_list2))
                                print(format_time(), threadName,'写入', len(data_list2))
                        with open(os.path.join(deskPath,'任一.txt'), 'a+', encoding='utf-8') as f:
                            if len(data_list1) > 0:
                                f.write(',' + ','.join(data_list1))
                                print(format_time(), threadName,'写入', len(data_list1))
                        e['status'] = 1
                        print(format_time(), 'pullData-----1', threadDic)
            time.sleep(2)
        except Exception as e:
            print(format_time(), "pullData:", e)
            # traceback.print_exc()
            time.sleep(5)
    # print("%s: %s" % (threadName, time.ctime(time.time())))
# def getPageData(pageList):
# # data_url = '%s/frommesg?__=frommesg&gameIndex=3&page=%s&t=%s'
# data_list=[]
# for i in pageList:
# p = i
# response = s.get(data_url % (str(url), str(p), str(t)))
# jsonobj = json.loads(response.text)
# for e in jsonobj['list']:
# code = e[3]
# money = e[5]
# soup = BeautifulSoup(e[0], "html.parser")
# data_id = soup.span.attrs["data-id"]
# # result.append('%s=%s,' % (str(code), str(money)))
# if data_id not in data_ids:
# # tip()
# data_ids.append(data_id)
# data_list.append('%s=%s' % (str(code), str(money)))
# print(format_time(), "新码:", p, data_id, code, money)
# with open('D:/%s.txt' % (str(current_cicle)), 'a+', encoding='utf-8') as f:
# if len(data_list) > 0:
# f.write(',' + ','.join(data_list))
# 告警线程 定时告警
# Alarm thread — periodic alerting (currently never started; see commented-out
# call in Example.genma()).
class alarmThread(threading.Thread):  # subclass of threading.Thread
    """Thread wrapper around the dealAlarmData() alert loop."""
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
    def run(self):  # invoked automatically after .start()
        print("Starting " + self.name)
        dealAlarmData(alarmDic)  # operates on the global alarm map
        print("Exiting " + self.name)
# 告警字典处理
def dealAlarmData(_alarmDic):
    """Alarm loop: every 5 s, play the alert once for each term whose flag is
    still 0, then set the flag to 1 so the alert never re-fires.

    Fixes the original, which (a) iterated the global ``alarmDic`` instead of
    the argument, (b) assigned ``v = 1`` to the loop variable (change lost),
    and (c) rebound only the *local* ``_alarmDic`` name — so alert state was
    never persisted and the alarm re-fired on every pass.
    """
    while True:
        print(format_time(), 'dealAlarmData-----3', _alarmDic)
        # list(): snapshot the keys so concurrent inserts don't break iteration
        for k in list(_alarmDic):
            if _alarmDic.get(k) == 0:
                print(format_time(), 'dealAlarmData-----0', _alarmDic)
                tip()  # audible alert (blocks ~5 s)
                _alarmDic[k] = 1  # persist "already alerted"
                print(format_time(), 'dealAlarmData-----1', _alarmDic)
        time.sleep(5)
# 定时清理已处理的线程中的期数
# Periodically remove term entries the workers have already processed.
class clearThread(threading.Thread):  # subclass of threading.Thread
    """Thread wrapper around the clearThreadData() cleanup loop."""
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
    def run(self):  # invoked automatically after .start()
        print("Starting " + self.name)
        clearThreadData(threadDic)  # operates on the global work map
        print("Exiting " + self.name)
def clearThreadData(_threadDic):
    """Cleanup loop: every 5 s, drop entries the workers have finished.

    Keeps only entries whose ``status`` is still the int ``0`` (pending).
    Fixes the original, which (a) compared against the *string* ``'0'`` while
    the workers store int ``0``/``1``, and (b) rebound only the local
    ``_threadDic`` name — so the shared dict was never actually trimmed.
    """
    while True:
        # list(): snapshot the keys so concurrent inserts don't break iteration
        for name in list(_threadDic):
            pending = [d for d in _threadDic.get(name, []) if d.get('status') == 0]
            if pending:
                print(format_time(), 'threadDic', _threadDic)
            _threadDic[name] = pending  # mutate in place so workers see the trim
        print(format_time(), 'clearThreadData-----1', _threadDic)
        time.sleep(5)
def getEndTime():
    """Read D:/app/conf.txt and return the licence expiry date string.

    Line 1 of the file is comma-separated; field index 1 holds the
    base64-encoded 'YYYY-MM-DD' expiry date.
    """
    with open(r'D:/app/conf.txt', encoding='utf-8') as fb:  # config file being read
        content = fb.readlines()
    decodestr= base64.b64decode(content[0].split(',')[1])
    endDateStr=decodestr.decode()
    print(format_time(), "************************************************到期时间为:", endDateStr)
    return endDateStr
def format_time():
    """Local-time log timestamp 'Y-M-D h:m:s' (fields NOT zero-padded,
    matching the original output format).

    Fix: the original called time.localtime() six times, so the fields could
    come from different instants and tear across a second/midnight boundary;
    a single snapshot keeps them consistent.
    """
    lt = time.localtime()
    return "%s-%s-%s %s:%s:%s" % (
        lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min, lt.tm_sec)
def format_filename():
    """Timestamp for file names: the six local-time fields concatenated with
    no separators (fields NOT zero-padded, matching the original format).

    Fix: take one time.localtime() snapshot instead of six separate calls,
    so all fields describe the same instant.
    """
    lt = time.localtime()
    return "%s%s%s%s%s%s" % (
        lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min, lt.tm_sec)
def tip():
    """Audible alert: print a notice, play D:/app/tip.mp3, then sleep 5 s.

    NOTE(review): mixer.init() is re-run on every call — redundant after the
    first call; confirm it is harmless with this pygame version.
    """
    print(format_time(), '有新的下注码!!!!!')
    mixer.init()
    mixer.music.load('D:/app/tip.mp3')
    mixer.music.play()
    print(format_time(), '休息5秒')
    time.sleep(5)
def isTimeOut(endDateStr):
    """Return True when the licence expiry date *endDateStr* ('YYYY-MM-DD')
    has passed, else False.

    Fix: the original fell off the end and implicitly returned None on the
    not-expired path; callers test ``== True``, so an explicit False stays
    fully compatible while making the contract clear.
    """
    end_time = int(datetime.datetime.strptime(endDateStr, '%Y-%m-%d').timestamp()) * 1000
    now_time = int(datetime.datetime.now().timestamp() * 1000)
    if now_time > end_time:
        # Licence expired — tell the operator (message kept verbatim).
        print(format_time(), '该会员已到期,到期日期为:', endDateStr)
        return True
    return False
def defFiles():
    """Remove the previous term's bet-record files from the desktop, if present."""
    for bet_file in ('注单.txt', '任四.txt', '任三.txt', '任二.txt', '任一.txt'):
        full_path = os.path.join(deskPath, bet_file)
        if os.path.exists(full_path):
            os.remove(full_path)
class Example(QMainWindow):
    """Tiny Qt front-end: a single "begin" button that, when clicked, logs in
    through Selenium and then polls the betting site forever (genma)."""
    def __init__(self):
        super().__init__()
        # tip()
        self.initUI()
    def initUI(self):
        """Build the one-button window and show it."""
        btn1 = QPushButton("begin", self)
        btn1.move(30, 50)
        btn1.clicked.connect(self.buttonClicked)
        self.statusBar()
        self.setGeometry(300, 300, 290, 150)
        self.setWindowTitle('Event sender')
        self.show()
    def genma(self):
        """Main scraping routine.

        1. Decode the agent URL from D:/app/conf.txt and spawn the worker +
           cleanup threads.
        2. Check licence expiry; exit if expired.
        3. Open Chrome via Selenium, give the operator 60 s to log in by hand,
           then copy the browser cookies/user-agent into the requests session
           and scrape the 'autoTid' token from the page source.
        4. Poll the bet-list endpoint forever, detecting new terms and
           assigning pages round-robin to the worker threads via threadDic.
        NOTE(review): this loop never returns, so it blocks the Qt UI thread.
        """
        # print(format_time(), '************************************************到期日期为:', endDateStr)
        # url = 'http://ag1.aa9797.com' vip888 ok0006 bb123456
        # # url='http://ag1.td9898.com'#kkjj001 aa123456 vip666
        # search_code = '147147'
        # account = 'jk5599 '
        # pwd = 'aa123456'
        with open(r'D:/app/conf.txt', encoding='utf-8') as fb:  # config file being read
            content = fb.readlines()
            # print(format_time(), "配置信息",content[0])
        decodestr=content[0].split(',')[0]
        global url
        url = base64.b64decode(decodestr).decode()
        # Create the worker threads (threadNo of them), one work-map slot each.
        global threadNo
        global threadDic
        for i in range(threadNo):
            threadDic["Thread-%s" % (str(i+1))]=[]
            pullDataThread(i + 1, "Thread-%s" % (str(i+1))).start()
        # alarmThread(1, "AlarmThread").start()
        clearThread(1, "clearThread").start()
        endDateStr = getEndTime()
        if isTimeOut(endDateStr) == True:
            exit()
        print(format_time(),'代理网址为:',url)
        # search_code=content[0].split(',')[1]
        # username=content[0].split(',')[2]
        # pwd=content[0].split(',')[3]
        webdriver.ChromeOptions.binary_location = 'D:/app/chrome/chrome.exe'
        """会员登录 start"""
        driver_account = webdriver.Chrome('D:/app/chromedriver.exe')
        # driver_account.get(url)
        # driver_account.get()
        # driver_account.maximize_window()
        driver_account.get(url)
        # driver_account.find_element_by_id('search').send_keys(search_code)
        # sleep(1)
        # driver_account.find_element_by_id('btnSearch').click()
        # sleep(3)
        # driver_account.find_element_by_id('username').send_keys(username)
        # sleep(1)
        # driver_account.find_element_by_id('pass').send_keys(pwd)
        """会员登录 end"""
        # Manual login window: the operator has 60 seconds to sign in.
        print(format_time(), "一分钟内快速登录!!!!!")
        sleep(60)
        # while True:
        #     wait.until(EC.presence_of_element_located((By.ID, 'shell_title')))
        #     if driver_account.find_element_by_xpath('//*[@id="shell_title"]').text == '最新公告':
        #         print(format_time(), "会员登录成功")
        #         break
        #     sleep(5)
        # Reddit will think we are a bot if we have the wrong user agent
        selenium_user_agent = driver_account.execute_script("return navigator.userAgent;")
        s.headers.update({"user-agent": selenium_user_agent})
        # Clone the logged-in browser session into the shared requests session.
        for cookie in driver_account.get_cookies():
            s.cookies.set(cookie['name'], cookie['value'], domain=cookie['domain'])
        global t
        # Scrape the 'autoTid' request token out of the raw page source.
        t= driver_account.page_source.split('autoTid')[1].split(':')[1].split(',')[0].replace('\\', '').replace('"','')
        print(format_time(), "t:", t)
        while True:
            try:
                global current_cicle
                global alarmDic
                global max_data_id
                # data_url ='%s/frommesg?__=frommesg&gameIndex=%s&settlement=0&beforeDate=2019-11-01×Num=20191101016&category_id=0&t=%s'
                response = s.get(data_url % (str(url),str(p), str(t)))
                jsonobj = json.loads(response.text)
                totalPage = jsonobj['totalPage']
                # New term handling: 1) reset processed ids  2) alarm bookkeeping.
                cur_max_data_id=0
                if (len(jsonobj['list']) > 0):
                    cur_max_data_id = BeautifulSoup(jsonobj['list'][0][0], "html.parser").span.attrs['data-id']
                if current_cicle != jsonobj['numAry'][0]:  # a new draw/term started
                    max_data_id=0
                    data_ids.clear()
                    if len(jsonobj['list'])>0:
                        current_cicle = jsonobj['numAry'][0]
                        max_data_id=cur_max_data_id
                        # Alarm bookkeeping: first sight of this term -> reset files + alert.
                        if current_cicle not in alarmDic.keys():
                            alarmDic[current_cicle]=0
                            defFiles()
                            tip()
                    else:
                        sleep(5)
                        continue
                else:  # still the same term
                    if(int(cur_max_data_id)==int(max_data_id)):
                        sleep(5)  # nothing new on page 1; poll again later
                        continue
                    else:
                        if(cur_max_data_id!=0):
                            defFiles()  # new rows arrived: rewrite the output files from scratch
                            max_data_id=cur_max_data_id
                for i in range(totalPage):
                    # Round-robin page assignment across the worker threads.
                    threadName="Thread-%s" % (str(i%threadNo+1))
                    # threadDic = {
                    #     'thread-1': [{'termNo': '2019110201', 'pageList': 1-3-5, 'status': '0'},
                    #                  {'termNo': '2019110202', 'pageList': [1, 3, 5], 'status': '0'}]}
                    if threadDic.get(threadName)!=None:
                        l = threadDic[threadName]
                        hasNo=False
                        for e in l:
                            if e.get('termNo')== current_cicle and e['status']==0:
                                hasNo=True
                                if str(i+1) not in str(e['pageList']).split('-'):
                                    e['pageList']=e['pageList']+'-'+str(i+1)
                        if hasNo==False:
                            # No pending entry for this term yet: start a fresh one.
                            newList=[]
                            e={}
                            e['termNo'] = current_cicle
                            e['status'] = 0
                            e['pageList'] = str(i + 1)
                            newList.append(e)
                            threadDic[threadName]=newList
                print(format_time(), 'threadDic',threadDic)
                print(format_time(), 'alarmDic',alarmDic)
                # list = jsonobj['list']
                # if len(list) >0 :
                #     tip()
                print(format_time(), current_cicle,"休息5s!!!!")
                time.sleep(5)
            except Exception as e:
                print(format_time(),e)
                # traceback.print_exc()
                time.sleep(5)
                # driver_account.refresh()
                # self.genma()
                # with open('D:/app/%s.log' % (str(int(time.strftime('%Y%m%d')))), 'a',
                #           encoding='utf-8') as f:
                #     f.write(log_str + "\n")
    def buttonClicked(self):
        """Acknowledge the click in the status bar, then start genma()
        (which never returns, so the UI freezes afterwards)."""
        sender = self.sender()
        self.statusBar().showMessage(sender.text() + ' was pressed')
        self.genma()
if __name__ == '__main__':
    # Start the Qt event loop; scraping begins when "begin" is clicked.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
| [
"xumajie88@163.com"
] | xumajie88@163.com |
3edf4d96d625b272d07a4bfa4b82d171d201b46f | b09e596edb48976ceea7e512714a388338e4908f | /DDP_LAB9/main.py | 41c357da9943843aba9c68d8eee4ded4db0bdbf2 | [] | no_license | ztirrataufik/Dasar-Dasar-Pemrograman | 9ee186d8ff6a5ee5bf8b4e3126d2036308a52e2d | f083f2cae4096be3618f449f7b3f1eb06f9242f0 | refs/heads/main | 2023-03-07T03:28:56.057631 | 2021-02-16T05:42:32 | 2021-02-16T05:42:32 | 304,260,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | # Nama:
# NIM:
# Kelas:
def sort_desc(arr):
    """Sort *arr* in place into descending order with a bubble sort.

    Returns the same list object, now sorted from largest to smallest.
    """
    n = len(arr)
    # After each outer pass the smallest remaining element has "sunk" to the
    # end, so the unsorted region shrinks by one from the right.
    for unsorted_end in range(n - 1, 0, -1):
        for j in range(unsorted_end):
            if arr[j] < arr[j + 1]:
                # Neighbours are out of (descending) order -- swap them.
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
#Result of a discussion with classmates
# Mulai baris ini hingga baris paling bawah
# digunakan untuk mengetes fungsi yang telah dibuat.
# Tidak perlu mengubah bagian ini.
# Ketika dijalankan, program akan menampilkan contoh
# pemanggilan fungsi dan solusi yang seharusnya.
# Cocokkan hasil pemanggilan fungsi dengan solusi
# yang seharusnya.
def test():
    """Print example calls of sort_desc next to the expected answers."""
    # Each case pairs an input list with the expected (descending) answer.
    cases = [
        ([2, 3, 1, 0, 4], "[4, 3, 2, 1, 0]"),
        ([1, 2, 3], "[3, 2, 1]"),
    ]
    for data, solusi in cases:
        call_text = f"sort_desc({data})"  # capture before the in-place sort
        r = sort_desc(data)
        print(f"{call_text} = {r} \n(solusi: {solusi}")
        print()
# Run the demonstration only when executed as a script, not on import.
if __name__ == '__main__':
    test()
"noreply@github.com"
] | noreply@github.com |
c216efa4d1718ae2ddfb65ef0d24f2825156d9ab | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03455/s808955934.py | 5a703a489b980a233dbc16c583c6ec0ff1dc188d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # ABC 086
A,B= map(int,input().split())
if (A* B)%2==0:
ans ='Even'
else:
ans='Odd'
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e1bf319ac4b1a93b08f0dafc5fd453b9cd95d5b1 | 4e44974b9e59dfd4324d84b12b10f008117814cd | /test_autofit/integration/src/dataset/dataset.py | c3dc9773c00b8b4cc97f43fc249734b1546be650 | [
"MIT"
] | permissive | PriyatamNayak/PyAutoFit | 2cc2608943f8c3bdbda3b268142e7307014ccaf2 | 32c0c30acd219030c86a12db82ae54e406fd7119 | refs/heads/master | 2023-03-04T07:27:41.547966 | 2021-02-11T23:21:00 | 2021-02-11T23:21:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,487 | py | from astropy.io import fits
import numpy as np
# The 'dataset.py' module has been extended to give the dataset a name and metadata.
class Dataset:
    def __init__(self, data, noise_map, name=None):
        """A class containing the data and noise-map of a 1D line dataset.

        Parameters
        ----------
        data : np.ndarray
            The array of the data, in arbitrary units.
        noise_map : np.ndarray
            An array describing the RMS standard deviation error in each data pixel, in arbitrary units.
        name : str, optional
            Human-readable name used by the aggregator to determine the file
            the dataset is saved as. Defaults to "dataset".
        """
        self.data = data
        self.noise_map = noise_map

        # BUG FIX: the original used `name if name is str`, which compares
        # `name` with the `str` *type* by identity and is therefore always
        # False -- every dataset was silently named "dataset".  isinstance
        # accepts any actual string name.
        self.name = name if isinstance(name, str) else "dataset"

    @property
    def xvalues(self):
        """The x coordinate (pixel index) of every data point."""
        return np.arange(self.data.shape[0])

    @classmethod
    def from_fits(cls, data_path, noise_map_path, name=None):
        """Load the data and noise-map of a 1D line dataset from ``.fits`` files.

        Parameters
        ----------
        data_path : str
            The path on your hard-disk to the ``.fits`` file of the data.
        noise_map_path : str
            The path on your hard-disk to the ``.fits`` file of the noise-map.
        name : str, optional
            Name forwarded to the constructed dataset.
        """
        data_hdu_list = fits.open(data_path)
        noise_map_hdu_list = fits.open(noise_map_path)

        data = np.array(data_hdu_list[0].data)
        noise_map = np.array(noise_map_hdu_list[0].data)

        # `cls` (not the hard-coded class name) keeps subclasses working.
        return cls(data=data, noise_map=noise_map, name=name)
class MaskedDataset:
    def __init__(self, dataset, mask):
        """A dataset (image and noise-map) combined with a 1D mask.

        Masked (True) pixels are zeroed in both the data and the noise-map.

        Parameters
        ----------
        dataset: im.Dataset
            The dataset (the image, noise-map, etc.)
        mask: msk.Mask2D
            The 1D mask that is applied to the dataset.
        """
        self.dataset = dataset
        self.mask = mask

        unmasked = np.invert(mask)
        self.data = dataset.data * unmasked
        self.noise_map = dataset.noise_map * unmasked

    @property
    def xvalues(self):
        """Pixel indices of the (masked) data array."""
        return np.arange(self.data.shape[0])

    def signal_to_noise_map(self):
        """The data divided element-wise by the noise-map."""
        return self.data / self.noise_map

    def with_left_trimmed(self, data_trim_left):
        """Return a copy with *data_trim_left* pixels removed from the left.

        Passing None returns this instance unchanged.
        """
        if data_trim_left is None:
            return self

        trimmed = Dataset(
            data=self.dataset.data[data_trim_left:],
            noise_map=self.dataset.noise_map[data_trim_left:],
        )
        return MaskedDataset(dataset=trimmed, mask=self.mask[data_trim_left:])

    def with_right_trimmed(self, data_trim_right):
        """Return a copy with *data_trim_right* pixels removed from the right.

        Passing None returns this instance unchanged.
        """
        if data_trim_right is None:
            return self

        trimmed = Dataset(
            data=self.dataset.data[:-data_trim_right],
            noise_map=self.dataset.noise_map[:-data_trim_right],
        )
        return MaskedDataset(dataset=trimmed, mask=self.mask[:-data_trim_right])
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
739d5d61bfa888beb429e214efd7a4a9aabeb4e9 | 2a1e18918445041b4c40ccfd33e39b08d5280215 | /Simple-Scrape.py | 68bd4a7ab5f6e554436dca524484fac8b1ecaba9 | [] | no_license | Kristoff-Harris/Code-Templates-For-CTFs | e18ce530715b12b1869fa47e3b7f658f4c24e1e3 | d55e927516607824cba9b5a34138ba8c7fe9b51d | refs/heads/master | 2021-05-23T11:27:07.613202 | 2020-04-11T03:00:41 | 2020-04-11T03:00:41 | 253,265,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # Some code reused fom freeCodeCamp() https://www.freecodecamp.org/news/how-to-scrape-websites-with-python-and-beautifulsoup-5946935d93fe/
import requests as req
import hashlib
from bs4 import BeautifulSoup
# specify the url
quote_page = req.get('http://docker.hackthebox.eu:32559')

# parse the html using beautiful soup and store in variable `soup`
soup = BeautifulSoup(quote_page.text, 'html.parser')

# Take the text of the first <h3> tag and MD5-hash it.
original_value = soup.find('h3')
value_to_hash = original_value.text.strip()
hashed_val = hashlib.md5(value_to_hash.encode())

print('Value to hash is : ' + str(value_to_hash))
print('Hashed Value is :' + str(hashed_val.hexdigest()))

# BUG FIX: the original posted the md5 *object* (whose str() is
# "<md5 _hashlib.HASH object ...>"), not the digest string the server
# expects.  Post the hex digest instead.
post_data = {'hash': hashed_val.hexdigest()}
post_req_response = req.post('http://docker.hackthebox.eu:32559', data=post_data)
print(post_req_response.text)
| [
"noreply@github.com"
] | noreply@github.com |
f460837b8756ad3ec60887f952b1d9d59c97f4cf | 19924ac187843f4131281c2a3376bf446c1be510 | /PyCharm/Prob3/venv/bin/python-config | 485ba52de9a39bf54f5d685b34d010c694378504 | [] | no_license | mcfaddja/InfoTheory-MidTerm-py | f3768c9e27c6a734f9d4f850cb9e0553689ad407 | a180abce6039b03d3e750c62371aa9ccc6aa2da2 | refs/heads/master | 2021-03-27T11:46:02.760514 | 2018-05-20T02:38:10 | 2018-05-20T02:38:10 | 120,137,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | #!/Users/jamster/Documents/GitHub/InfoTheory-MidTerm-py/PyCharm/Prob3/venv/bin/python
import sys
import getopt
import sysconfig
# Option names accepted on the command line (without the leading "--").
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
# Newer interpreters expose extra build information; insert(-1, ...) slots
# 'extension-suffix' just before 'help'.
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write this script's usage line to stderr and exit with *code*."""
    options = '|'.join('--' + name for name in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], options))
    sys.exit(code)
# Parse the command line; any unknown option is a usage error.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

# At least one option is required.
if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

# None of the options take a value, so only the flag names matter.
opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

# Emit the requested build information in the order given on the command line.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))

    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))

    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))

    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))

    elif opt == '--extension-suffix':
        # EXT_SUFFIX may be missing; fall back to the legacy 'SO' variable.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)

    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)

    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
"mcfaddja@uw.edu"
] | mcfaddja@uw.edu | |
8381a8a31b17787de19e3c19f0c3f447232dc858 | dd1a27f51d4a4bf9f450617aa8562069758b1d82 | /business/admin.py | 686f5f0887c40c9e40e321d47234f167ebc9f9de | [] | no_license | PC97/creditSol | 7afcffff1c1fbcc0aae789368fbd7b973a076a9d | 6a694b035cf72475a9f0791fd8aafa5484e8fff3 | refs/heads/master | 2020-04-03T12:40:11.338677 | 2018-11-14T17:20:27 | 2018-11-14T17:20:27 | 155,259,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from django.contrib import admin
from .models import BusinessAccount,UploadFile
# Register your models here.
admin.site.register(BusinessAccount)
admin.site.register(UploadFile)
| [
"pranjilc@gmail.com"
] | pranjilc@gmail.com |
73b57c37c9bda1b42aceaaaefec1a06ed28cd9ae | c7d4e46dee70c56dc790408193fd1ddaebc13c2f | /Problem29/Problem29.py | eb387f81b9de9de3f09f8ca1cfe8f97e9f1bb475 | [] | no_license | schnitzlMan/ProjectEuler | c27490b6c87c45f22053409ab71ae6f929fbbf79 | 92e2bd3cfe3bffd44a1da474447959fcc6670a56 | refs/heads/master | 2020-05-24T07:31:45.265350 | 2017-03-13T20:37:05 | 2017-03-13T20:37:05 | 84,832,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 14:07:36 2017
@author: Peter
"""
# below is my solution - I saw that 'set' would work out nicely (probably from math)
# then I can always add, but the set only grows if new elements entered.
maxNum = 100

# Project Euler 29: count the distinct values of a**b for 2 <= a, b <= maxNum.
# A set de-duplicates automatically, replacing the original O(n) "in list"
# membership test inside the double loop (quadratic overall) with O(1) lookups.
distinct_powers = {i ** j
                   for i in range(2, maxNum + 1)
                   for j in range(2, maxNum + 1)}

print(len(distinct_powers))
"thekolbfamily@gmail.com"
] | thekolbfamily@gmail.com |
eede4029fa65ba5d911e0a4bfa27617b556819c1 | 3bb3f9779755cb2c54d7c25730402f3a3e2ad668 | /lib_controller/settings.py | 28fcfb093e8f44402dd8c4b36b79f423a0026a16 | [] | no_license | trunghieule0301/demo-luan-van | d1b86ffbb273f9cc77bef791c372538abd27bc11 | 079df0584f9f416b2fe97cde4f7dc1e960869e48 | refs/heads/main | 2023-07-12T03:07:44.530813 | 2021-08-13T16:18:01 | 2021-08-13T16:18:01 | 395,297,825 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,497 | py | """
Django settings for lib_controller project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ng9g1oida5a)v-8ku%hwn^rne@e(f-sm=+y)t!^ygw30v)9x2w'
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before any real deployment.

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# BUG FIX: Django validates the Host header *without* the port, so the old
# entry '127.0.0.1:8000' could never match and local requests were rejected.
ALLOWED_HOSTS = ['lit-wildwood-47230.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    # Django core apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app.
    'api.apps.ApiConfig',
    # Authentication stack: django-allauth + rest_auth registration endpoints.
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'corsheaders',
    'rest_auth',
    'rest_auth.registration',
    # API stack: DRF with token auth and django-filter backends.
    'rest_framework',
    'rest_framework.authtoken',
    'django_filters',
]

# Required by django.contrib.sites / allauth.
SITE_ID = 1
# Middleware order matters:
#  * WhiteNoiseMiddleware must sit directly after SecurityMiddleware
#    (whitenoise documentation).
#  * CorsMiddleware must run before any middleware that can generate a
#    response -- in particular before CommonMiddleware -- otherwise the
#    CORS headers are never added (django-cors-headers documentation).
# The original placed both at the *end* of the list, which silently broke
# CORS responses.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lib_controller.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Serve the compiled React frontend alongside app templates.
        'DIRS': [
            os.path.join(BASE_DIR, 'frontend/build')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'lib_controller.wsgi.application'

# Origins allowed by django-cors-headers (local React dev server).
CORS_ALLOWED_ORIGINS = [
    "http://localhost:3000",
]
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

# Local SQLite database file next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
MEDIA_URL = ''
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# NOTE(review): MEDIA_ROOT points at the project root -- uploads land next to
# the source tree; confirm this is intentional.
MEDIA_ROOT = os.path.join(BASE_DIR, '')
# NOTE(review): this overrides CORS_ALLOWED_ORIGINS above and allows *every*
# origin -- a security concern for anything beyond local development.
CORS_ORIGIN_ALLOW_ALL = True
# allauth: username-only accounts, no e-mail verification.
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = False
REST_FRAMEWORK = {
    # NOTE(review): 'path.to.custom.GameUserSerializer' is a placeholder
    # dotted path -- replace with the real serializer module.
    'USER_DETAILS_SERIALIZER': 'path.to.custom.GameUserSerializer',
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend']
}
| [
"noreply@github.com"
] | noreply@github.com |
f5e3f313c4584c8f5380fa9186122cf9b6227947 | 4cda6686b659c0bf34213a4c5faf050c4c866eea | /ExperimentSpecificCode/_2018_2019_Neuroseeker_Paper/_2019_Neuroseeker_Paper_Expanded/_33p1/NeuropixelSim/Sparce/spikesort_and_timelock_analysis.py | 841c428d900ac79f30a6bfcc8d9155a019730e56 | [] | no_license | georgedimitriadis/themeaningofbrain | da99efcf62af67bc6c2a71e504765026a4491217 | f138cf500a3ca6c8d76613c942787d9f073d67a7 | refs/heads/master | 2023-02-21T10:52:18.771691 | 2023-02-17T08:23:09 | 2023-02-17T08:23:09 | 50,346,965 | 3 | 1 | null | 2017-06-17T16:29:47 | 2016-01-25T11:42:43 | Python | UTF-8 | Python | false | false | 12,478 | py |
"""
The pipeline for spikesorting this dataset
"""
import numpy as np
from os.path import join
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing as preproc
from BrainDataAnalysis.Spike_Sorting import positions_on_probe as spp
from spikesorting_tsne_guis import clean_kilosort_templates as clean
from spikesorting_tsne import preprocessing_kilosort_results as preproc_kilo
from ExperimentSpecificCode._2018_2019_Neuroseeker_Paper._2019_Neuroseeker_Paper_Expanded._33p1 \
import constants_33p1 as const_rat
from ExperimentSpecificCode._2018_2019_Neuroseeker_Paper._2019_Neuroseeker_Paper_Expanded \
import constants_common as const_com
import BrainDataAnalysis.neuroseeker_specific_functions as ns_funcs
from ExperimentSpecificCode._2018_Chronic_Neuroseeker_TouchingLight.Common_functions import events_sync_funcs as \
sync_funcs, firing_rates_sync_around_events_funcs as fr_funcs
from BrainDataAnalysis.Statistics import binning
import common_data_transforms as cdt
import sequence_viewer as sv
import slider as sl
# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc = "FOLDERS NAMES"
date = 1  # index into const_rat.date_folders selecting which session to analyse

# Denoised AP-band binary; passed to the cleaning GUI below.
binary_data_filename = join(const_rat.base_save_folder, const_rat.rat_folder, const_rat.date_folders[date],
                            'Analysis', 'Denoised', 'Data', 'Amplifier_APs_Denoised.bin')
analysis_folder = join(const_rat.base_save_folder, const_rat.rat_folder, const_rat.date_folders[date],
                       'Analysis', 'NeuropixelSimulations', 'Sparce')
kilosort_folder = join(analysis_folder, 'Kilosort')
data_folder = join(const_rat.base_save_folder, const_rat.rat_folder, const_rat.date_folders[date], 'Data')
events_folder = join(data_folder, "events")
# Pre-generated DataFrames for the behavioural event CSVs.
event_dataframes = ns_funcs.load_events_dataframes(events_folder, sync_funcs.event_types)
results_folder = join(analysis_folder, 'Results')
events_definitions_folder = join(results_folder, 'EventsDefinitions')
sampling_freq = const_com.SAMPLING_FREQUENCY
# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# STEP 1: RUN KILOSORT ON THE DATA
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc = "STEP 2: CLEAN SPIKESORT (RIGHT AFTER KILOSORT)"
# a) Create average of templates:
# To create averages of templates use cmd (because the create_data_cubes doesn't work when called from a REPL):
# Go to where the create_data_cubes.py is (in spikesort_tsne_guis/spikesort_tsen_guis) and run the following python command
# (you can use either the raw or the denoised data to create the average)
# python E:\Software\Develop\Source\Repos\spikesorting_tsne_guis\spikesorting_tsne_guis\create_data_cubes.py
# original
# "D:\\AK_33.1\2018_04_30-11_38\Analysis\NeuropixelSimulations\Sparce\Kilosort"
# "D:\\AK_33.1\2018_04_30-11_38\Analysis\Denoised\Data\Amplifier_APs_Denoised.bin"
# 1368
# 50
# (Use single space between parameters, not Enter like here)
# (Change the folders as appropriate for where the data is)
# b) Clean:
# Manual curation of the kilosort output; appears to write
# template_marking.npy (loaded in step c below) into the kilosort folder.
# NOTE(review): sampling_frequency is hard-coded to 20000 here while the rest
# of the script uses const_com.SAMPLING_FREQUENCY -- confirm they agree.
clean.cleanup_kilosorted_data(kilosort_folder,
                              number_of_channels_in_binary_file=const_com.NUMBER_OF_AP_CHANNELS_IN_BINARY_FILE,
                              binary_data_filename=binary_data_filename,
                              prb_file=const_com.prb_file,
                              type_of_binary=const_com.BINARY_FILE_ENCODING,
                              order_of_binary='F',
                              sampling_frequency=20000,
                              num_of_shanks_for_vis=5)
# c) Remove some types
# Report how many templates carry each curation mark (0-7), then fold marks
# 5 and 6 back into mark 0 and persist the result.  Mark semantics come from
# the cleaning GUI -- presumably 0 is the "rejected" class; confirm there.
template_marking = np.load(join(kilosort_folder, 'template_marking.npy'))

for mark in range(8):  # replaces eight copy-pasted print statements
    print(len(np.argwhere(template_marking == mark)))

# Boolean-mask assignment is the idiomatic equivalent of indexing the 1-D
# marking array with np.argwhere(...).
template_marking[template_marking == 5] = 0
template_marking[template_marking == 6] = 0
np.save(join(kilosort_folder, 'template_marking.npy'), template_marking)
# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc = "STEP 3: CREATE TEMPLATE INFO OF ALL THE CLEANED TEMPLATES"
# a) Create the positions of the templates on the probe (and have a look)
_ = spp.generate_probe_positions_of_templates(kilosort_folder)
bad_channel_positions = spp.get_y_spread_regions_of_bad_channel_groups(kilosort_folder, const_rat.bad_channels)
spp.view_grouped_templates_positions(kilosort_folder, const_rat.BRAIN_REGIONS, const_com.PROBE_DIMENSIONS,
                                     const_com.POSITION_MULT)
# b) Create the template_info.df dataframe (or load it if you already have it)
# template_info = preproc_kilo.generate_template_info_after_cleaning(kilosort_folder, sampling_freq)
# NOTE(review): these .df files are read elsewhere with pd.read_pickle --
# confirm np.load(allow_pickle=True) returns the same DataFrame object here.
template_info = np.load(join(kilosort_folder, 'template_info.df'), allow_pickle=True)
# c) Make the spike info from the initial, cleaned, kilosort results
# spike_info = preproc_kilo.generate_spike_info_after_cleaning(kilosort_folder)
spike_info = np.load(join(kilosort_folder, 'spike_info_after_cleaning.df'), allow_pickle=True)
# Re-draw the probe view, this time overlaying the loaded template info.
spp.view_grouped_templates_positions(kilosort_folder, const_rat.BRAIN_REGIONS, const_com.PROBE_DIMENSIONS,
                                     const_com.POSITION_MULT, template_info=template_info)
# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc = "CALCULATE SPIKING RATES">
# Make the spike rates using each frame as a binning window
# Load the pre generated DataFrames for the event CSVs
event_dataframes = ns_funcs.load_events_dataframes(events_folder, sync_funcs.event_types)
file_to_save_to = join(kilosort_folder, 'firing_rate_with_video_frame_window.npy')
# Reload the curated sorting results saved in step 3.
template_info = pd.read_pickle(join(kilosort_folder, 'template_info.df'))
spike_info = pd.read_pickle(join(kilosort_folder, 'spike_info_after_cleaning.df'))
# Spike counts per video frame (per template); also persisted to disk.
spike_rates = binning.spike_count_per_frame(template_info, spike_info, event_dataframes['ev_video'],
                                            sampling_freq, file_to_save_to=file_to_save_to)
# Using the frame based spike rates do a rolling window to average a bit more
num_of_frames_to_average = 0.25/(1/120)  # 30 frames = 0.25 s at 120 fps
# Comprehension replaces the original append loop -- same rows, same order.
spike_rates_0p25 = np.array([
    binning.rolling_window_with_step(spike_rates[n, :], np.mean,
                                     num_of_frames_to_average, num_of_frames_to_average)
    for n in np.arange(spike_rates.shape[0])
])
np.save(join(kilosort_folder, 'firing_rate_with_0p25s_window.npy'), spike_rates_0p25)
# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------
# <editor-fold desc="GET TIMES AND FRAMES OF SUCCESSFUL TRIALS">
video_frame_spike_rates_filename = join(kilosort_folder, 'firing_rate_with_video_frame_window.npy')
spike_rates = np.load(video_frame_spike_rates_filename)
camera_pulses, beam_breaks, sounds = \
    sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,
                                                      cam_ttl_pulse_period=
                                                      const_com.CAMERA_TTL_PULSES_TIMEPOINT_PERIOD)
# Sound events are (start, end) time points; keep only the short ones
# (< 4000 time points) as reward tones -- assumes amplifier time points,
# ~0.2 s at 20 kHz; confirm against the sync-file units.
sounds_dur = sounds[:, 1] - sounds[:, 0]
reward_sounds = sounds[sounds_dur < 4000]
# Using the trialend csv file to generate events
# succesful_trials = event_dataframes['ev_trial_end'][event_dataframes['ev_trial_end']['Result'] == 'Food']
# succesful_trials = succesful_trials['AmpTimePoints'].values
# Using the start of the reward tone to generate events
# There is a difference of 78.6 frames (+-2) between the reward tone and the csv file event (about 700ms)
succesful_trials = reward_sounds[:, 0]
# Get the average firing rates of all neurons a few seconds around the successful pokes
time_around_beam_break = 8
avg_firing_rate_around_suc_trials = fr_funcs.get_avg_firing_rates_around_events(spike_rates=spike_rates,
                                                                                event_time_points=succesful_trials,
                                                                                ev_video_df=event_dataframes['ev_video'],
                                                                                time_around_event=time_around_beam_break)
# Control condition: the same number of random event times drawn from the
# recording span (without replacement).
events_random = np.random.choice(np.arange(succesful_trials.min(), succesful_trials.max(), 100),
                                 len(succesful_trials), replace=False)
avg_firing_rate_around_random_times = fr_funcs.get_avg_firing_rates_around_events(spike_rates=spike_rates,
                                                                                  event_time_points=events_random,
                                                                                  ev_video_df=event_dataframes['ev_video'],
                                                                                  time_around_event=time_around_beam_break)
# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc="LOOK AT ALL THE NEURONS AROUND THE POKE EVENT">
smooth_time = 0.5
smooth_frames = smooth_time * 120  # 0.5 s at 120 fps
t = binning.rolling_window_with_step(avg_firing_rate_around_random_times, np.mean, smooth_frames, int(smooth_frames / 3))
# NOTE(review): this l1-normalisation is immediately overwritten by the
# np.asarray(t) line below -- dead code; left in place, remove if confirmed.
tn = preproc.normalize(t, norm='l1', axis=0)
tn = np.asarray(t)
# Scale each neuron's trace to [0, 1] so they share a colour range.
for i in np.arange(len(t)):
    tn[i, :] = binning.scale(t[i], 0, 1)
# Order neurons by their depth (probe Y position).
y_positions = template_info['position Y'].values
position_sorted_indices = np.argsort(y_positions)
# Row indices where each brain-region boundary falls in the sorted order.
regions_pos = list(const_rat.BRAIN_REGIONS.values())
region_lines = []
for rp in regions_pos:
    region_lines.append(sync_funcs.find_nearest(y_positions[position_sorted_indices] * const_com.POSITION_MULT, rp)[0])
region_lines = np.array(region_lines)
tns = tn[position_sorted_indices]
plt.imshow(np.flipud(tns), aspect='auto')
plt.hlines(y=len(t) - region_lines, xmin=0, xmax=len(tns[0])-1, linewidth=3, color='w')
plt.vlines(x=int(len(tns[0]) / 2), ymin=0, ymax=len(tns) - 1)
# Same picture with the x axis labelled in seconds around the event.
plt.imshow(np.flipud(tns), aspect='auto', extent=[-8, 8, len(tns), 0])
plt.hlines(y=len(t) - region_lines, xmin=-8, xmax=8, linewidth=2, color='w')
plt.vlines(x=0, ymin=0, ymax=len(tns) - 1)
# Interactive browsing: 'i' selects the row shown in the graph pane.
i = 0
sv.graph_pane(globals(), 'i', 'tn')
time_around_beam_break = 8
index = 0
fig1 = plt.figure(1)
fig2 = plt.figure(2)
output = None
all_indices = np.arange(len(avg_firing_rate_around_suc_trials))
frames_around_beam_break = 120 *time_around_beam_break
args = [all_indices, avg_firing_rate_around_suc_trials, template_info, spike_info,
        succesful_trials, frames_around_beam_break, fig1, fig2]
show_rasters_decrease = fr_funcs.show_rasters_for_live_update
# Slider drives 'index'; each change re-renders the rasters for that neuron.
sl.connect_repl_var(globals(), 'index', 'output', 'show_rasters_decrease', 'args',
                    slider_limits=[0, len(avg_firing_rate_around_suc_trials) - 1])
# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------
| [
"gdimitri@hotmail.com"
] | gdimitri@hotmail.com |
e8b093baa3e7fbf51757ad4f26b06464fa79c48e | 6c545543a21286ebd33ce13053346d1f9349bc40 | /Random/chfnrn.py | 969ad35090dbaa3e883873f0a7b17051bc8cdfe9 | [] | no_license | Sunil02324/Hackathon-Solutions-Collection | c426c186b977e39bc0706c006c6d8f5a0f4dd9e8 | 1fcb1723d572132b08a73db6e26d5e8f4352b2f4 | refs/heads/master | 2021-01-10T15:09:45.766591 | 2017-03-31T06:09:20 | 2017-03-31T06:09:20 | 51,132,623 | 0 | 1 | null | 2016-02-29T20:21:19 | 2016-02-05T07:30:29 | Python | UTF-8 | Python | false | false | 186 | py | t = int(raw_input())
while (t>0):
t = t -1
n,m = map(int, raw_input().split())
a = []
b = []
for i in range(0,m):
x,y = map(int, raw_input().split())
a.append(x)
b.append(y)
| [
"sunil.iitkgp13@gmail.com"
] | sunil.iitkgp13@gmail.com |
56940990f0681c33a9760d965c2758cc0ef87ec9 | f171d4918c7a1fe65c8a02775681e970f8f24bc1 | /Fill Rate by Supplier_WS.py | 8beb3b8114509d04e49c41f45490a4211e3371df | [] | no_license | gamut-code/attribute_mapping | 82ef02d3fdb600b64f00e93c33e7f0efe2c61b90 | 90cb623e39162ba01e5764d874d251503fe77e1d | refs/heads/master | 2021-11-21T10:01:14.902134 | 2021-08-10T15:13:44 | 2021-08-10T15:13:44 | 207,662,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,840 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:34:02 2020
@author: xcxg109
"""
import pandas as pd
import numpy as np
from GWS_query import GWSQuery
import WS_query_code as q
import WS_file_data as fd
import settings_NUMERIC as settings
import time
# SQL template: (SKU, taxonomy node id, supplier number) for products whose
# {column} matches the supplied {id list}; both placeholders are substituted
# with str.format before execution.
# NOTE(review): .format interpolation means only trusted, internally
# generated column names / ids must ever be passed in.
ws_basic_query="""
SELECT
tprod."gtPartNumber" as "WS_SKU"
, tprod."categoryId" AS "GWS_Node_ID"
, supplier."supplierNo" as "Supplier_ID"
FROM taxonomy_product tprod
INNER JOIN supplier_product supplier
ON supplier.id = tprod."supplierProductId"
WHERE {} IN ({})
"""
# SQL template: one row per (product, attribute) with the taxonomy path
# (recursive CTE over taxonomy_category), supplier details and the raw
# attribute value; status = 3 products only.  {} placeholders are the filter
# column and id list, substituted with str.format (trusted input only).
ws_attr_values="""
WITH RECURSIVE tax AS (
SELECT id,
name,
ARRAY[]::INTEGER[] AS ancestors,
ARRAY[]::character varying[] AS ancestor_names
FROM taxonomy_category as category
WHERE "parentId" IS NULL
AND category.deleted = false
UNION ALL
SELECT category.id,
category.name,
tax.ancestors || tax.id,
tax.ancestor_names || tax.name
FROM taxonomy_category as category
INNER JOIN tax ON category."parentId" = tax.id
WHERE category.deleted = false
)
SELECT
array_to_string(tax.ancestor_names || tax.name,' > ') as "PIM_Path"
, tax.ancestors[1] as "WS_Category_ID"
, tax.ancestor_names[1] as "WS_Category_Name"
, tprod."categoryId" AS "WS_Node_ID"
, tax.name as "WS_Node_Name"
, tprod."gtPartNumber" as "WS_SKU"
, supplier."supplierNo" as "Supplier_ID"
, tprod.supplier as "Supplier_Name"
, tax_att.id as "WS_Attr_ID"
-- , tprodvalue.id as "WS_Attr_Value_ID"
-- , tax_att."multiValue" as "Multivalue"
-- , tax_att."dataType" as "Data_Type"
-- , tax_att."numericDisplayType" as "Numeric_Display_Type"
-- , tax_att.description as "WS_Attribute_Definition"
, tax_att.name as "WS_Attribute_Name"
, tprodvalue.value as "Original_Value"
-- , tprodvalue.unit as "Original_Unit"
-- , tprodvalue."valueNormalized" as "Normalized_Value"
-- , tprodvalue."unitNormalized" as "Normalized_Unit"
-- , tprodvalue."numeratorNormalized" as "Numerator"
-- , tprodvalue."denominatorNormalized" as "Denominator"
-- , tax_att."unitGroupId" as "Unit_Group_ID"
, tax_att.rank as "Rank"
, tax_att.priority as "Priority"
FROM taxonomy_product tprod
INNER JOIN tax
ON tax.id = tprod."categoryId"
-- AND (4458 = ANY(tax.ancestors)) --OR 8215 = ANY(tax.ancestors) OR 7739 = ANY(tax.ancestors)) -- *** ADD TOP LEVEL NODES HERE ***
AND tprod.status = 3
FULL OUTER JOIN taxonomy_attribute tax_att
ON tax_att."categoryId" = tprod."categoryId"
AND tax_att.deleted = 'false'
FULL OUTER JOIN taxonomy_product_attribute_value tprodvalue
ON tprod.id = tprodvalue."productId"
AND tax_att.id = tprodvalue."attributeId"
AND tprodvalue.deleted = 'false'
FULL OUTER JOIN supplier_product supplier
ON supplier.id = tprod."supplierProductId"
WHERE {} IN ({})
"""
# Silence pandas' SettingWithCopyWarning for the chained assignments below.
pd.options.mode.chained_assignment = None
# GWS database connection -- presumably used by the query code further down;
# not referenced in the visible part of the file.
gws = GWSQuery()
def get_category_fill_rate(cat_df):
    """Compute, per attribute, the % of SKUs in a category that have a value.

    Parameters
    ----------
    cat_df : pd.DataFrame
        Rows of (SKU, attribute, value) for one category, with the
        descriptive columns produced by the ws_attr_values query.

    Returns
    -------
    pd.DataFrame
        One row per WS_Attr_ID with 'Category_Fill_Rate_%' (formatted to two
        decimals) plus the category / supplier / attribute columns.

    BUG FIX: the original built an intermediate column with
    ``.apply(lambda x: x.notnull().mean)`` -- note the *missing call
    parentheses* -- which stored bound-method objects and made the following
    ``'{:,.2f}'.format`` map raise TypeError (and, being a groupby ``apply``
    rather than ``transform``, it was misaligned with the row index anyway).
    The rate is now computed directly from the de-duplicated, non-null rows;
    the redundant second merge is gone and the caller's DataFrame is no
    longer mutated in place.
    """
    browsable_skus = cat_df.copy()  # do not mutate the caller's frame
    browsable_skus['Original_Value'].replace('', np.nan, inplace=True)

    # Denominator: every distinct SKU in the category.
    total = browsable_skus['WS_SKU'].nunique()
    print('cat total = ', total)

    # One row per (SKU, attribute), keeping only rows that actually have a value.
    filled = browsable_skus.drop_duplicates(subset=['WS_SKU', 'WS_Attr_ID'])
    filled = filled.dropna(subset=['Original_Value'])

    # Fill rate per attribute = SKUs with a value / all SKUs * 100.
    fill_rate_cat = (filled.groupby('WS_Attr_ID')['WS_SKU'].count()
                     .div(total).mul(100)
                     .rename('Category_Fill_Rate_%')
                     .reset_index())
    fill_rate_cat['Category_Fill_Rate_%'] = fill_rate_cat['Category_Fill_Rate_%'].map('{:,.2f}'.format)

    # Attach the descriptive columns (one row per attribute).
    attribute_details = filled[['WS_Category_ID', 'WS_Category_Name', 'WS_Node_ID', 'WS_Node_Name',
                                'Supplier_ID', 'Supplier_Name', 'WS_Attr_ID', 'WS_Attribute_Name',
                                'Priority', 'Rank']].drop_duplicates(subset='WS_Attr_ID')

    return fill_rate_cat.merge(attribute_details, how="inner", on=['WS_Attr_ID'])
def get_supplier_fill_rate(df):
    """Compute per-attribute fill rates for one supplier's slice of a category.

    df is already filtered down to a single supplier within a single category.
    Returns one row per attribute with a 'Category_Supplier_Fill_Rate_%'
    column (percentage of the supplier's distinct SKUs with a value).
    """
    # note: df here is already segregated by category_ID
    browsable_skus = df
    #calculate fill rates at the attribute / category level
    total = browsable_skus['WS_SKU'].nunique()
    print ('sup total = ', total)
    browsable_skus = browsable_skus.drop_duplicates(subset=['WS_SKU', 'WS_Attr_ID'])
    browsable_skus.dropna(subset=['Original_Value'], inplace=True)
    # NOTE(review): as in get_category_fill_rate, '.mean' is not called here and
    # both columns are recomputed/overwritten below -- likely leftover code.
    browsable_skus['Category_Supplier_Fill_Rate_%'] = (browsable_skus.groupby('WS_Attr_ID')['WS_Attr_ID'].apply(lambda x: x.notnull().mean))
    browsable_skus['Category_Supplier_Fill_Rate_%'] = browsable_skus['Category_Supplier_Fill_Rate_%'].map('{:,.2f}'.format)
    browsable_skus['Batch_Supplier_Fill_Rate_%'] = (browsable_skus.groupby('WS_Attr_ID')['WS_Attr_ID'].apply(lambda x: x.notnull().mean))
    browsable_skus['Batch_Supplier_Fill_Rate_%'] = browsable_skus['Batch_Supplier_Fill_Rate_%'].map('{:,.2f}'.format)
    # Fill rate = (# supplier SKUs with a value) / (# supplier SKUs) * 100.
    fill_rate_sup = pd.DataFrame(browsable_skus.groupby(['WS_Attr_ID'])['Category_Supplier_Fill_Rate_%'].count()/total*100).reset_index()
    browsable_skus = browsable_skus[['WS_Attr_ID']].drop_duplicates(subset='WS_Attr_ID')
    fill_rate_sup = fill_rate_sup.merge(browsable_skus, how= "inner", on=['WS_Attr_ID'])
    fill_rate_sup['Category_Supplier_Fill_Rate_%'] = fill_rate_sup['Category_Supplier_Fill_Rate_%'].map('{:,.2f}'.format)
    # NOTE(review): merging against a frame that holds only WS_Attr_ID adds no
    # new columns -- this second merge looks redundant; confirm.
    fill_rate_sup = fill_rate_sup.merge(browsable_skus, how= "inner", on=['WS_Attr_ID'])
    # Debug dump to a hard-coded, machine-specific path.
    fill_rate_sup.to_csv('C:/Users/xcxg109/NonDriveFiles/browse.csv')
    return fill_rate_sup
def data_out(df, atts_df, batch=''):
    """Write the attribute fill-rate report to a formatted Excel workbook.

    df      : SKU-level frame (not otherwise used in the current body).
    atts_df : attribute-level frame carrying the computed fill-rate columns.
    batch   : optional batch number appended to the output file name.
    """
    # output for sku-based pivot table
    fill = atts_df[['Supplier_ID', 'Supplier_Name', 'WS_Category_ID', 'WS_Category_Name', \
        'WS_Node_ID', 'WS_Node_Name', 'WS_Attr_ID', 'WS_Attribute_Name', \
        'Priority', 'Rank', 'Supplier_Fill_Rate_%', 'Category_Fill_Rate_%']]
    fill = fill.drop_duplicates(subset=['WS_Node_ID', 'WS_Attr_ID'])
    fill = fill.sort_values(by=['WS_Category_Name', 'WS_Node_Name', 'Rank'])
    # Attributes without fill data show as '0' rather than NaN in the report.
    fill[['Category_Fill_Rate_%']] = fill[['Category_Fill_Rate_%']].fillna(value='0')
    outfile = 'C:/Users/xcxg109/NonDriveFiles/SUPPLIER_REPORT_'+str(batch)+'_.xlsx'
    writer = pd.ExcelWriter(outfile, engine='xlsxwriter')
    workbook = writer.book
    fill.to_excel(writer, sheet_name='Attribute Fill Rates', startrow =0, startcol=0, index=False)
    worksheet = writer.sheets['Attribute Fill Rates']
    # Cell formatting: wrap long text, left-align.
    layout = workbook.add_format()
    layout.set_text_wrap('text_wrap')
    layout.set_align('left')
    # Auto-size columns from content, clamped to a readable 10..40 range.
    # (fd.get_col_widths is a project helper; first entry is the index column.)
    col_widths = fd.get_col_widths(fill)
    col_widths = col_widths[1:]
    for i, width in enumerate(col_widths):
        if width > 40:
            width = 40
        elif width < 10:
            width = 10
        worksheet.set_column(i, i, width)
    writer.save()
# --- Driver: build the supplier attribute fill-rate report ---------------
init_time = time.time()
ws_df = pd.DataFrame()
supplier_df = pd.DataFrame()
category_df = pd.DataFrame()
temp_all_cats = pd.DataFrame()
temp_all_atts = pd.DataFrame()
full_df = pd.DataFrame()
full_atts = pd.DataFrame()
print('working....')
#request the type of data to pull: blue or yellow, SKUs or node, single entry or read from file
data_type = fd.WS_search_type()
search_level = 'tprod."categoryId"'
#ask user for node number/SKU or pull from file if desired
search_data = fd.data_in(data_type, settings.directory_name)
if data_type == 'sku':
    search_level = 'SKU'
    print('batch = {} SKUs'.format(len(search_data)))
    # Large SKU batches are split into chunks of ~4000 so the IN() clause in
    # the query stays manageable; always at least 2 chunks when splitting.
    if len(search_data)>4000:
        num_lists = round(len(search_data)/4000, 0)
        num_lists = int(num_lists)
        if num_lists == 1:
            num_lists = 2
        print('running GWS SKUs in {} batches'.format(num_lists))
        size = round(len(search_data)/num_lists, 0)
        size = int(size)
        div_lists = [search_data[i * size:(i + 1) * size] for i in range((len(search_data) + size - 1) // size)]
        for k in range(0, len(div_lists)):
            print('batch {} of {}'.format(k+1, num_lists))
            # Quote each SKU for the SQL IN (...) list.
            sku_str = ", ".join("'" + str(i) + "'" for i in div_lists[k])
            temp_df = gws.gws_q(ws_basic_query, 'tprod."gtPartNumber"', sku_str)
            ws_df = pd.concat([ws_df, temp_df], axis=0, sort=False)
    else:
        sku_str = ", ".join("'" + str(i) + "'" for i in search_data)
        ws_df = gws.gws_q(ws_basic_query, 'tprod."gtPartNumber"', sku_str)
if ws_df.empty == False:
    # pull all L3s for the supplier and get attribute data on each node
    suppliers = ws_df['Supplier_ID'].unique().tolist()
    print('# suppliers = ', len(suppliers))
    loop_count = 1
    for sup in suppliers:
        start_time = time.time()
        # pull all nodes for the supplier and get attribute data on each node
        sup_df = ws_df.loc[ws_df['Supplier_ID'] == sup]
        # NOTE(review): the attribute query in this file aliases the node column
        # as "WS_Node_ID"; confirm ws_basic_query really returns 'GWS_Node_ID'.
        cats = sup_df['GWS_Node_ID'].unique().tolist()
        print('Supplier {} -- {} of {}: {} cat'.format(sup, loop_count, len(suppliers), len(cats)))
        # get fill rate and SKU counts by category for supplier
        for cat in cats:
            print('cat - ', cat)
            # temp_df is the ENTIRE category, supplier_df is filtered by supplier ID
            temp_df = gws.gws_q(ws_attr_values, 'tprod."categoryId"', cat)
            # get category fill rates first because supplier may not have all active attributes in a category
            fill_category = get_category_fill_rate(temp_df)
            # create filtered supplier_df and get fill rates specific to supplier
            supplier_df = temp_df.loc[temp_df['Supplier_ID']== sup]
            # Debug dump to a hard-coded local path.
            supplier_df.to_csv('C:/Users/xcxg109/NonDriveFiles/sup_df.csv')
            fill_supplier = get_supplier_fill_rate(supplier_df)
            # set up the fill_supplier rows for a merge with temp_df -- this ensures that attributes active in the node
            # but not populated by the supplier are still included in the the df
            sup_name = supplier_df['Supplier_Name'].unique().tolist()
            fill_category['Supplier_ID'] = sup
            fill_category['Supplier_Name'] = sup_name[0]
            temp_df = temp_df.merge(fill_category, how="outer", on=['WS_Category_ID', \
                'WS_Category_Name', 'WS_Node_ID', 'WS_Node_Name', 'Supplier_ID', \
                'Supplier_Name', 'WS_Attr_ID', 'WS_Attribute_Name', 'Priority', \
                'Rank'])
            temp_df = temp_df.merge(fill_supplier, how="outer", on=['WS_Attr_ID'])
            temp_df[['Supplier_Fill_Rate_%']] = temp_df[['Supplier_Fill_Rate_%']].fillna(value='0')
            temp_df = temp_df.sort_values(by=['WS_Category_ID', 'WS_Attr_ID', 'Category_Fill_Rate_%', \
                'Supplier_Fill_Rate_%'])
            temp_df = temp_df.drop_duplicates('WS_Attr_ID', keep='first')
            temp_all_cats = pd.concat([temp_all_cats, supplier_df], axis=0, sort=False)
            temp_all_atts = pd.concat([temp_all_atts, temp_df], axis=0, sort=False)
        full_df = pd.concat([full_df, temp_all_cats], axis=0, sort=False)
        full_atts = pd.concat([full_atts, temp_all_atts], axis=0, sort=False)
        print("--- segment: {} minutes ---".format(round((time.time() - start_time)/60, 2)))
        loop_count += 1
    # NOTE(review): the comments below mention 40K/30K rows but the code splits
    # at 300000 -- the comments look stale; confirm the intended threshold.
    if len(full_df) > 300000:
        count = 1
        # split into multiple dfs of 40K rows, creating at least 2
        num_lists = round(len(full_df)/300000, 0)
        num_lists = int(num_lists)
        if num_lists == 1:
            num_lists = 2
        print('creating {} output files'.format(num_lists))
        # np.array_split creates [num_lists] number of chunks, each referred to as an object in a loop
        # (the loop variable shadows the builtin `object`).
        split_df = np.array_split(full_df, num_lists)
        for object in split_df:
            print('iteration {} of {}'.format(count, num_lists))
            data_out(object, full_atts, count)
            count += 1
    # if original df < 30K rows, process the entire thing at once
    else:
        data_out(full_df, full_atts)
print("--- total time: {} minutes ---".format(round((time.time() - init_time)/60, 2)))
#temp_df.to_csv('C:/Users/xcxg109/NonDriveFiles/temp_df.csv') | [
"colette@thecolettegabriel.com"
] | colette@thecolettegabriel.com |
576d7206a61c14dd506527f40186f72c7372f351 | 95f66ab9a81f29b5a887dd792e39b7cb256a7e57 | /NeuralNet.py | e25ac7db6a772787236524dfc81d9bc6ac192a7d | [] | no_license | Inquient/training | 8e0a828bebcd51fc14d79fc9d1fb503f15f101f6 | 869d6da075c7f615b183f035803d868df463dd6c | refs/heads/master | 2021-04-09T14:20:41.246342 | 2018-04-15T13:18:16 | 2018-04-15T13:18:16 | 125,519,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,331 | py | import numpy
import os
import matplotlib.pyplot
from scipy.special import expit as sigmoid
from scipy.special import logit as logit
class neuralNetwork:
    """A three-layer feed-forward network trained with backpropagation.

    Layers: input -> hidden -> output, fully connected, logistic-sigmoid
    activations, weights updated by stochastic gradient descent.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Record the layer sizes and learning rate, and randomly
        initialise both weight matrices."""
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Gradient-descent step size.
        self.lr = learningrate
        # Weights sampled from N(0, 1/sqrt(#nodes in the next layer)):
        # wih links input -> hidden, who links hidden -> output.
        self.wih = numpy.random.normal(0.0, self.hnodes ** -0.5,
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, self.onodes ** -0.5,
                                       (self.onodes, self.hnodes))
        # Forward activation (sigmoid) and its inverse (logit) for backquery.
        self.activation_function = lambda x: sigmoid(x)
        self.inverse_activation_function = lambda x: logit(x)

    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass and update both weight matrices."""
        # Column vectors for the incoming signals and the desired outputs.
        input_vec = numpy.array(inputs_list, ndmin=2).T
        target_vec = numpy.array(targets_list, ndmin=2).T
        # Forward pass (query() transposes internally, so pass the raw list).
        final_outputs, hidden_outputs = self.query(inputs_list)
        # Output-layer error, back-propagated through who to the hidden layer.
        output_errors = target_vec - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent updates: lr * error * d(sigmoid) * upstream output.
        self.who += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs),
            hidden_outputs.T)
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
            input_vec.T)

    def query(self, inputs_list):
        """Forward-propagate inputs_list.

        Returns (final_outputs, hidden_outputs) as column vectors.
        """
        input_vec = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, input_vec))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        return final_outputs, hidden_outputs

    def backquery(self, targets_list):
        """Run the network backwards: from target outputs, reconstruct an
        input signal that could have produced them."""
        final_outputs = numpy.array(targets_list, ndmin=2).T
        # Undo the output activation, then map back through who.
        final_inputs = self.inverse_activation_function(final_outputs)
        hidden_outputs = numpy.dot(self.who.T, final_inputs)
        # Rescale into (0.01, 0.99), the sigmoid's working range.
        hidden_outputs -= numpy.min(hidden_outputs)
        hidden_outputs /= numpy.max(hidden_outputs)
        hidden_outputs *= 0.98
        hidden_outputs += 0.01
        # Undo the hidden activation, then map back through wih.
        hidden_inputs = self.inverse_activation_function(hidden_outputs)
        inputs = numpy.dot(self.wih.T, hidden_inputs)
        # Rescale the reconstructed input signal the same way.
        inputs -= numpy.min(inputs)
        inputs /= numpy.max(inputs)
        inputs *= 0.98
        inputs += 0.01
        return inputs
# Network shape for MNIST: 28x28 pixel images -> 784 inputs, one output per digit.
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
epochs = 5 # number of full passes the network makes over the training set
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# Load the MNIST training set (CSV: label, then 784 pixel values per line).
training_data_file = open(os.path.abspath('mnist/mnist_train.csv'), 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
for e in range(epochs):
    progress = 0
    for record in training_data_list:
        # split the record into its comma-separated values
        all_values = record.split(',')
        # scale the pixel values (everything after the label) into (0.01, 1.0]
        inputs = (numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
        # target vector: 0.01 everywhere, 0.99 at the index of the true label
        targets = numpy.zeros(output_nodes)+0.01
        targets[int(all_values[0])] = 0.99
        # train the network on this example
        n.train(inputs, targets)
        # progress indicator
        # NOTE(review): this float-equality membership test will rarely fire,
        # since progress/len(...) seldom equals 0.1, 0.2, ... exactly.
        progress += 1
        if progress/len(training_data_list) in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
            print("progress = ", progress/len(training_data_list)*100, "%")
    print("Epoch ", e, " of ", epochs)
# Load the MNIST test set and score the trained network.
test_data_file = open(os.path.abspath("mnist/mnist_test.csv"), 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# scorecard of per-example results, initially empty
scorecard = []
# run every record of the test set through the network
for record in test_data_list:
    # split the record into its comma-separated values
    all_values = record.split(',')
    # the correct answer is the first value
    correct_label = int(all_values[0])
    # print(correct_label, "true label")
    # scale and shift the input values
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # query the network; query() returns (final_outputs, hidden_outputs)
    outputs = n.query(inputs)
    # the index of the highest output activation is the predicted digit
    label = numpy.argmax(outputs[0])
    # print(label, "network's answer")
    # append 1 for a correct answer, 0 otherwise
    if (label == correct_label) :
        # correct answer: record a 1
        scorecard.append(1)
    else:
        # wrong answer: record a 0
        scorecard.append(0)
# print(scorecard)
# overall accuracy on the test set
print(sum(scorecard)/len(scorecard))
# run the network backwards, given a label, see what image it produces
# label to test
label = 3
# create the output signals for this label
targets = numpy.zeros(output_nodes) + 0.01
# all_values[0] is the target label for this record
targets[label] = 0.99
# get image data
image_data = n.backquery(targets)
# plot image data
matplotlib.pyplot.imshow(image_data.reshape(28,28), cmap='Greys', interpolation='None')
matplotlib.pyplot.show()
# image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
# matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
# matplotlib.pyplot.show()
| [
"makoveyfedorich@gmail.com"
] | makoveyfedorich@gmail.com |
1f625674e11cbfc47a5961e607882b7515e6339a | c21cbf8209d317bd5ac898d6ccddb5f67f585da6 | /Ch 9 - Classes/admin.py | b998a1fea6e05dd7b5daa5107dd08e23e59c22e2 | [] | no_license | bwest619/Python-Crash-Course | 53db275879b191b8ed564ced16366e7d6657de98 | 35eb85aad38f69d65c9ffab552625fd213264831 | refs/heads/master | 2020-05-05T01:13:06.424525 | 2019-04-05T00:51:43 | 2019-04-05T00:51:43 | 179,596,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # Try it yourself, page 178
class Users:
    """A user profile with basic identity fields and a login-attempt counter."""

    def __init__(self, first_name, last_name, username, email, location):
        """Store the profile, title-casing the name and location fields."""
        self.first_name = first_name.title()
        self.last_name = last_name.title()
        self.username = username
        self.email = email
        self.location = location.title()
        self.login_attempts = 0

    def describe_user(self):
        """Print a short multi-line summary of the profile."""
        print(f"\n{self.first_name} {self.last_name}")
        print(f"Username: {self.username}")
        print(f"Email: {self.email}")
        print(f"Location: {self.location}")

    def greet_user(self):
        """Print a personalised greeting."""
        print(f"Welcome, {self.username}!")

    def increment_login_attempts(self):
        """Bump the login-attempt counter by one."""
        self.login_attempts += 1

    def reset_login_attempts(self):
        """Zero the login-attempt counter."""
        self.login_attempts = 0
class Admin(Users):
    """A site administrator: a regular user plus a list of privileges."""

    def __init__(self, first_name, last_name, username, email, location):
        """Create the admin; the privilege list starts empty."""
        super().__init__(first_name, last_name, username, email, location)
        self.privileges = []

    def show_privileges(self):
        """Print each privilege this admin currently holds."""
        print("An admin has a set of privileges that can do any of the following:")
        for entitlement in self.privileges:
            print(entitlement)
# Demo: build an admin, grant some privileges, and exercise the reporting methods.
administer = Admin('blaine', 'west', 'man in charge', 'bwest@bullshit.com', 'denver')
administer.privileges = ['can add post', 'can delete post', 'can ban user', 'can change your name to something silly']
administer.describe_user()
administer.greet_user()
administer.show_privileges()
| [
"blaineryanwest@gmail.com"
] | blaineryanwest@gmail.com |
becb97ab51bd113a00a2a0c169559e348ee0f82c | a46b14b44c87adb0288224a0e7e31d9bed30223f | /guest_project/apps/guest_app/models.py | f6db55f42203f80a0a458a0b4a83ca4f50478693 | [] | no_license | JeffLawrence1/Python-Django-Intermediate | 0b663e5d706dc6b35ff2785ae38d7bf0f2f3b651 | d1efc3e6385286ab25bae36042987a85ae94e359 | refs/heads/master | 2020-03-09T03:42:47.348420 | 2018-04-07T21:42:04 | 2018-04-07T21:42:04 | 128,570,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal guest-book user model: stores only a display name."""
    name = models.CharField(max_length=255)
"jefflaw13@hotmail.com"
] | jefflaw13@hotmail.com |
edc117b558873902ee1d38b226f7af11cebc80c9 | 58df99d96af6a688852993e38da89b75fea1d0dc | /exps/NATS-Bench/draw-correlations.py | 6afac3b804703bc53660e618d2c2a6e820974d3e | [
"MIT"
] | permissive | yuezhixiong/AutoDL-Projects | 0f24ed98389b70f452a79c8ef825d5e563ac5d8c | 0d3c63bdbe2d648c2119ffe8d0491f8a07cf85cb | refs/heads/master | 2023-03-22T17:15:37.013837 | 2021-03-02T05:13:51 | 2021-03-02T05:13:51 | 315,518,182 | 0 | 1 | MIT | 2021-02-26T06:36:34 | 2020-11-24T04:28:29 | Python | UTF-8 | Python | false | false | 3,860 | py | ###############################################################
# NATS-Bench (arxiv.org/pdf/2009.00437.pdf), IEEE TPAMI 2021 #
###############################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.06 #
###############################################################
# Usage: python exps/NATS-Bench/draw-correlations.py #
###############################################################
import os, gc, sys, time, scipy, torch, argparse
import numpy as np
from typing import List, Text, Dict, Any
from shutil import copyfile
from collections import defaultdict, OrderedDict
from copy import deepcopy
from pathlib import Path
import matplotlib
import seaborn as sns
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import dict2config, load_config
from nats_bench import create
from log_utils import time_string
def get_valid_test_acc(api, arch, dataset):
    """Look up validation and test accuracy for one architecture.

    api     : a NATS-Bench API object (topology or size search space).
    arch    : architecture index/spec accepted by api.get_more_info.
    dataset : dataset name; 'cifar10' is special-cased because its validation
              accuracy is reported under the 'cifar10-valid' split.
    Returns (valid_acc, test_acc, human-readable summary string).
    """
    is_size_space = api.search_space_name == 'size'
    # hp=90 for the size space, hp=200 for the topology space.
    if dataset == 'cifar10':
        xinfo = api.get_more_info(arch, dataset=dataset, hp=90 if is_size_space else 200, is_random=False)
        test_acc = xinfo['test-accuracy']
        xinfo = api.get_more_info(arch, dataset='cifar10-valid', hp=90 if is_size_space else 200, is_random=False)
        valid_acc = xinfo['valid-accuracy']
    else:
        xinfo = api.get_more_info(arch, dataset=dataset, hp=90 if is_size_space else 200, is_random=False)
        valid_acc = xinfo['valid-accuracy']
        test_acc = xinfo['test-accuracy']
    return valid_acc, test_acc, 'validation = {:.2f}, test = {:.2f}\n'.format(valid_acc, test_acc)
def compute_kendalltau(vectori, vectorj):
    """Return the Kendall rank correlation coefficient between two
    equal-length score vectors (the p-value from scipy is discarded)."""
    coef, p = scipy.stats.kendalltau(vectori, vectorj)
    return coef
def compute_spearmanr(vectori, vectorj):
    """Return the Spearman rank correlation between the two score vectors
    (the p-value from scipy is discarded)."""
    rho, _pvalue = scipy.stats.spearmanr(vectori, vectorj)
    return rho
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--save_dir', type=str, default='output/vis-nas-bench/nas-algos', help='Folder to save checkpoints and log.')
    parser.add_argument('--search_space', type=str, choices=['tss', 'sss'], help='Choose the search space.')
    args = parser.parse_args()
    save_dir = Path(args.save_dir)
    # NOTE(review): the API is always created for 'tss' here; the
    # --search_space argument is parsed but never used -- confirm intent.
    api = create(None, 'tss', fast_mode=True, verbose=False)
    # Sample every 300th architecture index from the benchmark.
    indexes = list(range(1, 10000, 300))
    scores_1 = []
    scores_2 = []
    for index in indexes:
        valid_acc, test_acc, _ = get_valid_test_acc(api, index, 'cifar10')
        scores_1.append(valid_acc)
        scores_2.append(test_acc)
    # Rank correlations between validation and test accuracy.
    correlation = compute_kendalltau(scores_1, scores_2)
    print('The kendall tau correlation of {:} samples : {:}'.format(len(indexes), correlation))
    correlation = compute_spearmanr(scores_1, scores_2)
    print('The spearmanr correlation of {:} samples : {:}'.format(len(indexes), correlation))
    # scores_1 = ['{:.2f}'.format(x) for x in scores_1]
    # scores_2 = ['{:.2f}'.format(x) for x in scores_2]
    # print(', '.join(scores_1))
    # print(', '.join(scores_2))
    # Scatter plot of validation vs. test accuracy, saved as a PNG.
    dpi, width, height = 250, 1000, 1000
    figsize = width / float(dpi), height / float(dpi)
    LabelSize, LegendFontsize = 14, 14
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.scatter(scores_1, scores_2 , marker='^', s=0.5, c='tab:green', alpha=0.8)
    # Hard-coded, machine-specific output path.
    save_path = '/Users/xuanyidong/Desktop/test-temp-rank.png'
    fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')
    plt.close('all')
| [
"280835372@qq.com"
] | 280835372@qq.com |
2d02387748595eb5f05de1450d986ab0bbe8728f | fba281163b4761985f66d9cd31463e51bf407211 | /Remove PDF Pages.py | 1b0f8c99ab5951faa4181d218d301000fbd9a04e | [] | no_license | apiarian/pythonista-ios-scripts | 30722e434d1d3b7176f6d4f5f68431ffb5aa357e | bfaddf0c0759a01c4b1f2bd7a324d4620931e1e2 | refs/heads/master | 2020-03-08T22:27:28.986089 | 2018-04-24T14:49:00 | 2018-04-24T14:49:00 | 128,431,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # Install as a share sheet extension for Pythonista to remove pages from a PDF. The trimmed PDF is shown in a final quicklook view for sharing or saving.
import appex
import PyPDF2
import tempfile
import console
import os
if appex.is_running_extension():
src_path = appex.get_file_path()
else:
src_path = './testpdf.pdf'
src = PyPDF2.PdfFileReader(src_path)
dst = PyPDF2.PdfFileWriter()
for i in range(src.getNumPages()):
p = src.getPage(i)
with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as f:
tmp = PyPDF2.PdfFileWriter()
tmp.addPage(p)
tmp.write(f)
f.close()
console.quicklook(f.name)
if console.alert(
'Keep?',
'Keep that page?',
'Yes',
'No',
) == 1:
dst.addPage(p)
with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as f:
dst.write(f)
f.close()
console.alert('Done', 'Show the results!', 'OK', hide_cancel_button=True)
console.quicklook(f.name)
os.remove(f.name)
| [
"al@megamicron.net"
] | al@megamicron.net |
71c7183af31724bdb98af5d9f036f02e3b81e761 | 3f51b9990728e50f531ab5250383505c18db8fcb | /www/config.py | bcc079bdfd48eca24bc76d6a25e10d5463fe390e | [] | no_license | limin2016/awesome-python3-webapp | d4b88c1358efa4af468204a505a90236b2cecfb7 | 120288573b3d120bc64709c17c11689f3324af39 | refs/heads/master | 2021-01-24T18:37:20.086220 | 2017-03-30T14:45:48 | 2017-03-30T14:45:48 | 84,443,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Configuration
'''
__author__ = 'Michael Liao'
import config_default
class Dict(dict):
    """A dict whose items can also be read and written in attribute style (d.key)."""

    def __init__(self, names=(), values=(), **kw):
        """Build from parallel name/value sequences plus any keyword items."""
        super().__init__(**kw)
        self.update(zip(names, values))

    def __getattr__(self, key):
        # Only invoked for names not found normally; forward to the mapping
        # and translate a miss into the conventional AttributeError.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Every attribute write becomes an item write.
        self[key] = value
def merge(defaults, override):
    """Return a new dict with defaults' keys, values taken from `override`
    where present.

    Keys appearing only in `override` are ignored; nested dicts found in
    `defaults` are merged recursively.
    """
    merged = {}
    for key, default_value in defaults.items():
        if key not in override:
            merged[key] = default_value
        elif isinstance(default_value, dict):
            # Both sides define this key and the default is a dict: recurse.
            merged[key] = merge(default_value, override[key])
        else:
            merged[key] = override[key]
    return merged
def toDict(d):
    """Recursively convert a plain dict into a Dict so its values can be
    reached with attribute access (e.g. configs.db.host)."""
    result = Dict()
    for key, value in d.items():
        result[key] = toDict(value) if isinstance(value, dict) else value
    return result
# Start from the packaged defaults, then overlay config_override.configs when
# an override module exists.  The override file is optional, so an ImportError
# is deliberately swallowed.
configs = config_default.configs
try:
    import config_override
    configs = merge(configs, config_override.configs)
except ImportError:
    pass
# Wrap in Dict so values can be accessed as configs.section.key.
configs = toDict(configs)
| [
"1466299085@qq.com"
] | 1466299085@qq.com |
e11030a1f2ae991395d0056407fcffab069729d1 | 4500c044ea8287787ad03b6064d7bfac8560e874 | /projects/downstream_classification_race/centering.py | f168c5313fdccdc96d7c103d359ca24e0d57ae68 | [] | no_license | apd10/core | 9343a796f079bf42807338bc44ac14172833d99b | f82f30e4a6738002821cfea83c3b6e080604c5b0 | refs/heads/master | 2022-12-19T21:59:22.424132 | 2020-09-27T23:14:19 | 2020-09-27T23:14:19 | 281,093,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import pandas as pd
import sys
import numpy as np
# Center and scale a delimited feature matrix, then save the normalisation
# statistics (mean, std, row-norm percentiles) next to the input file.
# Bug fix: the usage string requires three arguments (fname, sep, skip_rows),
# but the old guard checked `< 3`, so a 3-argument invocation crashed on
# sys.argv[3]; the guard now matches the usage message.
if len(sys.argv) < 4:
    print("Usage: <script.py> <fname> <sep> <skip_rows>")
    exit(0)

f = sys.argv[1]
sep = sys.argv[2]
skip_rows = int(sys.argv[3])

# First column is assumed to be a label/id; only the rest are features.
d = pd.read_csv(f, sep = sep, header=None, skiprows=skip_rows)
x = d.values[:,1:]

# Standardise each feature to zero mean / unit variance.
mu = np.mean(x, axis=0)
std = np.std(x, axis=0)
x = (x - mu)/std

# Distribution of row norms after standardisation, at a few percentiles.
norms = np.sqrt(np.sum(np.multiply(x, x), axis=1))
pctiles = np.array([50, 75, 90, 95, 99, 99.9])
norm_stats = np.array([np.percentile(norms, pct) for pct in pctiles])

# Store the centering info alongside the input data file.
writef = '/'.join(f.split('/')[:-1]) + '/centering_info.npz'
print(writef)
np.savez_compressed(writef, mu=mu, std=std, pct=pctiles, post_norm_stats=norm_stats)
print(pctiles)
print(norm_stats)
| [
"apd10@yogi.cs.rice.edu"
] | apd10@yogi.cs.rice.edu |
fb058eee50ffdb930c9e0b9c23249411cb7f4ed1 | 8f76a9a1b5d8cc2eab1a160584f566dd00c4aeff | /05-opencv/template_matching.py | 636221a46a5b57b1a4832142188f0f21d4495cba | [] | no_license | aciddust/Python-Study-OpenCV | 0c224566df46aef7c14b64ed6663aaae37e4812d | f56438188466dc8881d8bda766ba573b93308f92 | refs/heads/master | 2022-02-13T21:53:32.249363 | 2019-09-08T10:17:09 | 2019-09-08T10:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import cv2
# Locate a face template inside the Lenna image via normalised cross-correlation
# template matching, and draw a rectangle around the best match.
img = cv2.imread('../imgs/lenna.png')
img_temp = cv2.imread('../imgs/lenna_face.png')
# Matching runs on grayscale versions of both images.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_temp = cv2.cvtColor(img_temp, cv2.COLOR_BGR2GRAY)
# Template size as (width, height); shape is (rows, cols), hence the reverse.
w, h = gray_temp.shape[::-1]
output = cv2.matchTemplate(gray, gray_temp, cv2.TM_CCOEFF_NORMED)
# For TM_CCOEFF_NORMED the best match is at the maximum location.
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(output)
top = max_loc
bottom = (top[0] + w, top[1] + h)
cv2.rectangle(img, top, bottom, 255, 2)
# NOTE(review): imshow without a waitKey() call may not render the window
# before the script exits -- confirm on the target environment.
cv2.imshow('image', img)
cv2.imwrite('img.jpg', img) | [
"aciddust20@gmail.com"
] | aciddust20@gmail.com |
592cca932c6d29898437e2362af88c8d578e9466 | a735cc0b04b3227720bfd97c74ef13bda5bdf571 | /python/documentation/doc/conf.py | 87be3541d59a67e9c9cc135f03e7e0690fa181a4 | [
"MIT"
] | permissive | abstractfactory/labs | beed0aab27cd3028c67ece87ef91d18b55114eb1 | f0791fb92686456d4cef3a11f699590a949fd6a9 | refs/heads/master | 2021-01-23T20:50:07.613682 | 2014-11-18T10:30:29 | 2014-11-18T10:30:29 | 20,175,862 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,179 | py | # -*- coding: utf-8 -*-
#
# Labs documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 24 15:49:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Labs'
copyright = u'2014, Marcus Ottosson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Labsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Labs.tex', u'Labs Documentation',
u'Marcus Ottosson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'labs', u'Labs Documentation',
[u'Marcus Ottosson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Labs', u'Labs Documentation',
u'Marcus Ottosson', 'Labs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"konstruktion@gmail.com"
] | konstruktion@gmail.com |
c2c96efb9d83a79ac8a7d9c9c470b71f3b5b3a20 | 2a736942082b1b59ba1be0f983211ff2a46eed46 | /4.py | 1e8620e014b2fb10767b9c6405fe240200520f70 | [] | no_license | Lola-n28/exam1 | 1e07c030766169725eb0cf5104467a7a50eb631e | 45d41dc9e78af7ad593490dd48ad33dcdf8474f6 | refs/heads/main | 2023-03-09T21:12:05.067851 | 2021-02-28T15:35:38 | 2021-02-28T15:35:38 | 343,133,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Дается 2 строки "Aidana" и "Adilet" . Вам нужно в результате получить смешанную строку из двух имен, буква за буквой.
# Output: "AAiddialneat"
str1 = "Aidana"
str2 = "Adilet"
# Interleave the two names letter by letter. The original spelled out all
# twelve indexed concatenations by hand; zip + join does the same thing and
# works for any pair of equal-length names.
str3 = "".join(a + b for a, b in zip(str1, str2))
print(str3)
| [
"lola-n@mail.ru"
] | lola-n@mail.ru |
4d1a51fd51c3b13e2b368aa1ef3b0e4c5f8fa970 | 971b59efea59d8f4813a6f8990ff76440c61cc3e | /main.py | 105e2366dd0a9a9075085a33efbf8a6d0b43062f | [] | no_license | yossarian27/web-caesar | 4b9d78fbe8ff1641e4a5474efb6b0c348e84bafe | a900c6757fa3923b3f501aa65ea8b23a5fd55514 | refs/heads/master | 2021-01-23T05:00:01.720392 | 2017-03-26T22:39:44 | 2017-03-26T22:39:44 | 86,269,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import caeser
import cgi
def build_page(textarea_content):
    """Render the Web Caesar page: a heading followed by the rotation/message
    form, with *textarea_content* placed inside the message textarea."""
    fragments = [
        "<h2>Web Caesar</h2>",
        "<form method='post'>",
        "<label>Rotate by:</label>",
        "<input type='number' name='rotation'/>",
        "<br>",
        "<label>Type a message:</label>",
        "<textarea name='message'>" + textarea_content + "</textarea>",
        "<br>",
        "<input type='submit'/>",
        "</form>",
    ]
    return "".join(fragments)
class MainHandler(webapp2.RequestHandler):
    """Serve the Web Caesar form and handle encryption submissions."""
    def get(self):
        # First visit: render the form with an empty message box.
        self.response.write(build_page(""))
    def post(self):
        # Encrypt the submitted message with the requested rotation, escape
        # the result for safe HTML display, and re-render the form with it.
        plaintext = self.request.get("message")
        shift = int(self.request.get("rotation"))
        ciphertext = caeser.encrypt(plaintext, shift)
        self.response.write(build_page(cgi.escape(ciphertext)))
# WSGI application entry point: route the site root ('/') to MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
| [
"horn.philip@gmail.com"
] | horn.philip@gmail.com |
5c51bfb7f282424132788fd7c4194077772ec60a | b8cf6201e364e71180889218262bcf3833097f30 | /matrixes.py | 38f0228517a27645ea51e08f1044266a2dbb1e85 | [] | no_license | skvimpact/ml | efe3519964ac8b5940ba58763c4911e6731def1c | a0f8e129047263dfbfb701e42871d7c3ab40a71d | refs/heads/master | 2023-07-03T05:49:13.529017 | 2021-08-06T14:10:14 | 2021-08-06T14:10:14 | 393,365,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import numpy as np
# Practice script for basic numpy matrix operations.
A = np.array([
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
    [17, 18, 19, 20],
])
M1 = np.array([
    [1, 2],
    [3, 4],
    [5, 6],
    [7, 8],
])
M2 = np.array([
    [1, 2, 3],
    [5, 4, 3],
    [2, 0, 1],
])
# Invert M2 and show that M2 @ M2^-1 gives (numerically) the identity.
M4 = np.linalg.inv(M2)
print(M4)
print(M2 @ M4)
"skvimpact@yandex.ru"
] | skvimpact@yandex.ru |
cdd94e2790e5892438719e5ec02d940765243b43 | e8cfc888d3756635d49c9cb92fd0af02af58a18d | /customer/migrations/0006_remove_customer_customer_picture.py | 944ea3784ef4c5e218cae4d10b58f8e644b23c59 | [] | no_license | maryvillondo/MHA | 666b7b78c5f5671e4c2ecbbc0fdefd01f8946836 | 271b39c0866d006d3719d4de6ddb5544485d852d | refs/heads/master | 2022-12-27T21:02:44.116961 | 2020-10-16T11:54:02 | 2020-10-16T11:54:02 | 296,863,527 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # Generated by Django 3.1.1 on 2020-09-19 11:59
from django.db import migrations
class Migration(migrations.Migration):
    # Schema migration: drop the ``customer_picture`` field from the
    # ``customer`` model. Applies after migration 0005_auto_20200919_1955.
    dependencies = [
        ('customer', '0005_auto_20200919_1955'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='customer',
            name='customer_picture',
        ),
    ]
| [
"maryvillondo@gmail.com"
] | maryvillondo@gmail.com |
5242f6f122ece46875d63baf451df2044a5956d8 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/adcampaignfrequencycontrolspecs.py | d0005352b87cf20f8700d3c56dda97efc9a99ee6 | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 1,973 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdCampaignFrequencyControlSpecs(
    AbstractObject,
):
    """Auto-generated Graph API node for an ad campaign's frequency-control
    spec (see the module-level notice: do not edit by hand)."""
    def __init__(self, api=None):
        # api: optional API client instance, stored for later use.
        super(AdCampaignFrequencyControlSpecs, self).__init__()
        self._isAdCampaignFrequencyControlSpecs = True
        self._api = api
    class Field(AbstractObject.Field):
        # Graph API field names exposed by this node.
        event = 'event'
        interval_days = 'interval_days'
        max_frequency = 'max_frequency'
    # API type string for each field above.
    _field_types = {
        'event': 'string',
        'interval_days': 'unsigned int',
        'max_frequency': 'unsigned int',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # This node has no enum-typed fields.
        field_enum_info = {}
        return field_enum_info
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
19a33e678a67169ee30373693e79bdefba41046e | 55e9aa8ca964d82f99b245366dd35e557b526d5d | /leetcode/8. String to Integer (atoi).py | be686e10ed4cf453ce8d75c400e997442b881c7a | [] | no_license | gjwei/algorithms | ee31bd4e70a7974824ca234383eed3341d239830 | 07fa69137877b0a3bc6f0df7d7a190d737bacd6c | refs/heads/master | 2021-01-20T05:09:24.434195 | 2018-09-19T08:40:31 | 2018-09-19T08:40:31 | 141,776,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # coding: utf-8
# Author: gjwei
class Solution(object):
    def myAtoi(self, str):
        """
        Convert a string to a 32-bit signed integer (LeetCode #8, atoi).

        Skips leading spaces, reads an optional sign and then digits, and
        clamps the result to [-2**31, 2**31 - 1].

        Bug fixes vs. the original:
        - "2147483648" previously escaped the clamp (the check used
          ``result > 2<<30`` i.e. strictly greater than 2**31).
        - Negative overflow set ``result = -MAX`` and then multiplied by the
          sign again, so "-2147483649" returned +2147483648.

        :type str: str
        :rtype: int
        """
        INT_MAX = 2 ** 31 - 1
        INT_MIN = -2 ** 31
        index = 0
        n = len(str)
        # Skip leading spaces.
        while index < n and str[index] == ' ':
            index += 1
        if index == n:
            return 0
        sign = 1
        if str[index] in ('+', '-'):
            sign = -1 if str[index] == '-' else 1
            index += 1
        result = 0
        while index < n and '0' <= str[index] <= '9':
            result = result * 10 + int(str[index])
            if result > INT_MAX:
                # Already past the representable range; further digits can
                # only move it further out, so stop accumulating.
                break
            index += 1
        # Single clamp handles both overflow directions correctly.
        return max(INT_MIN, min(INT_MAX, sign * result))
return sign * result | [
"t-jiagao@microsoft.com"
] | t-jiagao@microsoft.com |
33b7d232898bdee041f376c8b75a7cb25a89b260 | 737d7a22a39003308627a2f0cbcec8696b9a492b | /creational/builder.py | 1594f1be49ca67e8c2ae11f88209b2bb567b0ce2 | [] | no_license | vladpython22/pattern | 21b3e3e980f24069114255efc615a64cfe3879c6 | 8ffd2a699261e85355da745ef52025e367013e2c | refs/heads/master | 2023-02-15T14:17:42.642957 | 2020-12-27T12:59:56 | 2020-12-27T12:59:56 | 324,765,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | #Интерфейс для пошагового создания сложных объектов
print('Hello, World')
s = 'adffhhg'
n = 5
print(s[0:5])
| [
"prostranstvovlad@gmail.com"
] | prostranstvovlad@gmail.com |
355c5416ae648b3b060d68d7a090c1c292f89af3 | 0bfbdef12f7801f4f030b4ba5523f95429f4f4c8 | /AutoFramework/testobject/module_report_waterPower_obj.py | aeecd2a1c9710dc2ceb96f75e940787a798e4650 | [] | no_license | pylk/autoWeb | 77ef1aaceeda9d1207539f4776ac0bd88901ca41 | 59b3b65fe9fdf5a745b0bc8d493486ca79db16c8 | refs/heads/master | 2022-02-24T10:41:22.111991 | 2019-04-26T03:08:07 | 2019-04-26T03:08:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,774 | py | # -*- coding:utf-8 -*-
import os
from datetime import datetime, timedelta
from AutoFramework.core.pom import BasePage
from selenium.webdriver.common.by import By
class Moudle_report_waterPower_Unit(BasePage):
    """Page object for the hydro-power unit report: tab locators, report
    export, and cleanup of previously exported files."""
    def __init__(self, *args, **kwargs):
        super(Moudle_report_waterPower_Unit, self).__init__(*args, **kwargs)
        # Tab locators on the report page.
        self.waterDaily = (By.XPATH, "//a[@onclick='fun_a()']")
        self.waterSituation = (By.XPATH, "//a[@onclick='fun_b()']")
        self.waterpowerStation = (By.XPATH, "//a[@onclick='fun_c()']")
        # Export button.
        self.downLoad = (By.XPATH, "//p[contains(.,'输出')]")
        self.yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
        self.fileName = f'水电机组情况表{self.yesterday}.xls'
        # Repeated downloads get a " (1)", " (2)", ... suffix; track every
        # possible copy (including leftovers) so cleanup removes them all.
        self.fileNames = [f'水电机组情况表{self.yesterday} ({x}).xls' for x in range(1, 100)]
        self.fileNames.append(self.fileName)
    def click_waterDaily(self):
        self.click(self.waterDaily)
    def click_waterpowerStation(self):
        self.click(self.waterpowerStation)
    def click_waterSituation(self):
        self.click(self.waterSituation)
    def click_downLoad(self):
        """Export the report."""
        self.click(self.downLoad)
        self.getLogger.info("{}:下载成功".format(self.fileName))
    def remove_downloadFile(self):
        """Delete every copy of the exported report from the testData folder."""
        self.pwd = os.getcwd()
        self.fPath = os.path.abspath(os.path.dirname(self.pwd))
        self.dPath = os.path.join(self.fPath, r'testData')
        self.filePath = [os.path.join(self.dPath, name) for name in self.fileNames]
        for candidate in filter(os.path.exists, self.filePath):
            os.remove(candidate)
            self.getLogger.info("{}:删除成功".format(candidate))
class Moudle_report_waterPower_powerStation(BasePage):
    """Page object for the hydro power-station generation report: tab
    locators, report export, and cleanup of previously exported files."""
    def __init__(self, *args, **kwargs):
        super(Moudle_report_waterPower_powerStation, self).__init__(*args, **kwargs)
        # Tab locators on the report page.
        self.waterDaily = (By.XPATH, "//a[@onclick='fun_a()']")
        self.waterSituation = (By.XPATH, "//a[@onclick='fun_b()']")
        self.waterUnit = (By.XPATH, "//a[@onclick='fun_c()']")
        # Export button.
        self.downLoad = (By.XPATH, "//p[contains(.,'输出')]")
        self.yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
        self.fileName = f'发电情况表{self.yesterday}.xls'
        # Repeated downloads get a " (1)", " (2)", ... suffix; track every
        # possible copy (including leftovers) so cleanup removes them all.
        self.fileNames = [f'发电情况表{self.yesterday} ({x}).xls' for x in range(1, 100)]
        self.fileNames.append(self.fileName)
    def click_waterDaily(self):
        self.click(self.waterDaily)
    def click_waterUnit(self):
        self.click(self.waterUnit)
    def click_waterSituation(self):
        self.click(self.waterSituation)
    def click_downLoad(self):
        """Export the report."""
        self.click(self.downLoad)
        self.getLogger.info("{}:下载成功".format(self.fileName))
    def remove_downloadFile(self):
        """Delete every copy of the exported report from the testData folder."""
        self.pwd = os.getcwd()
        self.fPath = os.path.abspath(os.path.dirname(self.pwd))
        self.dPath = os.path.join(self.fPath, r'testData')
        self.filePath = [os.path.join(self.dPath, name) for name in self.fileNames]
        for candidate in filter(os.path.exists, self.filePath):
            os.remove(candidate)
            self.getLogger.info("{}:删除成功".format(candidate))
class Moudle_report_waterPower_Situation(BasePage):
    """Page object for the daily water-situation report: tab locators, report
    export, and cleanup of previously exported files."""
    def __init__(self, *args, **kwargs):
        super(Moudle_report_waterPower_Situation, self).__init__(*args, **kwargs)
        # Tab locators on the report page.
        self.waterDaily = (By.XPATH, "//a[@onclick='fun_a()']")
        self.waterUnit = (By.XPATH, "//a[@onclick='fun_b()']")
        self.waterPowerStation = (By.XPATH, "//a[@onclick='fun_c()']")
        # Export button.
        self.downLoad = (By.XPATH, "//p[contains(.,'输出')]")
        # Computed for parity with the sibling page objects; the exported
        # file name for this report carries no date.
        self.yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
        self.fileName = '水情报表日报.xls'
        # Repeated downloads get a " (1)", " (2)", ... suffix; track every
        # possible copy (including leftovers) so cleanup removes them all.
        self.fileNames = [f'水情报表日报 ({x}).xls' for x in range(1, 100)]
        self.fileNames.append(self.fileName)
    def click_waterDaily(self):
        self.click(self.waterDaily)
    def click_waterUnit(self):
        self.click(self.waterUnit)
    def click_waterPowerStation(self):
        self.click(self.waterPowerStation)
    def click_downLoad(self):
        """Export the report."""
        self.click(self.downLoad)
        self.getLogger.info("{}:下载成功".format(self.fileName))
    def remove_downloadFile(self):
        """Delete every copy of the exported report from the testData folder."""
        self.pwd = os.getcwd()
        self.fPath = os.path.abspath(os.path.dirname(self.pwd))
        self.dPath = os.path.join(self.fPath, r'testData')
        self.filePath = [os.path.join(self.dPath, name) for name in self.fileNames]
        for candidate in filter(os.path.exists, self.filePath):
            os.remove(candidate)
            self.getLogger.info("{}:删除成功".format(candidate))
class Moudle_report_waterPower_singleHostReport(BasePage):
    """Page object for the single-unit yearly report: one locator per metric
    tab, report export, and cleanup of previously exported files."""
    def __init__(self, *args, **kwargs):
        super(Moudle_report_waterPower_singleHostReport, self).__init__(*args, **kwargs)
        # Metric tab locators.
        self.power_on = (By.XPATH, "//a[contains(.,'开机次数')]")
        self.shutdown = (By.XPATH, "//a[contains(.,'停机次数')]")
        self.hours_run = (By.XPATH, "//a[contains(.,'运行小时')]")
        self.cold_standby = (By.XPATH, "//a[contains(.,'冷备用小时')]")
        self.hot_standby = (By.XPATH, "//a[contains(.,'热备用小时')]")
        self.overhaul_hours = (By.XPATH, "//a[contains(.,'检修小时')]")
        self.power_generation = (By.XPATH, "//a[@href='javascript:queryWaterSingleFdl();']")
        # Export button.
        self.downLoad = (By.XPATH, "//p[contains(.,'输出')]")
        self.year = datetime.today().strftime("%Y")
        self.fileName = f'单机报表(开机次数){self.year}.xls'
        # Repeated downloads get a " (1)", " (2)", ... suffix; track every
        # possible copy (including leftovers) so cleanup removes them all.
        self.fileNames = [f'单机报表(开机次数){self.year} ({x}).xls' for x in range(1, 100)]
        self.fileNames.append(self.fileName)
    def click_power_on(self):
        self.click(self.power_on)
    def click_shutdown(self):
        self.click(self.shutdown)
    def click_hours_run(self):
        self.click(self.hours_run)
    def click_cold_standby(self):
        self.click(self.cold_standby)
    def click_hot_standby(self):
        self.click(self.hot_standby)
    def click_overhaul_hours(self):
        self.click(self.overhaul_hours)
    def click_power_generation(self):
        self.click(self.power_generation)
    def click_downLoad(self):
        """Export the report."""
        self.click(self.downLoad)
        self.getLogger.info("{}:下载成功".format(self.fileName))
    def remove_downloadFile(self):
        """Delete every copy of the exported report from the testData folder."""
        self.pwd = os.getcwd()
        self.fPath = os.path.abspath(os.path.dirname(self.pwd))
        self.dPath = os.path.join(self.fPath, r'testData')
        self.filePath = [os.path.join(self.dPath, name) for name in self.fileNames]
        for candidate in filter(os.path.exists, self.filePath):
            os.remove(candidate)
            self.getLogger.info("{}:删除成功".format(candidate))
| [
"isscal@sina.com"
] | isscal@sina.com |
dc676abe2bee2c632ee93b23ce50edf4dd60867f | d451b43ba9744775e34b34bd4bd32328afc55b7c | /commit_download.py | e006e03c4364046ee5605c05e4b503bb3d57cdc8 | [
"MIT"
] | permissive | lyriccoder/temp_python | aaaeec7250f32e31790ca61d22b3d6ed9289a420 | f9c2263dbdc89c10dd9665dd846e7b637d6819cf | refs/heads/master | 2023-05-01T14:00:28.125599 | 2023-04-22T19:15:38 | 2023-04-22T19:15:38 | 297,299,305 | 0 | 2 | MIT | 2021-01-26T12:56:20 | 2020-09-21T10:01:51 | Python | UTF-8 | Python | false | false | 12,180 | py | from pathlib import Path
from typing import Dict, Any, List, Tuple
import os
import pandas as pd
import requests
from random import choice
from tqdm import tqdm
import json
from pathlib import Path
import os
import pandas as pd
import requests
from random import choice
from tqdm import tqdm
import time
import asyncio
import aiohttp
import json
import os
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from concurrent.futures import TimeoutError
from csv import DictWriter, QUOTE_MINIMAL
from functools import partial
from os import cpu_count, getenv, makedirs, sched_getaffinity
from typing import List, Dict
import uuid
from io import StringIO, BytesIO
from lxml import html, etree
import traceback
from typing import List, Tuple, Set, Dict
from loguru import logger
from tree_sitter.binding import Node
from program_graphs.adg.parser.java.utils import parse_ast_tree_sitter
from urllib.parse import unquote
import sys
import requests
# All HTTP calls below pass verify=False, so silence urllib3's
# InsecureRequestWarning noise up front.
requests.urllib3.disable_warnings()
# HERE there must be tokens for github API
# Each entry is a (username, personal-access-token) pair; get_random_token()
# draws from this pool to spread requests across GitHub API rate limits.
tokens = [
]
# Route loguru output into one file per level under ./logs, created on demand.
logger.remove(0)
if not Path('logs').exists():
    Path('logs').absolute().mkdir()
logger.add(
    "logs/debug.log", rotation="500 MB", filter=lambda record: record["level"].name == "DEBUG", backtrace=True,
    diagnose=True)
logger.add(
    "logs/info.log", rotation="500 MB", filter=lambda record: record["level"].name == "INFO", backtrace=True,
    diagnose=True)
logger.add(
    "logs/error.log", rotation="500 MB", filter=lambda record: record["level"].name == "ERROR", backtrace=True,
    diagnose=True)
def get_random_token(token_dict) -> Tuple[str, str]:
    """Return a randomly selected (username, token) pair from *token_dict*."""
    selected = choice(token_dict)
    return selected
def traverse(root) -> Node:
    """Depth-first pre-order generator over a node and all its descendants.

    Handles nodes whose ``children`` attribute is None or an empty list.
    """
    yield root
    for child in (root.children or []):
        yield from traverse(child)
def get_tree_sitter_node_name(node: Node, code: str) -> str:
    """Return the identifier text of a declaration node.

    The text is sliced out of *code*'s UTF-8 bytes using the node's byte
    offsets. For a ``field_declaration`` the name lives on its nested
    ``variable_declarator``, so recurse into that child first.
    Returns None implicitly when no identifier child exists.
    """
    if node.type == "field_declaration":
        declarator = [c for c in node.children if c.type == "variable_declarator"][0]
        return get_tree_sitter_node_name(declarator, code)
    for child in node.children:
        if child.type == "identifier":
            return bytes(code, "utf-8")[child.start_byte:child.end_byte].decode("utf-8")
async def process_commit(
        url: str,
        cwe_id: str,
        cve_id: str,
        session: Any,
        args: Namespace) -> None:
    """Fetch one commit from the GitHub API and archive its changed C/C++ files.

    Each entry of the commit's ``files`` list is passed to
    ``find_changed_func_in_file``, which stores the new and previous versions
    of the file under ``args.output``. Errors other than cancellation are
    logged and swallowed so one bad commit does not abort the whole batch.
    """
    global tokens
    try:
        headers = await get_random_auth_headers(tokens)
        logger.debug(f'Connecting to {url} {type(session)}')
        # e.g. https://api.github.com/repos/<owner>/<repo>/commits/<sha>
        # -> "<owner>/<repo>"
        repo_name = url.split('/commits')[0].split('repos/')[1]
        async with session.get(url, headers=headers, ssl=False, raise_for_status=True) as response:
            logger.debug(f'Resp: {response}')
            payload = await response.read()
            content = json.loads(payload.decode())
            changed_files = content.get('files')
            commit_sha = content.get('sha')
            if changed_files:
                for i, changed_file in enumerate(changed_files, start=1):
                    await find_changed_func_in_file(
                        args,
                        commit_sha,
                        cwe_id,
                        cve_id,
                        changed_file,
                        url,
                        repo_name,
                        i
                    )
    except asyncio.CancelledError:
        raise
    except Exception:
        # Bug fix: the original logged undefined names (class_name/func_name),
        # raising a NameError inside the handler that masked the real failure.
        logger.error(f'Error in {url}')
        traceback.print_exc()
async def find_changed_func_in_file(
        args: Namespace,
        commit_sha: str,
        cwe_id: str,
        cve_id: str,
        file: Dict[str, Any],
        url: str,
        repo_name: str,
        iter: int) -> None:
    """Archive the new and previous revisions of one changed C/C++ file.

    ``file`` is one entry of a commit's ``files`` list from the GitHub API.
    Non-C/C++ files are ignored. The post-commit content is always saved;
    the pre-commit content is saved only when the file has an earlier commit
    in its history.
    """
    global tokens
    full_file = Path(file['filename'])
    raw_url = file['raw_url']
    repo_url = url.split('/commits')[0].replace('repos/', '')
    # Case-insensitive suffix test. Bug fix: the original chained
    # str.lower().endswith(".C") etc. — an upper-case suffix can never match a
    # lower-cased string, so those branches were dead code.
    cpp_suffixes = ('.c', '.cc', '.cpp', '.cxx', '.cppm', '.ixx', '.cp', '.c++')
    if not str(full_file).lower().endswith(cpp_suffixes):
        return
    # Download the post-commit ("new") version of the file.
    headers = await get_random_auth_headers(tokens)
    async with aiohttp.ClientSession() as session3:
        async with session3.get(raw_url, headers=headers, ssl=False, raise_for_status=True) as response1:
            after_file_code_bytes = await response1.read()
    extention = full_file.suffixes[0]
    await save_to_file(
        cwe_id, commit_sha, args.output, False, iter, cve_id, repo_name, after_file_code_bytes, extention)
    # List the file's history starting at this commit: entry 0 is the current
    # commit, entry 1 (if present) is the previous revision of the file.
    url_for_certain_file_by_certain_sha = f'{repo_url}/commits?sha={commit_sha}&path={file["filename"]}'.replace(
        'api.github.com', 'api.github.com/repos')
    headers = await get_random_auth_headers(tokens)
    # NOTE(review): synchronous requests.get inside a coroutine blocks the
    # event loop; kept here to preserve the original control flow.
    prev_commits = requests.get(url_for_certain_file_by_certain_sha, headers=headers, verify=False).json()
    if len(prev_commits) > 1:
        old_version_commit = prev_commits[1].get('sha')
        old_commit_url_for_file = f'{repo_url}/commits/{old_version_commit}/{file["filename"]}'.replace(
            'api.github.com', 'github.com').replace('commits/', 'raw/')
        headers = await get_random_auth_headers(tokens)
        prev_file_code_bytes = requests.get(old_commit_url_for_file, headers=headers, verify=False).content
        await save_to_file(
            cwe_id,
            commit_sha,
            args.output,
            True,
            iter,
            cve_id,
            repo_name,
            prev_file_code_bytes,
            extention)
    else:
        # Bug fix: the original referenced an undefined `class_name` here,
        # raising NameError whenever a file had no earlier commit.
        logger.debug(f'Can\'t find prev commit for {full_file}')
async def save_to_file(
        cwe_id: str,
        commit_sha: str,
        output_folder: str,
        is_vul: bool,
        num: int,
        cve_id: str,
        repo_name: str,
        binary_text: bytes,
        extention: str) -> None:
    """Write one file revision plus a meta.json marker to disk.

    Layout: <output_folder>/<cwe_id>/<owner--repo>/<commit_sha>/<num>-{old|new}<extention>

    ``is_vul`` selects the "-old" (pre-fix) name, otherwise "-new".
    Commits whose CWE id string does not mention "cwe" are grouped under
    "Other".
    """
    if is_vul:
        local_file_name = f'{num}-old{extention}'
    else:
        local_file_name = f'{num}-new{extention}'
    if 'cwe' not in cwe_id.lower():
        cwe_id = 'Other'
    # "owner/repo" is not a valid single directory name, so join with "--".
    repo_modified_path_name = '--'.join(Path(repo_name).parts)
    commit_path = Path(output_folder).absolute() / cwe_id / repo_modified_path_name / commit_sha
    # One race-free call replaces the original's four exists()/mkdir() pairs.
    commit_path.mkdir(parents=True, exist_ok=True)
    full_file = Path(commit_path, local_file_name)
    with open(full_file, 'wb') as w:
        w.write(binary_text)
    with open(Path(commit_path, 'meta.json'), 'w') as w:
        json.dump(
            {
                "commit_id": commit_sha,
                "cve_id": cve_id,
                "cwe_id": cwe_id
            }, w)
async def html_str(html_str: str) -> List[str]:
    """Extract commit-link hrefs from a GitHub timeline HTML page.

    Returns the stripped ``href`` attribute of every timeline commit anchor
    matched by the XPath below; anchors without an href are skipped.
    """
    tree = etree.parse(StringIO(html_str), etree.HTMLParser())
    xpath = '//div[@class = "TimelineItem-body"]//ol/li/div/p/a[contains(@class, "Link--primary")]'
    hrefs = []
    for anchor in tree.xpath(xpath):
        value = anchor.attrib.get("href")
        if value:
            hrefs.append(value.strip())
    return hrefs
async def get_random_auth_headers(tokens):
    """Build an Authorization header from a randomly chosen (user, token) pair."""
    _, token = get_random_token(tokens)
    return {'Authorization': "token {}".format(token)}
def chunks(lst, n):
    """Yield successive n-sized chunks from lst (last chunk may be shorter)."""
    return (lst[start:start + n] for start in range(0, len(lst), n))
async def main() -> None:
    """Read the vul4j CSV, derive GitHub commit API URLs, and scrape them.

    Commits are deduplicated, split into batches of 30, and each batch is
    fetched concurrently with one aiohttp session. A failing batch is logged
    and skipped rather than aborting the run.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--output",
        "-o",
        type=Path,
        required=True,
        help="Path to output folder")
    parser.add_argument(
        "--input",
        "-i",
        type=Path,
        required=True,
        default=Path('vul4j/data/vul4j_dataset.csv'),
        help="Path to input vul4j csv")
    args = parser.parse_args()
    df = pd.read_csv(args.input)
    df = df[df['codeLink'].str.startswith('https://github.com/')]
    commits = set()
    for _, item in tqdm(df.iterrows(), total=df.shape[0]):
        # https://github.com/<o>/<r>/commit/<sha>
        # -> https://api.github.com/repos/<o>/<r>/commits/<sha>
        github_commit = item['codeLink'].replace('github.com', r'api.github.com/repos').replace(
            r'/commit/',
            r'/commits/')
        commits.add((github_commit, item['CWE ID'], item.get('CVE ID')))
    batches = list(chunks(list(commits), 30))
    pbar = tqdm(total=len(batches))
    for batch in batches:
        session = aiohttp.ClientSession()
        try:
            await asyncio.gather(
                *[process_commit(com, cwe_id, cve_id, session, args) for
                  com, cwe_id, cve_id in
                  batch])
        except asyncio.CancelledError:
            raise
        except Exception:
            # Bug fix: the original used a bare `except: continue`, which
            # silently swallowed every error (and cancellation) and skipped
            # the `finally` cleanup semantics it seemed to intend.
            logger.exception('Batch failed')
        finally:
            # Single close here replaces the original's double close on the
            # success path; progress advances exactly once per batch.
            pbar.update()
            await session.close()
# Entry point: drive the async scraper to completion on the default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"noreply@github.com"
] | noreply@github.com |
4418de44f1a68754463c019ddf6e814c07bf403f | d9bb1645feb748ad0b80bf831bb101c40302b44a | /site/popvox/db/mark_reintros.py | fc8c12a01ccc4ae16825ba5cb9b0b04f2b1ca9be | [] | no_license | POPVOX/site | 43fb2db0427857fdc5c0da0930757e8ba2472622 | 3324e6b40f2833e089a4235bb618d878e94cb510 | refs/heads/master | 2021-03-24T09:53:59.936863 | 2015-06-16T14:41:10 | 2015-06-16T14:41:10 | 8,064,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!runscript
from popvox.models import Bill
# For each matched pair in the TSV, record that the 111th-Congress bill was
# reintroduced as the 112th-Congress bill.
for line in open("/home/annalee/bills_matched.tsv"):
    columns = line.strip().split("\t")
    new_bill = Bill.objects.get(id=columns[0])
    old_bill = Bill.objects.get(id=columns[4])
    # Sanity-check the TSV against the database before writing anything.
    assert new_bill.congressnumber == 112
    assert columns[2] == new_bill.displaynumber()
    assert old_bill.congressnumber == 111
    assert columns[6] == old_bill.displaynumber()
    old_bill.reintroduced_as = new_bill
    old_bill.save()
| [
"josh@popvox.com"
] | josh@popvox.com |
52b87492ba8f90766eeea4b0072e15fbc110602c | c4882f5d6ff1555cde7ed7fd3e173e80040f20ad | /yapo_valdivia.py | c4be28d1b5641a19d86388d5d54e76d8beecb9b0 | [] | no_license | GMA950/CasoDeEstudioYapoBocca | 990080b40a633d3133ba837fc70c35f218d876c9 | a03f7d2a490493a8b22946f1483f735affc7be79 | refs/heads/master | 2022-12-04T06:45:40.287001 | 2020-08-30T05:51:48 | 2020-08-30T05:51:48 | 291,407,949 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,276 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 18 17:00:00 2018
"""
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import xlsxwriter
import urllib.request
from selenium import webdriver
import time
# Scrape every listing in the Yapo "Los Rios" category page-by-page with
# Selenium + BeautifulSoup, then export the collected fields to Excel.
profile = pd.DataFrame(columns=['Link', 'Title', 'Description', 'Price', 'Date', 'Name', 'Category'])
browser = webdriver.Chrome()
myUrl = 'https://www.yapo.cl/los_rios/todos_los_avisos?ca=11_s&l=0&w=1&cmn=243'
browser.get(myUrl)
pageSoup = soup(browser.page_source, 'html.parser')
# The "last page" link encodes the page count after the final '=' of its href;
# 'pages' keeps the URL prefix so page URLs can be rebuilt as prefix + number.
pages = pageSoup.find('span', {'class', 'nohistory FloatRight'}).a['href']
index = pages.rfind('=')
lastPage = int(pages[index+1:])
pages = pages[:index+1]
for i in range(lastPage):
    url = pages + str(i+1)
    browser.get(url)
    pageSoup = soup(browser.page_source, 'html.parser')
    links = pageSoup.findAll('td', {'class': 'thumbs_subject'})
    for link in links:
        # Reset every scraped field for each listing.
        h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13 = ('N/A',) * 13
        # Bug fix: h14 (category) was never initialized, so a listing page
        # without a breadcrumbs block crashed with NameError below.
        h14 = 'N/A'
        print(link.find('a', {'class': 'title'})['href'])
        browser.get(link.find('a', {'class': 'title'})['href'])
        pageSoup = soup(browser.page_source, 'html.parser')
        # h1: listing title
        if pageSoup.find('h1', {"id": "da_subject"}):
            h1 = pageSoup.find('h1', {"id": "da_subject"}).text.strip()
        # h2: publication date (date part of the data-datetime attribute)
        if pageSoup.find('article'):
            h2 = pageSoup.find('article').find('div', {"id": "dataAd"}).attrs['data-datetime'].split(' ', 1)[0]
        # h3: seller user name from the sidebar widget
        if pageSoup.find('aside', {"class": "sidebar-right"}):
            aside = pageSoup.find('aside', {"class": "sidebar-right"})
            print("username?")
            h3 = aside.find('seller-info').attrs['username']
            print(h3)
        # h4: reference price, h5: listed price (whitespace normalized)
        if pageSoup.find('div', {"class": "referencial-price text-right"}):
            h4 = pageSoup.find('div', {"class": "referencial-price text-right"}).text.strip().replace(u'\n', u' ').replace(u'\t', u'')
        if pageSoup.find('div', {"class": "price text-right"}):
            h5 = pageSoup.find('div', {"class": "price text-right"}).text.strip().replace(u'\n', u' ').replace(u'\t', u'')
            print("price:" + h5)
        # h13: free-text description (drop the leading label word)
        if pageSoup.find('div', {"class": "description"}):
            try:
                h13 = pageSoup.find('div', {"class": "description"}).text.split(' ', 1)[1].strip().replace(u'\n', u' ')
            except IndexError:
                # Description exists but has no second token; skip the listing
                # (narrowed from a bare except, which hid unrelated errors).
                continue
        # h14: category name from the breadcrumb trail
        if pageSoup.find('div', {"class": "breadcrumbs"}):
            h14 = pageSoup.find('div', {"class": "breadcrumbs"}).find('a', {"id": "breadcrumb_category"}).find('strong').text.strip().replace(u'\n', u' ')
            print(h14)
        ser = pd.Series([link.a['href'], h1, h13, h5, h2, h3, h14],
                        index=['Link', 'Title', 'Description', 'Price', 'Date', 'Name', 'Category'])
        profile = profile.append(ser, ignore_index=True)
        print(link.a['href'])
# Dump everything that was collected into a single Excel sheet.
filename = 'fre.xlsx'
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
profile.to_excel(writer, index=False)
writer.save()
| [
"francobocca.97@gmail.com"
] | francobocca.97@gmail.com |
824fe2517075df54beda8ff30d6a67ef447af8ae | 7a7bbdaab3cedcd9b89d9245d67fe7cc472fc288 | /1_dimention/2577.py | 057b91d168293a509f38fbb985532bb1d9aba21b | [] | no_license | rheehot/baekjun | cd213c6903e69a8e48b4942c950048c1c3e03c34 | 44792c6d125af7d9b0739c571e7918c802f73c01 | refs/heads/master | 2023-02-12T08:42:47.578842 | 2020-12-21T00:40:35 | 2020-12-21T00:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | a = int(input())
# Read the remaining two integers ('a' is read on the preceding line),
# multiply all three, and print how often each digit 0-9 occurs in the product.
b = int(input())
c = int(input())
product = str(a * b * c)
for digit in "0123456789":
    print(product.count(digit))
"sol35352000@gmail.com"
] | sol35352000@gmail.com |
de152f2617816773ea8a560bd180c0f58a88d540 | 623476abf83be73d64b88f2d6bead649a37c0c51 | /remove-txt.py | bbe9d0c6389c3660ce1897a7bd0f7ede8b69504e | [] | no_license | AquariniqueMu/cat-train-datasets | 9910bd612cc5caa25cfa0e3e175c3555deb0d8e4 | 558979973cfbc0000061d597cf4848dc41a6958f | refs/heads/master | 2022-11-06T10:37:59.242122 | 2020-06-26T16:16:19 | 2020-06-26T16:16:19 | 275,196,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | infile = "./train_list.txt"
# Strip the "cat_12_train/" path prefix from every line of the train list
# ('infile' is defined on the preceding line) and write the result to outfile.
outfile = "./list.txt"
delete_list = ["cat_12_train/"]
with open(infile) as src, open(outfile, "w+") as dst:
    for row in src:
        for token in delete_list:
            row = row.replace(token, "")
        dst.write(row)
"2101848138@qq.com"
] | 2101848138@qq.com |
2ad1d177babfa66832a827145afb904032abe5f8 | 6e18b1f0818740809f8c21c772e5bc7be304a6cc | /ProcessNewsTitles.py | a5d3ad06fd5fa38e287325ab24062b44f2e0f0b5 | [] | no_license | matthewhughes/DissertationProject | a8eb50b2731608ad31b28346ffca54077e21f2ef | 686d11ced526e55d694e731d315fdba61f6ec91a | refs/heads/master | 2020-04-24T17:55:07.139751 | 2015-04-21T11:03:38 | 2015-04-21T11:03:38 | 31,339,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | # -*- coding: utf-8 -*-
import nltk
class ProcessTitles(object):
def __init__(self):
self.title = ''
self.tokenized_text = ''
self.tokens = ''
self.noun = ''
self.verb = ''
def tokenize_title(self):
self.tokenized_text = nltk.word_tokenize(self.title)
def position_tags(self):
self.tokens = nltk.pos_tag(self.tokenized_text)
def find_noun(self):
nouns_list = []
for i in self.tokens:
if (i[1] == 'NN' or i[1] == 'NNS' or i[1] == 'NNP' or i[1] == 'NNPS'):
print i
nouns_list.append([i])
noun = nouns_list[0][0]
if noun != '':
self.noun = noun
print noun
else:
print "No noun found. "
def find_verb(self):
verbs_list = []
print self.tokens
for i in self.tokens:
if(i[1] == 'VB' or i[1] == 'VBD' or i[1] == 'VBG' or i[1] == 'VBN' or i[1] == 'VBP' or i[1] == 'VBZ'):
print i
verbs_list.append([i])
try:
verb = verbs_list[0][0]
if verb != '':
self.verb = verb
print verb
except:
print "No verb found. "
def main():
news_titles = ["Parties clash over jobless figures", "Funeral celebrates Becky Watts' life",
'County footballer jailed for attacks', u'Rolls-Royce receives record £6bn engine order',
'IMF praises UK economic strategy']
Titles = ProcessTitles()
try:
for i in range(0, len(news_titles)):
Titles.title = i
Titles.tokenize_title()
Titles.position_tags()
Titles.find_noun()
Titles.find_verb()
print Titles.noun
print Titles.verb
except IndexError:
pass
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
| [
"me@matthewhughes.co.uk"
] | me@matthewhughes.co.uk |
7edc6fff571a84060e84abd7adc6b4b898962426 | 058656c6fced1d8b616d4abf35970e002ef50fab | /ynab/models/month_summary.py | 633a98f058d4347fcac82d8f5b5e885189d70421 | [] | no_license | MattThommes/ynab-python | e6f8d4da9504acd6e02f1b33e9ead9ba058ede3c | c9c85f449091975219270b838aed825f50e41752 | refs/heads/master | 2022-10-01T06:34:55.010170 | 2022-09-02T23:45:48 | 2022-09-02T23:45:48 | 202,883,813 | 0 | 0 | null | 2019-08-17T13:23:15 | 2019-08-17T13:23:14 | null | UTF-8 | Python | false | false | 5,596 | py | # coding: utf-8
"""
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MonthSummary(object):
    """Summary data for a single budget month.

    Originally generated by swagger-codegen for the YNAB API; the
    ``swagger_types``/``attribute_map`` tables drive JSON (de)serialization.
    """

    # Maps attribute name -> declared swagger type.
    swagger_types = {
        'month': 'date',
        'note': 'str',
        'to_be_budgeted': 'float',
        'age_of_money': 'float'
    }

    # Maps attribute name -> JSON key.
    attribute_map = {
        'month': 'month',
        'note': 'note',
        'to_be_budgeted': 'to_be_budgeted',
        'age_of_money': 'age_of_money'
    }

    def __init__(self, month=None, note=None, to_be_budgeted=None, age_of_money=None):  # noqa: E501
        """Create a MonthSummary; every field except `note` must be non-None."""
        self._month = None
        self._note = None
        self._to_be_budgeted = None
        self._age_of_money = None
        self.discriminator = None
        # Route assignments through the property setters so that the
        # required fields are validated up front.
        self.month = month
        self.note = note
        self.to_be_budgeted = to_be_budgeted
        self.age_of_money = age_of_money

    @property
    def month(self):
        """The budget month (required)."""
        return self._month

    @month.setter
    def month(self, month):
        if month is None:
            raise ValueError("Invalid value for `month`, must not be `None`")  # noqa: E501
        self._month = month

    @property
    def note(self):
        """Free-form note for the month (may be None)."""
        return self._note

    @note.setter
    def note(self, note):
        self._note = note

    @property
    def to_be_budgeted(self):
        """The current balance of the account in milliunits format."""
        return self._to_be_budgeted

    @to_be_budgeted.setter
    def to_be_budgeted(self, to_be_budgeted):
        if to_be_budgeted is None:
            raise ValueError("Invalid value for `to_be_budgeted`, must not be `None`")  # noqa: E501
        self._to_be_budgeted = to_be_budgeted

    @property
    def age_of_money(self):
        """Age-of-money metric for the month (required)."""
        return self._age_of_money

    @age_of_money.setter
    def age_of_money(self, age_of_money):
        if age_of_money is None:
            raise ValueError("Invalid value for `age_of_money`, must not be `None`")  # noqa: E501
        self._age_of_money = age_of_money

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [v.to_dict() if hasattr(v, "to_dict") else v
                             for v in value]
            elif hasattr(value, "to_dict"):
                out[name] = value.to_dict()
            elif isinstance(value, dict):
                out[name] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                             for k, v in value.items()}
            else:
                out[name] = value
        return out

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when types match and all attributes match."""
        return isinstance(other, MonthSummary) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"deanm@synopsys.com"
] | deanm@synopsys.com |
e57aebb6fb7ca69bcb5a28998f4b3016e5559651 | 47366be5cbee9d7e086291c20f97f10ab2bf74fe | /cluster/cluster_create_inequalities_subset_kdd.py | a030a3765bc70ab81a1b6e0dfd314582797a9901 | [] | no_license | nipunbatra/journal | 3d44eed05c95970606649d17402da54fc0a415ff | 94a8b88589e8f60e6f0314f8c5a374f22336b3e9 | refs/heads/master | 2021-01-09T20:40:45.844121 | 2016-07-27T15:16:29 | 2016-07-27T15:16:29 | 62,874,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,042 | py | import time
import pandas as pd
import pickle
import os
import numpy as np
SLURM_OUT = "../slurm_out"
from subprocess import Popen
import time
print "a"
out_overall = pickle.load(open('../data/input/all_regions.pkl','r'))
num_trials=25
print "b"
K = 3
for train_region in ["SanDiego"]:
if train_region=="Austin":
NUM_HOMES_MAX = 45
elif train_region=="SanDiego":
NUM_HOMES_MAX = len(out_overall['SanDiego'])
else:
NUM_HOMES_MAX = len(out_overall['Boulder'])
NUM_HOMES_MAX=20
for test_region in ["Austin"]:
if train_region!=test_region:
TRANSFORMATIONS = ["None","DD","DD-percentage","median-aggregate-percentage",
"median-aggregate",'regional','regional-percentage']
else:
TRANSFORMATIONS = ["None"]
train_df = out_overall[train_region]
test_df = out_overall[test_region]
test_df=test_df[(test_df.full_agg_available==1)&(test_df.md_available==1)]
NUM_HOMES_MIN=4
for num_homes in range(NUM_HOMES_MIN, NUM_HOMES_MAX, 2):
for transform in TRANSFORMATIONS:
#for transform in ["None","DD","DD-percentage"]:
#for transform in ["median-aggregate-percentage"]:
print transform
print "*"*40
count = 0
#for appliance in ["dw",'hvac','fridge','wm','mw','ec','wh','oven']:
for appliance in ["hvac"]:
if appliance=="hvac":
month_min, month_max = 5, 11
else:
month_min, month_max = 1, 13
count+= 1
#for appliance in ["hvac","fridge","dr","wm"]:
test_df = test_df.ix[test_df[['%s_%d' %(appliance,month) for month in range(month_min, month_max)]].dropna().index]
for test_home in test_df.index:
#for appliance in ["mw"]:
if len(test_df.ix[test_home][['%s_%d' %(appliance, m) for m in range(month_min, month_max)]].dropna())==0:
# Appliance data not present for this homes..let's save some time
continue
print appliance, test_home, count, len(test_df.index), K, transform, train_region, test_region
OFILE = "%s/%d_%s_%s_%d_%s_%s.out" % (SLURM_OUT, num_homes, train_region[0], test_region[0], test_home, appliance[0], transform[0] )
EFILE = "%s/%d_%s_%s_%d_%s_%s.err" % (SLURM_OUT, num_homes, train_region[0], test_region[0], test_home, appliance, transform )
SLURM_SCRIPT = "%d_%s_%s_%d_%s_%s.pbs" % (num_homes, train_region[0], test_region[0], test_home, appliance[:2], transform)
CMD = 'python ../new_experiments/create_inequalities_subset_kdd.py %s %s %d %s %s %d %d %d' % (train_region, test_region,
test_home, appliance,
transform, K, num_homes, num_trials)
lines = []
lines.append("#!/bin/sh\n")
lines.append('#SBATCH --time=0-05:0:00\n')
lines.append('#SBATCH --mem=16\n')
lines.append('#SBATCH -o '+'"' +OFILE+'"\n')
lines.append('#SBATCH -e '+'"' +EFILE+'"\n')
lines.append(CMD+'\n')
with open(SLURM_SCRIPT, 'w') as f:
f.writelines(lines)
command = ['sbatch', SLURM_SCRIPT]
Popen(command)
#os.remove(SLURM_SCRIPT)
print "Now sleeping.."
import time
time.sleep(40)
time.sleep(400)
time.sleep(1200)
| [
"nipunb@iiitd.ac.in"
] | nipunb@iiitd.ac.in |
99d8b6c98f1ff15f6cd292944185330484515f15 | 33136e71ce4b1ffd8398d152916140e82cc4000b | /data_collection/venv/bin/tkconch | 424b13e1b8eb069b825576f82c7989e61360d1fe | [] | no_license | Cameron-Ozaki/cs115_TrafficDump | 01477cc4f997a2397102494d426721c8e8d581e0 | 22e68de6103da8e8da2078f1e3455e6ca261a689 | refs/heads/master | 2020-04-01T11:14:24.155257 | 2019-05-02T23:03:49 | 2019-05-02T23:03:49 | 153,153,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | #!/Users/alexisflores/Desktop/TRAFFIC_DUMP/cs115_TrafficDump/data_collection/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.tkconch import run
# Console-script shim (setuptools-generated): strip the "-script.py"/".exe"
# wrapper suffix from argv[0], then delegate to twisted.conch's tkconch main
# and exit with its return code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"aflore37@ucsc.edu"
] | aflore37@ucsc.edu | |
bc7a85b8948558404b30538e7fc848941f11834f | 566fdb06a1d95fda106fb9c1eb6989884967e299 | /Lab06/Lab06.py | 1a39b4887298f9e9fe3a3433c86198b92bde7bd9 | [] | no_license | blrajkumar/MCS_173-Python_Programming | e7ec7b91404da59106fba46dee9cbc9351205703 | 1305e9b4423024db218ab49e35b9896abbf56577 | refs/heads/master | 2023-02-05T08:01:30.404361 | 2020-12-21T14:19:07 | 2020-12-21T14:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,800 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 21:24:34 2020
@author: blraj
Name : Rajkumar B L
Reg.no : 2047120
Course : MCS173 - lAB06
MSC.Cs - 1st Year
"""
import re
class Lab06:
    """Small collection of regex utilities (lab exercise)."""

    def __init__(self):
        pass

    def time_tracker(self, curtime):
        """Classify an 'H', 'HH' or 'H:MM' time string into rest vs. coding hours."""
        rest_re = re.compile(r'^([0]?[0-4]|2[2-3])(:[0-5][0-9])?$')   # 00-04 and 22-23
        work_re = re.compile(r'^(0?[5-9]|1[0-9]|2[01])(:[0-5][0-9])?$')  # 05-21
        if rest_re.match(curtime):
            return "Time to rest. Stop Coding Raj!!!"
        if work_re.match(curtime):
            return "Great work raj. Keep coding!!!"
        return "Invalid time format :("

    def is_decimal(self, val):
        """Check that *val* has the form <digits>.<digits>."""
        if re.match(r'^[0-9]+\.[0-9]+$', val):
            return "The input is decimal."
        return "Invalid format or is not decimal"

    def acr_creator(self, inp_string):
        """Build an acronym from the first letter of each word in *inp_string*."""
        initial_re = re.compile(r'\b[a-zA-Z]')
        initials = ''.join(m.group(0) for m in initial_re.finditer(inp_string))
        # The input is only considered valid when it starts with a letter.
        if initial_re.match(inp_string):
            return "The acronym for given string is " + initials
        return "Invalid Input String !!!"

    def rep_wordletrs(self, inp_string):
        """List words made of a doubled letter sequence (e.g. 'murmur', 'tartar')."""
        found = [m.group(0) for m in re.finditer(r'\b(\w+)\1\b', inp_string)]
        if found:
            return "The list of words with matching letters are " + str(found)
        return "No matches !!!"
# Demo driver: prints a banner, then exercises each Lab06 method with the
# sample cases below. Output strings are part of the expected lab report,
# so they are left exactly as written.
auth="*"*40
eqsym="="*50
minsym="-"*18
lobj = Lab06()
print("\n" + auth + "\n*\t\t\t\t\t\t\t\t\t *")
print("*\t\tName : Rajkumar B L\t\t *")
print("*\t\tReg.no : 2047120\t\t\t *")
print("*\t\tCourse : MCS173 - lAB06\t\t *")
print("*\t\t\t\t\t\t\t\t\t *\n"+auth+"\n")
#Execution of Time Tracker Function
#%%
print(eqsym+"\n\tRunning Function - Time Tracker\n"+eqsym+"\n")
print("Case 01:\n\tFunc --> time_tracker(\"12\")")
print("\tResult -->",lobj.time_tracker("12"))
print("Case 02:\n\tFunc --> time_tracker(\"22:00\")")
print("\tResult -->",lobj.time_tracker("22:00"))
print("Case 01:\n\tFunc --> time_tracker(\"5:01\")")
print("\tResult -->",lobj.time_tracker("5:01"))
print("Case 01:\n\tFunc --> time_tracker(\"two 'o' clock\")")
print("\tResult -->",lobj.time_tracker("two 'o' clock"))
print("\n"+minsym+" END FUNC 01 "+minsym+"\n")
#%%
#Execution of Is Decimal Function
#%%
print(eqsym+"\n\tRunning Function - Is Decimal\n"+eqsym+"\n")
print("Case 01:\n\tFunc --> is_decimal(\"12345\")")
print("\tResult -->",lobj.is_decimal("12345"))
print("Case 02:\n\tFunc --> is_decimal(\"123.456\")")
print("\tResult -->",lobj.is_decimal("123.456"))
print("Case 03:\n\tFunc --> is_decimal(\"5199999999.99999999\")")
print("\tResult -->",lobj.is_decimal("5199999999.99999999"))
print("Case 04:\n\tFunc --> is_decimal(\"5point34\")")
print("\tResult -->",lobj.is_decimal("5point34"))
print("\n"+minsym+" END FUNC 01 "+minsym+"\n")
#%%
#Execution of Acronym Creator Function
#%%
print(eqsym+"\n\tRunning Function - Acronym Creator\n"+eqsym+"\n")
print("Case 01:\n\tFunc --> acr_creator(\"National Aeronautics Space Administration\")")
print("\tResult -->",lobj.acr_creator("National Aeronautics Space Administration"))
print("Case 02:\n\tFunc --> acr_creator(\"You Only Live Once\")")
print("\tResult -->",lobj.acr_creator("You Only Live Once"))
print("Case 03:\n\tFunc --> acr_creator(\"Laugh Out Loud\")")
print("\tResult -->",lobj.acr_creator("Laugh Out Loud"))
print("Case 04:\n\tFunc --> acr_creator(\"123 564\")")
print("\tResult -->",lobj.acr_creator("123 564"))
print("\n"+minsym+" END FUNC 01 "+minsym+"\n")
#%%
#Execution of Finding Word Repeater Function
#%%
print(eqsym+"\n\tRunning Function - Finding Word Repeater\n"+eqsym+"\n")
print("Case 01:\n\tFunc --> rep_wordletrs(\"Hey murmur , how is tartar ?\")")
print("\tResult -->",lobj.rep_wordletrs("Hey murmur, how is tartar ?"))
print("Case 02:\n\tFunc --> rep_wordletrs(\"Hello world\")")
print("\tResult -->",lobj.rep_wordletrs("Hello worldr"))
print("Case 03:\n\tFunc --> rep_wordletrs(\"YAY! mama is making yumyum cake\")")
print("\tResult -->",lobj.rep_wordletrs("YAY! mama is making yumyum cake"))
print("\n"+minsym+" END FUNC 01 "+minsym+"\n")
#%%
"49354075+rajkumarbl@users.noreply.github.com"
] | 49354075+rajkumarbl@users.noreply.github.com |
d4bf2779cbe8ba8c4b40d92539d2f826f71b6ac1 | 0d0d584429bd44f6675a581ad9b9c4190e6dfd07 | /print_lol2.py | 5772fc788a5939813e55c2df73f3d6110f831de0 | [] | no_license | Rimmii/PythonStudy | 08134f6a01fd9c85881380247578e6da952707d0 | 9f3fff5117358f476310566628e0fa7ba9ef35b2 | refs/heads/master | 2020-11-30T00:29:42.703112 | 2017-07-22T14:34:50 | 2017-07-22T14:34:50 | 95,650,223 | 2 | 1 | null | 2017-07-13T14:12:57 | 2017-06-28T09:03:59 | Python | UTF-8 | Python | false | false | 523 | py | movies = ["The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91, ["Graham Chapman", ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle",
"Terry Jones"]]]
def print_lol(the_list, indent = False, level = 0):
    """Print each item of a (possibly nested) list, one per line.

    When *indent* is true, items are prefixed with one tab per nesting
    level; nested lists are printed recursively one level deeper.
    """
    for entry in the_list:
        if isinstance(entry, list):
            print_lol(entry, indent, level + 1)
        else:
            if indent:
                print("\t" * level, end='')
            print(entry)
print_lol(movies, True, 2)
| [
"rimi@appleui-MacBook-Air-2.local"
] | rimi@appleui-MacBook-Air-2.local |
254e98217498dea904c67279827063013f34b5fb | e6421de3f06af8be4234e9901d71f86b31c6c3a7 | /pdenv/bin/easy_install-3.5 | 5e6b880f3fb8150f6afd21b014f591583dfa7719 | [
"MIT"
] | permissive | Elmartin913/PanDjango | bdb5446ee18ee297c23199cd3f9dd59cae555135 | 3b1eb52d53c87365f3d2fa5bd7ef72843ed5af32 | refs/heads/master | 2022-12-11T04:44:05.229530 | 2018-05-11T10:16:07 | 2018-05-11T10:16:07 | 128,903,323 | 0 | 0 | MIT | 2022-12-08T00:57:53 | 2018-04-10T08:54:10 | CSS | UTF-8 | Python | false | false | 276 | 5 | #!/home/elmartin913/workspace/app/PanDjango/pdenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Console-script shim (setuptools-generated): strip the "-script.py"/".exe"
# wrapper suffix from argv[0] before handing control to easy_install's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"marcin.jab@wp.pl"
] | marcin.jab@wp.pl |
05f02000e82ea0aa84a9665a9401fad1feec02b2 | 03587c34370995706871e45320264c2636d795f0 | /app/views/loja/AvaliacaoView.py | a391584f99994610a29e9c4c605cadf597837918 | [] | no_license | caiomarinhodev/fastdelivery | 29d1f95dc7204369806e6b99298c9aaafab5ea9f | 6ad45aa596e204b793ba47f7a0c1b918a2e0890a | refs/heads/master | 2020-03-12T03:18:04.507010 | 2018-04-20T23:49:13 | 2018-04-20T23:49:13 | 130,421,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.views.generic import DetailView
from app.models import Request, Avaliacao
class AvaliacaoView(LoginRequiredMixin, DetailView):
template_name = 'loja/avaliacao_cliente.html'
login_url = '/define/login/'
model = Request
context_object_name = 'pedido_obj'
def get(self, request, *args, **kwargs):
return super(AvaliacaoView, self).get(request, *args, **kwargs)
def add_avaliacao(request):
data = request.POST
pedido = Request.objects.get(id=data['pedido'])
if 'comentario' and 'nota' in data:
aval = Avaliacao(cliente=pedido.cliente, estabelecimento=pedido.estabelecimento, nota=data['nota'],
comentario=data['comentario'])
aval.save()
else:
messages.error(request, 'Insira uma nota e um comentario')
return redirect('/avaliacao/pedido/' + str(data['pedido']))
messages.success(request, 'Avaliacao Realizada com Sucesso')
return redirect('/acompanhar-pedido/' + str(data['pedido']))
| [
"caiomarinho8@gmail.com"
] | caiomarinho8@gmail.com |
72a932800f08712333825d0cb293d2481d559bd7 | bfd97c268252b1e28addfb924cbc052e1bafb828 | /ball_detection_video.py | 9956c8e73d8516db996f21c31f3da847cf11ac0b | [] | no_license | yufanana/FanBot | 417938dd2ee8b3ab670703d6fdd50b14b1413a85 | 8cc02212300646e0f9ef349a0cebdc3ce34c937a | refs/heads/main | 2023-06-21T10:23:36.048109 | 2021-08-01T08:39:31 | 2021-08-01T08:39:31 | 391,380,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,329 | py | #!/usr/bin/env python
import numpy as np
import cv2
def filter_color(rgb_image, lower_bound_color, upper_bound_color):
    """Return a binary mask of the pixels whose HSV values fall in the given bounds."""
    hsv = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, lower_bound_color, upper_bound_color)
def getContours(mask):
    """Find the external contours of a binary mask (simple chain approximation)."""
    found, _hierarchy = cv2.findContours(mask.copy(),
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
    return found
def draw_ball_contour(binary_image, rgb_image, contours):
    """Outline sufficiently large contours and circle the largest one (the ball).

    Draws in place on rgb_image and shows the annotated frame in a window.
    """
    max_area = 0
    max_c = None
    for c in contours:
        area = cv2.contourArea(c)
        if area > max_area:
            max_area = area
            max_c = c
        # Only outline contours that are sufficiently large.
        if (area > 3000):
            cv2.drawContours(rgb_image, [c], -1, (150, 250, 150), 2)
    # Bug fixes: the original crashed on frames with no contours (max_c was
    # None), and it outlined [c] — the *last* contour — instead of [max_c].
    if max_c is not None:
        ((x, y), radius) = cv2.minEnclosingCircle(max_c)
        cv2.drawContours(rgb_image, [max_c], -1, (150, 250, 150), 2)
        cx, cy = get_contour_center(max_c)
        cv2.circle(rgb_image, (cx, cy), (int)(radius), (0, 0, 255), 2)
    cv2.imshow("RGB Image Contours", rgb_image)
def get_contour_center(contour):
    """Return the (cx, cy) centroid of a contour, or (-1, -1) when degenerate."""
    moments = cv2.moments(contour)
    if moments['m00'] == 0:
        # Zero area: centroid is undefined, keep the original sentinel.
        return -1, -1
    return int(moments['m10'] / moments['m00']), int(moments['m01'] / moments['m00'])
def on_trackbar(val):
    # OpenCV requires a callback for createTrackbar; values are read back
    # via getTrackbarPos instead, so there is nothing to do here.
    pass
def createTrackBars():
    """Create the HSV threshold window with six trackbars (default: green ball)."""
    cv2.namedWindow("TrackedBars")
    cv2.resizeWindow("TrackedBars", 640, 100)
    # (name, initial value, maximum) for each HSV bound slider.
    sliders = (("Hue Min", 40, 179), ("Hue Max", 65, 179),
               ("Sat Min", 120, 255), ("Sat Max", 255, 255),
               ("Val Min", 50, 255), ("Val Max", 255, 255))
    for name, value, maximum in sliders:
        cv2.createTrackbar(name, "TrackedBars", value, maximum, on_trackbar)
def getTrackBarValues():
    """Read the six HSV trackbars and return (lower, upper) bound arrays."""
    hue_min, hue_max, sat_min, sat_max, val_min, val_max = [
        cv2.getTrackbarPos(name, "TrackedBars")
        for name in ("Hue Min", "Hue Max", "Sat Min",
                     "Sat Max", "Val Min", "Val Max")
    ]
    lower = np.array([hue_min, sat_min, val_min])
    upper = np.array([hue_max, sat_max, val_max])
    return lower, upper
def main():
    """Loop over the ball video, masking by the live HSV trackbar bounds and
    drawing the detected ball each frame. 'r' restarts the clip, 'q' quits."""
    # create capture object from file
    video_capture = cv2.VideoCapture('videos/ball_video1.mp4')
    createTrackBars()
    frame_counter = 0
    while(True):
        frame_counter += 1
        ret, frame = video_capture.read()
        lower,upper = getTrackBarValues()
        # loop the video
        if frame_counter == video_capture.get(cv2.CAP_PROP_FRAME_COUNT):
            frame_counter = 0
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
        # #reached end of video, exit program
        # if ret == False:
        #     print('ret is False')
        #     break
        # waitKey also services the GUI event loop; mask to the low byte
        # so key codes compare correctly across platforms.
        waitKey = cv2.waitKey(1) & 0xFF
        if waitKey == ord('r'):
            frame_counter = 0
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
            print('R pressed. Restarting...')
        elif waitKey == ord('q'):
            print('Q pressed. Exiting...')
            break
        # ball detection algorithm
        # yellowLower = (30, 150, 100)
        # yellowUpper = (50, 255, 255)
        # binary_frame_mask = filter_color(frame, yellowLower, yellowUpper)
        binary_frame_mask = filter_color(frame, lower, upper)
        contours = getContours(binary_frame_mask)
        draw_ball_contour(binary_frame_mask, frame,contours)
        lower,upper = getTrackBarValues()
        print("lower: {}, upper: {}".format(lower,upper))
    video_capture.release()
    cv2.destroyAllWindows()
# Run the detector only when executed as a script (not on import).
if __name__ == '__main__':
    main()
"yufan.fong@gmail.com"
] | yufan.fong@gmail.com |
14e191ebed6b1010306ffabf744cedd50eaa84c0 | 24272539830eeaa048238c38c02721039cde4578 | /neuron_simulator_service/DSGC_SAC/array_DSGC_SAC.py | 4f17ad2c1090f8e8c113de58efb48cc7203c28c1 | [
"MIT"
] | permissive | jpm343/RetinaX | ac993c61b881e778cb0442490294fbb1ceadcdad | 63f84209b4f8bdcdc88f35c54a03e7b7c56f4ab3 | refs/heads/main | 2023-01-23T21:24:42.717425 | 2020-11-23T15:56:15 | 2020-11-23T15:56:15 | 303,671,438 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,265 | py | """
Created on May 2019
@author: Leo Medina
"""
from __future__ import division
from DSGC_SAC import *
import numpy as np
import time
import datetime
import os
import iodef as io
import sys
# Parameters
###############################################################################
# Sweep grid: conductance is sampled log-uniformly between 10**G_MIN_EXP and
# 10**G_MAX_EXP, bar speed between 10**VELOC_MIN_EXP and 10**VELOC_MAX_EXP.
G_MIN_EXP = -6
G_MAX_EXP = -3
G_NUMBER = 20
VELOC_MIN_EXP = -1
VELOC_MAX_EXP = 2
VELOC_NUMBER = 20
PARAM_NUMBER = sys.argv[1]  # index into the flattened (gmax, speed) grid
diag = np.sqrt(250 ** 2 + 300 ** 2)
angles = (0, 45, 90, 135, 180, 225, 270, 315)
d_inits = (0, 0, 0, diag, 2 * dsgc_x, 2 * diag, 2 * dsgc_y, diag)
# Factors determined by simulations:
excfactor = 3.6  # factor of excitation (times of BP excitation on SAC)
inhfactor = 0.46  # factor of inhibition (times of SAC-SAC inhibition)
overlap_thresh = 500  # threshold for area overlap in SAC-DSGC synapse
olap_factor = 1  # Multiplies the overlap area
###############################################################################
# NOTE(review): the original indentation was lost; the nesting below is
# reconstructed from the data flow — confirm against version control.
# Optional parameter overrides and cached GABA initial state from disk.
if os.path.exists('./array-DSGC-SAC.json'):
    io.load_params_json('./array-DSGC-SAC.json', globals())
if os.path.exists('./gabaInit.json'):
    print "Loading GABA initial parameters from file..."
    sc1i, sc2i = io.load_gabaInit_file('./gabaInit.json')
else:
    sc1i, sc2i = s.gabaInit(s.excGmin)
gmax = np.logspace(G_MIN_EXP, G_MAX_EXP, G_NUMBER)
stimuliSpeed = np.logspace(VELOC_MIN_EXP, VELOC_MAX_EXP, VELOC_NUMBER)
synapse_type = s.synapse_type  # Note this param is in SACnetwork.py
params = [(g, v) for g in gmax for v in stimuliSpeed]
start_time = time.time()
today = datetime.date.today()
print "Simulation using " + synapse_type
print "Starting simulation: "
simPath = "./results/%s/" % today
if not os.path.exists(simPath):  # only process 0 attempts to create folder
    os.makedirs(simPath)
# Column headers for the per-angle results and the DSI summary files.
numv = len(s.amac_rec)
numsc = len(s.amac_rec)
head_entries = ['v%dp' % n for n in range(numv)] + \
               ['DSv%d' % n for n in range(numv)] + \
               ['sc%dp' % n for n in range(numsc)] +\
               ['DSsc%d' % n for n in range(numsc)] +\
               ['sc%dpa' % n for n in range(numsc)] +\
               ['DSsca%d' % n for n in range(numsc)]
head = 'speed,gmax,angle,nspk,width,speed1,' + ','.join(head_entries)
head_dsi = 'speed,gmax,dsi,dsi_angle180'
grid = {'Initial speed': stimuliSpeed[0],
        'Final Speed': stimuliSpeed[-1],
        'Speed points': len(stimuliSpeed),
        'Initial conductance': gmax[0],
        'Final conductance': gmax[-1],
        'Conductance points': len(gmax)}
if not isinstance(PARAM_NUMBER, list):
    PARAM_NUMBER = [PARAM_NUMBER]
res = []
res_dsi = []
for param_num in PARAM_NUMBER:
    g, sp = params[int(param_num)]
    print "Running sim with speed %f and max conductance %f" % (sp, g)
    # Assuming a field of about 1 mm, simulation must be long enough for bar to
    # cover the entire field. sp in mm/s, multiply by 1000 to convert to ms
    t_total = s.t_es + (1.0 +
                        s.stim_param['bar_width'] /
                        1000.0) / sp * 1000.0
    if t_total < 1500:
        t_total = 1500  # Sim no shorter than 1.5 s, see calculation of DSI
    s.tstop = t_total
    gBPsyn = bipolar_DSGC_synapses(s)
    GABAsynpos, synM = SAC_DSGC_synapses(s, thresh=overlap_thresh,
                                         overlap_factor=olap_factor,
                                         sym=is_symmetric)
    nspk = []
    # One run per stimulus direction; spike counts feed the DSI computation.
    for (ang, d_init) in zip(angles, d_inits):
        stim_param['bar_x_init'] = d_init
        stim_param['bar_angle'] = ang
        stim_param['bar_speed'] = sp
        # Setting DSGC inputs ################################################
        # This is for DSGC inputs. Note that SAC syanpses are set and defined
        # SAC_network
        dscell.setv(v_init, v_init, v_init_axon)
        gBPsyn = s.topo.excGset(gBPsyn, gmax=excfactor * g, gmin=excGmin,
                                synapse_type=synapse_type, tau=tau)
        s.stim.set_stimulus(gBPsyn, stimulus_type, t_inf, synapse_type,
                            **stim_param)
        s.topo.gabaGset(gSACsyn, synM, inhfactor * s.gabaGmax, gabaGmin,
                        k1, k2, th1, th2, e_rev)
        apcount = s.h.APCount(dscell.axon(.9))
        res_i, amac_vecs, synapse_vecs = s.main(excmax=g,
                                                gabaI=(sc1i, sc2i),
                                                main=1,
                                                stim_param=stim_param,
                                                tstop=t_total,
                                                v_init=SAC_v_init)
        nspk.append(apcount.n)
        res_i = np.hstack([sp, g, ang, apcount.n, np.hstack(res_i)])
        res.append(res_i)
    # DSI needs the spike counts from every direction, so this runs after the
    # angle loop. NOTE(review): placement reconstructed — confirm.
    res_dsi.append(np.hstack([sp,
                              g,
                              s.tl.DSI(angles, nspk),
                              s.tl.DSI(angles, nspk, ref_angle=180)]))
    print "Time elapsed: ", time.time() - start_time, "s"
    np.savetxt(simPath +
               synapse_type +
               '_%s_%s_%s_gmax_data%s_heat-grande_%s_array%s.txt' %
               (s.amac_rec[0], s.amac_rec[1],
                s.amac_rec[2], s.dataidx,
                s.today,
                param_num),
               np.array(res),
               fmt='%6g', delimiter=',', header=head)
    np.savetxt(simPath +
               synapse_type +
               '_data_%s_date_%s_array_%s_dsi.txt' %
               (s.dataidx,
                s.today,
                param_num),
               np.array(res_dsi),
               fmt='%6g', delimiter=',', header=head_dsi)
    param_set = {'Speed': sp,
                 'Conductance': g}
    # Parameters
    params_dict = dict()
    params_dict.update(param_set)
    params_dict.update(grid)
    params_dict.update(s.all_params)
    io.save_params_json(simPath +
                        synapse_type +
                        '_simulation_parameters_%s_%s.json' %
                        (s.today, param_num), params_dict)
# When several grid points were run, also write one combined results file.
if len(PARAM_NUMBER) > 1:
    np.savetxt(simPath +
               synapse_type +
               '_%s_%s_%s_gmax_data%s_heat-grande_%s_all.txt' %
               (s.amac_rec[0], s.amac_rec[1],
                s.amac_rec[2], s.dataidx,
                s.today),
               np.array(res),
               fmt='%6g', delimiter=',', header=head)
| [
"juan.martinez.a@usach.cl"
] | juan.martinez.a@usach.cl |
1814f74e34e2e1cff5affcee75d2aa83ee5444c8 | 7b4705831a6cf12ace488022b1973da29854814f | /multilingual/jsonify.py | 05aa72af25991cf7de15611ab75edf287449361d | [] | no_license | nitaku/WordNet_Atlas_core | c38b556c16dd478c5e1939f198ac4f3b75249458 | 84bdf287933991a00377afb691343139c27bcd61 | refs/heads/master | 2023-08-04T10:47:49.175610 | 2023-07-20T15:10:39 | 2023-07-20T15:10:39 | 16,588,554 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | import sys
lang = sys.argv[1]
from graph_tool.all import *
g = load_graph(lang+'/wnen30_core_noun_tree_longest_w_senses_'+lang+'.graphml.xml.gz')
graph = {
'nodes': [],
'links': []
}
is_synset = g.vertex_properties['is_synset']
lemma = g.vertex_properties['lemma']
# sensenum = g.vertex_properties['sensenum']
pos = g.vertex_properties['pos']
senseid = g.new_vertex_property('int32_t')
g.vertex_properties['senseid'] = senseid
print 'Adding fake senseids...'
sid = 0
for v in g.vertices():
if not is_synset[v]:
senseid[v] = sid
sid += 1
# sensekey = g.vertex_properties['sensekey']
synsetid = g.vertex_properties['synsetid']
definition = g.vertex_properties['definition']
# is_core_sense = g.vertex_properties['is_core_sense']
tree_link = g.edge_properties['tree_link']
print 'Converting...'
for v in g.vertices():
if not is_synset[v]:
o = {
'type': 'sense',
'lemma': lemma[v],
# 'sensenum': sensenum[v],
'pos': pos[v],
'id': senseid[v],
# 'sensekey': sensekey[v]
}
# if is_core_sense[v]:
# o['is_core'] = True
else:
assert is_synset[v]
o = {
'type': 'synset',
'id': synsetid[v],
'defintion': definition[v],
'pos': pos[v]
}
graph['nodes'].append(o)
for e in g.edges():
source = e.source()
target = e.target()
if not is_synset[source]:
# source is a sense
source_id = senseid[source]
else:
assert is_synset[source]
source_id = synsetid[source]
if not is_synset[target]:
# target is a sense
target_id = senseid[target]
else:
assert is_synset[target]
target_id = synsetid[target]
o = {'source': source_id, 'target': target_id}
if tree_link[e]:
o['is_tree_link'] = True
graph['links'].append(o)
import json
with open('wnen30_core_n_longest_'+lang+'.json','wb') as f:
f.write(json.dumps(graph))
| [
"matteo.abrate@gmail.com"
] | matteo.abrate@gmail.com |
5318d72a8124bd5381730949cdadb96c7de69899 | 4f87d39e58dffa2c7ec83687bdd316052ce6bf3c | /lib/python2.7/linecache.py | 628cc488f2e8762e7386a7f3eb76d63a4430c643 | [] | no_license | MisterRios/Bread_Calculator | aafff9aafad42b4913583f90b99394abf81b3e86 | 4cea008955e288c2752e6f4eb55853755fc85f87 | refs/heads/master | 2021-01-19T10:08:20.034898 | 2014-03-27T08:27:01 | 2014-03-27T08:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | /Users/misterrios/anaconda/lib/python2.7/linecache.py | [
"MisterRios@gmail.com"
] | MisterRios@gmail.com |
6ec673beb0c506a5c90bb8c68908c0c73c13587c | 3a74ac2e7db63069945e5bc620342b4b89b8b201 | /python/dgl/distributed/rpc_server.py | ad47de7104c4d92fc64b87b7cdc82f26fefd6a38 | [
"Apache-2.0"
] | permissive | vishalbelsare/dgl | 5d17ba82f720d742e1274c5d48dac64eca234504 | 512a80b00d2cd35607a542eb5544fa1f1c93a6f6 | refs/heads/master | 2023-08-17T15:09:55.082014 | 2022-01-22T04:25:14 | 2022-01-22T04:25:14 | 167,955,673 | 0 | 0 | Apache-2.0 | 2022-01-23T13:57:57 | 2019-01-28T12:05:37 | Python | UTF-8 | Python | false | false | 4,476 | py | """Functions used by server."""
import time
from . import rpc
from .constants import MAX_QUEUE_SIZE
def start_server(server_id, ip_config, num_servers, num_clients, server_state, \
                 max_queue_size=MAX_QUEUE_SIZE, net_type='socket'):
    """Start DGL server, which will be shared with all the rpc services.

    This is a blocking function -- it returns only when the server shutdown.

    Parameters
    ----------
    server_id : int
        Current server ID (starts from 0).
    ip_config : str
        Path of IP configuration file.
    num_servers : int
        Server count on each machine.
    num_clients : int
        Total number of clients that will be connected to the server.
        Note that, we do not support dynamic connection for now. It means
        that when all the clients connect to server, no client will can be added
        to the cluster.
    server_state : ServerSate object
        Store in main data used by server.
    max_queue_size : int
        Maximal size (bytes) of server queue buffer (~20 GB on default).
        Note that the 20 GB is just an upper-bound because DGL uses zero-copy and
        it will not allocate 20GB memory at once.
    net_type : str
        Networking type. Current options are: 'socket'.
    """
    assert server_id >= 0, 'server_id (%d) cannot be a negative number.' % server_id
    assert num_servers > 0, 'num_servers (%d) must be a positive number.' % num_servers
    assert num_clients >= 0, 'num_client (%d) cannot be a negative number.' % num_clients
    assert max_queue_size > 0, 'queue_size (%d) cannot be a negative number.' % max_queue_size
    # BUG FIX: the original used `net_type in ('socket')`, which is a
    # *substring* test on the string 'socket' (so e.g. 'sock' passed).
    # A one-element tuple gives real membership testing.
    assert net_type in ('socket',), 'net_type (%s) can only be \'socket\'' % net_type
    # Register signal handler.
    rpc.register_sig_handler()
    # Register some basic services
    rpc.register_service(rpc.CLIENT_REGISTER,
                         rpc.ClientRegisterRequest,
                         rpc.ClientRegisterResponse)
    rpc.register_service(rpc.SHUT_DOWN_SERVER,
                         rpc.ShutDownRequest,
                         None)
    rpc.register_service(rpc.GET_NUM_CLIENT,
                         rpc.GetNumberClientsRequest,
                         rpc.GetNumberClientsResponse)
    rpc.register_service(rpc.CLIENT_BARRIER,
                         rpc.ClientBarrierRequest,
                         rpc.ClientBarrierResponse)
    rpc.set_rank(server_id)
    server_namebook = rpc.read_ip_config(ip_config, num_servers)
    machine_id = server_namebook[server_id][0]
    rpc.set_machine_id(machine_id)
    ip_addr = server_namebook[server_id][1]
    port = server_namebook[server_id][2]
    rpc.create_sender(max_queue_size, net_type)
    rpc.create_receiver(max_queue_size, net_type)
    # wait all the senders connect to server.
    # Once all the senders connect to server, server will not
    # accept new sender's connection
    print("Wait connections non-blockingly...")
    rpc.receiver_wait(ip_addr, port, num_clients, blocking=False)
    rpc.set_num_client(num_clients)
    # Recv all the client's IP and assign ID to clients
    addr_list = []
    client_namebook = {}
    for _ in range(num_clients):
        # blocked until request is received
        req, _ = rpc.recv_request()
        assert isinstance(req, rpc.ClientRegisterRequest)
        addr_list.append(req.ip_addr)
    # Sorting gives every server the same address ordering, hence the same
    # client_id assignment.
    addr_list.sort()
    for client_id, addr in enumerate(addr_list):
        client_namebook[client_id] = addr
    for client_id, addr in client_namebook.items():
        client_ip, client_port = addr.split(':')
        # TODO[Rhett]: server should not be blocked endlessly.
        while not rpc.connect_receiver(client_ip, client_port, client_id):
            time.sleep(1)
    if rpc.get_rank() == 0: # server_0 send all the IDs
        for client_id, _ in client_namebook.items():
            register_res = rpc.ClientRegisterResponse(client_id)
            rpc.send_response(client_id, register_res)
    # main service loop: dispatch each incoming request to its handler and
    # forward the handler's response(s), until a handler asks us to exit.
    while True:
        req, client_id = rpc.recv_request()
        res = req.process_request(server_state)
        if res is not None:
            if isinstance(res, list):
                # A handler may fan a reply out to several targets.
                for response in res:
                    target_id, res_data = response
                    rpc.send_response(target_id, res_data)
            elif isinstance(res, str) and res == 'exit':
                break # break the loop and exit server
            else:
                rpc.send_response(client_id, res)
| [
"noreply@github.com"
] | noreply@github.com |
787b8ee86ea8517b3aaf27aba272db0a9e7a2b55 | 37e60c454c1897cc43bbc12717031b5035dbf7d5 | /regression-tests/sparktkregtests/testcases/dicom/dicom_svd_test.py | 9688ceb76b3b3a0e3f824297fa201408e33a83d4 | [
"Apache-2.0"
] | permissive | trustedanalytics/spark-tk | 33f18ff15e2f49d2f9d57c3a4ccdbd352eb2db8c | 5548fc925b5c278263cbdebbd9e8c7593320c2f4 | refs/heads/master | 2021-01-23T22:01:11.183659 | 2017-02-07T01:35:07 | 2017-02-07T01:35:07 | 56,808,917 | 35 | 35 | NOASSERTION | 2020-03-20T00:18:24 | 2016-04-21T22:16:41 | Scala | UTF-8 | Python | false | false | 2,573 | py | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests svd on dicom frame"""
import unittest
from sparktk import dtypes
from sparktkregtests.lib import sparktk_test
from numpy.linalg import svd
from numpy.testing import assert_almost_equal
class SVDDicomTest(sparktk_test.SparkTKTestCase):
    """Exercise sparktk's matrix_svd on a dicom pixeldata frame."""

    def setUp(self):
        """import dicom data for testing"""
        super(SVDDicomTest, self).setUp()
        dataset = self.get_file("dicom_uncompressed")
        dicom = self.context.dicom.import_dcm(dataset)
        # Frame of per-image pixel matrices used by every test below.
        self.frame = dicom.pixeldata

    def test_svd(self):
        """Test the output of svd"""
        self.frame.matrix_svd("imagematrix")
        #get pandas frame of the output
        results = self.frame.to_pandas(self.frame.count())
        #compare U, V and s matrices for each image against numpy's output
        for i, row in results.iterrows():
            actual_U = row['U_imagematrix']
            actual_V = row['Vt_imagematrix']
            actual_s = row['SingularVectors_imagematrix']
            #expected output using numpy's svd
            U, s, V = svd(row['imagematrix'])
            assert_almost_equal(actual_U, U, decimal=4, err_msg="U incorrect")
            assert_almost_equal(actual_V, V, decimal=4, err_msg="V incorrect")
            # SingularVectors is stored as a 1-row matrix, hence actual_s[0].
            assert_almost_equal(
                actual_s[0], s, decimal=4, err_msg="Singual vectors incorrect")

    def test_invalid_column_name(self):
        """Test behavior for invalid column name"""
        with self.assertRaisesRegexp(
                Exception, "column ERR was not found"):
            self.frame.matrix_svd("ERR")

    def test_invalid_param(self):
        """Test behavior for invalid parameter"""
        with self.assertRaisesRegexp(
                Exception, "svd\(\) takes exactly 2 arguments"):
            self.frame.matrix_svd("imagematrix", True)
if __name__ == "__main__":
unittest.main()
| [
"blbarker@users.noreply.github.com"
] | blbarker@users.noreply.github.com |
2765b12ae65d16d75ba1fdd4fd7dd6bf233b767a | de49501d5a8ca02de929038b5e63964d04249c3c | /misc/test.py | ac68a23d9f360ca65e415cd4fa12d8e2108fc8ef | [] | no_license | syslabcom/skimpyGimpy | 4edf38faa82de825c01c48e779e00284c1fb6aa7 | d1534a572b0311431728fb77faf9da9493112357 | refs/heads/master | 2020-06-24T06:50:17.043055 | 2019-07-25T18:52:34 | 2019-07-25T18:52:34 | 198,885,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
import waveTools
inputString = "hello world"
indexfilepath = r"c:\tmp\skimpyGimpy\waveIndex.zip"
audioString = waveTools.WaveAudioSpelling(inputString, indexfilepath)
outputFile = file("waveOutput.wav", "wb")
outputFile.write(audioString) | [
"deroiste@syslab.com"
] | deroiste@syslab.com |
115dd8125ff777bd72beae826cad30679ca99cdb | 5c12834d4b59901a8aaa634a8ecb71bcfab1d4f2 | /mecanica/settings.py | 5c2946448a7a1cbaef7bea0fa495ae9398a52d34 | [] | no_license | Josebonilla16/servicioxela | 9cad6ce8fa60fc62cc5f04f8fbc4bc886fb51602 | 74d72d2016f22e338d73c5db9554e9cb32e444db | refs/heads/master | 2020-04-04T17:49:55.784616 | 2018-11-06T17:00:12 | 2018-11-06T17:00:12 | 156,137,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for mecanica project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j1dwf6+dj3l6$es&8uuwhz80ag$@-85o!o!&xlzv=te&zt10f7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): 'pythonanywhre.com' looks like a typo for
# 'pythonanywhere.com' — confirm against the actual deployment host.
ALLOWED_HOSTS = ['josebonilla.pythonanywhre.com', '127.0.0.1',]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'servicios',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mecanica.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mecanica.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'es-GT'
TIME_ZONE = 'America/Guatemala'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
| [
"Josephman1253@hotmail.com"
] | Josephman1253@hotmail.com |
9d6431f510ee39712539aa43d4d446d9be49cd10 | 50f85af9d7cb860b0b7d8cf3dd4fb3df9110b1b0 | /paho_mqtt.py | 52665ad63525f696cd58aab825b34226b18fcb34 | [] | no_license | lijinxianglogo/python_multi | e63c39e3f90e404c2630460208acb4b65d1a428b | 0d888d7cff85ffd3b1c612447a787bd1017a9b35 | refs/heads/master | 2021-06-07T17:48:58.644567 | 2021-06-04T03:28:05 | 2021-06-04T03:28:05 | 175,386,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
# Create an MQTT client instance (Python 2 script — note the print statements).
client = mqtt.Client()
# Account name and password for the broker.
client.username_pw_set('ehigh', 'Abc123')
# Connection callback: subscribe once the broker accepts us.
def connect_callback(client, userdata, flags, rc):
    # print client, userdata, flags, rc
    client.subscribe('/EH100602/tx/#')
# Message callback: dump every message received on the subscribed topics.
def message_callback(client, userdata, message):
    print client, userdata, message
    print message.payload
client.on_connect = connect_callback
client.on_message = message_callback
# Broker IP, port, and keep-alive interval (seconds).
client.connect("192.168.4.44", 1883, 3600)
client.loop_forever()
"lijinxiang@everhigh.com.cn"
] | lijinxiang@everhigh.com.cn |
6c544acbaf213fa2a33ad58969026c3779b61074 | f44f397f5ef8ca9428f3b3a2cb6232169de9d118 | /final_spokenwords.py | 6415e01c0443e25ea75156c979b1bbc740e230d4 | [] | no_license | ideepika/dgplug_practice | a712eb4e479020e67a665514a955785203c81ba3 | 16a0416a34a7f72e1cfa65b48a54e5772585d3b9 | refs/heads/master | 2021-10-09T10:28:58.358801 | 2018-12-26T08:53:14 | 2018-12-26T08:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | import urllib.request
import requests
import urllib.parse
from bs4 import BeautifulSoup
import re
global i
i=0
url = "https://dgplug.org/irclogs/2017/"
url_open = urllib.request.urlopen(url)
""" create a file with all the logs aas text input """
def extract_links(url_open):
    """Fetch every link on the irclogs index page and append each linked
    log's body to 'logfile.txt'.

    url_open: an open file-like HTTP response for the index page.
    Side effect: appends to (and creates) 'logfile.txt' in the CWD; issues
    one HTTP GET per <a href> found.
    """
    soup = BeautifulSoup(url_open, 'lxml')
    # print(soup)
    for line in soup.find_all('a'):
        link = line.get('href')
        response = requests.get(url + link)
        with open('logfile.txt', 'a') as logobj:
            logobj.write(response.text)
""" working on logfile and extracting nicks: spokenlines"""
def spoken_lines():
    """Count how many lines each IRC nick spoke in 'logfile.txt'.

    Reads the previously assembled log file, extracts every '<nick>'
    marker and tallies one line per occurrence. Prints the set of nicks,
    the tally dict, and a per-nick summary.

    Returns:
        dict: mapping nick -> number of lines spoken (also printed).
    """
    with open('logfile.txt', 'r') as logobj:
        data = logobj.read()
    nicks = re.findall(r'<(.*?)>', data)
    nicks_set = set(nicks)
    print(nicks_set)
    # BUG FIX: nick_count must exist *before* the tally loop; the original
    # initialised it after the loop, so the first lookup raised NameError.
    nick_count = {}
    for nick in nicks:
        nick_count[nick] = nick_count.get(nick, 0) + 1
    print(nick_count)
    for keys, values in nick_count.items():
        print(keys, ":", values, "lines")
    return nick_count
extract_links(url_open)
spoken_lines()
| [
"deepikaupadhyay01@gmail.com"
] | deepikaupadhyay01@gmail.com |
615cd6b556994a362654f132d88440773408a66f | dad00a62315bc1b6434babc594c08092e508c739 | /python/django/django-vuejs/django-vuejs/homepage/migrations/0001_initial.py | 7555b7e1c46cf33260abb8ba7e400e72045eb7bb | [] | no_license | geovanecomp/Studying | 28e48fae82b2e2c3d8ff0bc3e1ad2bb17dc4371d | e422e77f44e36969a87855d30e28a86c74410d90 | refs/heads/master | 2020-04-16T01:44:21.323679 | 2018-05-09T18:20:14 | 2018-05-09T18:20:14 | 83,228,749 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 2.0.2 on 2018-02-04 00:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Games',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=255)),
('plataformas', models.CharField(max_length=255)),
],
),
]
| [
"geovane.pacheco99@gmail.com"
] | geovane.pacheco99@gmail.com |
d2811e09b26d29f12842bc4f3cbe3fb130dfc9d8 | 3266ca1e55be0b23d1a29462d017dd4b6a7e031e | /util/instr.py | 8f1ce82af86cda021c94550565b2b64556988fea | [] | no_license | Tim-Nosco/LazyLabel | 471c0ddb17f76d24bdf0415ebbc96ecc46c261ce | c8e3ef0ee912d74aaa96b6c82d51184a03b0da80 | refs/heads/master | 2021-07-17T22:08:36.582581 | 2017-10-26T19:08:37 | 2017-10-26T19:08:37 | 107,707,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | import re,logging
from z3 import *
class Instruction:
    """Base class for one parsed LLVM-style instruction, holding its result
    name, bit-width type, and two operand names, with helpers to turn the
    result into a z3 BitVec."""

    # NOTE(review): the character class [0-9A-z] also matches the ASCII
    # punctuation between 'Z' and 'a' — confirm this is intentional.
    valid_names = r"([0-9A-z\%\<\>]+?)"
    # Pattern for "<res> = {opcode} <type> <arg1>, <arg2>" lines; the {}
    # placeholder is filled in by subclasses/parsers via .format().
    regx = r"^{0} = {{}} {0} {0}, {0}$".format(valid_names)
    # Matches LLVM integer types of the form iN (e.g. i32) and captures N.
    type_regx = re.compile(r"^i(\d+)$")
    def __init__(self,idx,result,rtype,arg1,arg2):
        # NOTE(review): 'idx' is accepted but never stored — confirm whether
        # callers rely on it being ignored.
        self.result = result
        self.type = rtype
        self.arg1 = arg1
        self.arg2 = arg2
    def setup(self):
        # Parse the textual type into an integer bit width.
        self.convert_type()
    def get_new_vars(self):
        # Returns [(original_name, BitVec)] for the result variable.
        return [self.create_result_vec()]
    def unknown_vars(self):
        # Operand names that still need to be resolved to values/vectors.
        return self.arg1, self.arg2
    def fill_unknown_vars(self,*args):
        # Replace the operand names with resolved values, in order.
        self.arg1 = args[0]
        self.arg2 = args[1]
    def create_result_vec(self):
        # Swap the textual result name for a z3 BitVec of self.type bits;
        # returns the (old_name, new_vec) pair.
        tmp = self.result
        self.result = BitVec(tmp,self.type)
        return (tmp,self.result)
    def convert_type(self):
        # Determine the bit width from an 'iN' type string; anything else
        # is a parse failure.
        m = re.match(self.type_regx,self.type)
        if m:
            self.type = int(m.group(1))
        else:
            logging.error("Unable to interpret type: %s",self.type)
            raise Exception("Failed to parse instruction %s",self)
    def execute(self):
        # Default: subclasses are expected to override; logs and returns
        # an empty constraint list.
        logging.debug("Unimplemented execute: %s",self)
        return []
    def __repr__(self):
        return self.__class__.__name__
"tim.nosco@gmail.com"
] | tim.nosco@gmail.com |
e5c8432caea59848fec92f0f2a6de7d1d30703a9 | 11b58115f4ecd8e16e812da2c556ccb47a5cdd0c | /PYTHON/MRAC.py | f452a2c766cdf06fa0083725a6f6fdcdb1e5e802 | [] | no_license | PrestonWang/AA203-Final-Project | 45221c9d0789d1ac86f9ea02a7e08a1a3c19daf6 | dc10740d8fdafe278d7fceb772e0482ddbcb3069 | refs/heads/master | 2022-10-20T10:08:24.577732 | 2020-06-12T06:44:47 | 2020-06-12T06:44:47 | 259,757,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import boom
import numpy as np
boom = boom.boom(25,0.5, 0.1, .07)
boom.eps = 1e-6
q0 = np.array([5.0, np.pi/2,0.0,0.0])# initial state
params_estimate, P = boom.get_estimate(q0, np.array([10, 1.5/2, 0]))
| [
"prestonwang95@gmail.com"
] | prestonwang95@gmail.com |
528ec917b2ccb9f8c90d803158dacddb1211387a | 8575cabe675076d8ab87d5470901878d5e8b2690 | /app.py | cd3baa9647061914a303fb8246dafda0b100ef13 | [] | no_license | haxmanster/Flask_admin_DevOps | 00a5371297812636180721b495b8751472ffa130 | da7e62319f3efb08c258937bd1a99c3f3938f9df | refs/heads/master | 2020-03-27T18:05:15.167587 | 2018-08-31T13:42:04 | 2018-08-31T13:42:04 | 146,897,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,652 | py | import os
import os.path as op
from flask import Flask, url_for, redirect, render_template, request
from flask_sqlalchemy import SQLAlchemy
from wtforms import form, fields, validators
import flask_admin as admin
import flask_login as login
from flask_admin.contrib import sqla
from flask_admin import helpers, expose
from werkzeug.security import generate_password_hash, check_password_hash
from flask_admin.contrib.fileadmin import FileAdmin
# Create Flask application
app = Flask(__name__)
# Create dummy secrey key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model.
class User(db.Model):
    """Application user persisted via SQLAlchemy; also implements the
    Flask-Login user protocol (is_authenticated / is_active / is_anonymous /
    get_id)."""
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(100))
    last_name = db.Column(db.String(100))
    login = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120))
    # Stores a werkzeug password hash, never the plaintext password.
    password = db.Column(db.String(64))

    # Flask-Login integration
    # NOTE: is_authenticated, is_active, and is_anonymous
    # are methods in Flask-Login < 0.3.0
    @property
    def is_authenticated(self):
        return True

    @property
    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        return self.id

    # Required for administrative interface
    def __unicode__(self):
        # BUG FIX: the model has no 'username' attribute (the column is
        # named 'login'), so the original 'return self.username' always
        # raised AttributeError when flask-admin rendered the user.
        return self.login
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
    """Login form for flask-login: validates that the user exists and that
    the submitted password matches the stored werkzeug hash."""
    login = fields.StringField(validators=[validators.required()])
    password = fields.PasswordField(validators=[validators.required()])

    def validate_login(self, field):
        """WTForms hook: raises ValidationError on unknown user or bad
        password; silently passes otherwise."""
        user = self.get_user()

        if user is None:
            raise validators.ValidationError('Invalid user')

        # we're comparing the plaintext pw with the hash from the db
        if not check_password_hash(user.password, self.password.data):
            # to compare plain text passwords use
            # if user.password != self.password.data:
            raise validators.ValidationError('Invalid password')

    def get_user(self):
        # Look the user up by the submitted login name (unique column).
        return db.session.query(User).filter_by(login=self.login.data).first()
class RegistrationForm(form.Form):
    """Registration form: collects login/email/password and rejects
    duplicate login names."""
    login = fields.StringField(validators=[validators.required()])
    email = fields.StringField()
    password = fields.PasswordField(validators=[validators.required()])

    def validate_login(self, field):
        """WTForms hook: refuse a login name that already exists."""
        if db.session.query(User).filter_by(login=self.login.data).count() > 0:
            raise validators.ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
    """Attach a Flask-Login manager to the app and register the user
    loader used to resolve session ids back to User rows."""
    login_manager = login.LoginManager()
    login_manager.init_app(app)

    # Create user loader function
    @login_manager.user_loader
    def load_user(user_id):
        return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqla.ModelView):
    """Admin model view restricted to authenticated users."""
    def is_accessible(self):
        # flask-admin hides/blocks the view when this returns False.
        return login.current_user.is_authenticated
# Create customized index view class that handles login & registration
class MyAdminIndexView(admin.AdminIndexView):
    """Admin index view that adds login, registration and logout pages and
    redirects anonymous visitors to the login page."""

    @expose('/')
    def index(self):
        # Anonymous users are bounced to the login page.
        if not login.current_user.is_authenticated:
            return redirect(url_for('.login_view'))
        return super(MyAdminIndexView, self).index()

    @expose('/login/', methods=('GET', 'POST'))
    def login_view(self):
        # handle user login
        form = LoginForm(request.form)
        if helpers.validate_form_on_submit(form):
            user = form.get_user()
            login.login_user(user)

        if login.current_user.is_authenticated:
            return redirect(url_for('.index'))
        # On failure, re-render the index template with the form and a
        # link to the registration page.
        link = '<p>Don\'t have an account? <a href="' + url_for('.register_view') + '">Click here to register.</a></p>'
        self._template_args['form'] = form
        self._template_args['link'] = link
        return super(MyAdminIndexView, self).index()

    @expose('/register/', methods=('GET', 'POST'))
    def register_view(self):
        # Handle new-user registration; logs the user in on success.
        form = RegistrationForm(request.form)
        if helpers.validate_form_on_submit(form):
            user = User()

            form.populate_obj(user)
            # we hash the users password to avoid saving it as plaintext in the db,
            # remove to use plain text:
            user.password = generate_password_hash(form.password.data)

            db.session.add(user)
            db.session.commit()

            login.login_user(user)
            return redirect(url_for('.index'))
        link = '<p>Already have an account? <a href="' + url_for('.login_view') + '">Click here to log in.</a></p>'
        self._template_args['form'] = form
        self._template_args['link'] = link
        return super(MyAdminIndexView, self).index()

    @expose('/logout/')
    def logout_view(self):
        login.logout_user()
        return redirect(url_for('.index'))
# Flask views
@app.route('/')
def index():
    """Public landing page (outside the admin)."""
    return render_template('index.html')
# Initialize flask-login
init_login()

# Create admin
# NOTE(review): this rebinding shadows the 'flask_admin as admin' module
# with the Admin instance — intentional here, but confirm nothing below
# still needs the module.
admin = admin.Admin(app, 'Example: Auth', index_view=MyAdminIndexView(), base_template='my_master.html')

# Add view
admin.add_view(MyModelView(User, db.session))
# File browser rooted at ./storage next to this module ('Pliki' = Files).
path = op.join(op.dirname(__file__), 'storage')
admin.add_view(FileAdmin(path, '/storage/', name='Pliki'))
def build_sample_db():
    """
    Populate a small db with some example entries.

    Drops and recreates all tables, then inserts one User per entry in the
    (currently empty) name lists below and commits the session.
    """
    db.drop_all()
    db.create_all()

    # passwords are hashed, to use plaintext passwords instead:
    # test_user = User(login="test", password="test")
    #test_user = User(login="test", password=generate_password_hash("test"))
    #db.session.add(test_user)

    first_names = []
    last_names = []

    # Iterate the name pairs directly instead of indexing by range().
    for first, last in zip(first_names, last_names):
        user = User()
        user.first_name = first
        user.last_name = last
        user.login = first.lower()
        user.email = user.login + "@example.com"
        # BUG FIX: the original hashed user.password, which was never
        # assigned (None) and would crash once the name lists are filled.
        # Use the login as a throwaway sample password instead.
        user.password = generate_password_hash(user.login)
        db.session.add(user)
    # Persist whatever was added (the original never committed the loop's
    # inserts; a no-op when the lists are empty).
    db.session.commit()
    return
if __name__ == '__main__':
    # Build a sample db on the fly, if one does not exist yet.
    app_dir = os.path.realpath(os.path.dirname(__file__))
    database_path = os.path.join(app_dir, app.config['DATABASE_FILE'])
    if not os.path.exists(database_path):
        build_sample_db()

    # Start app (development server; DEBUG must be off in production).
    app.run(debug=True, port=5002)
"thuy05061986@gmail.com"
] | thuy05061986@gmail.com |
7b8a04e059b078e1ba5b84ff1d8be0dcdec7a196 | d6b4478e9530d9c395fb089f9c1e1c98ed631b67 | /Genetic_Algorithm_Take_3.py | 21ca313ee708607bbde17576cd877d90640ea910 | [] | no_license | kilbyjmichael/MutateMe | a4929712bd92ae1acbf8abd929f7fc17e2947a36 | c6d2a117dbe57698dbd9bd11e3021b28eeb9c695 | refs/heads/master | 2021-01-22T20:19:05.492546 | 2015-04-24T05:34:48 | 2015-04-24T05:34:48 | 34,408,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | #!/usr/bin/python
'''Mates colors!'''
import random
from PIL import Image
IMG_SIZE = (250,250)
IMG_MODE = 'RGB'
WHITE = '#FFFFFF'
BLACK = '#000000'
MAX_RGB = 255
TARGET = [137,22,88]
DNA_SIZE = 3
POP_SIZE = 200
GENERATIONS = 500
MUTATE_CHANCE = [1,200] # 1 in 100
def random_color():
    """Return a uniformly random channel value in [0, MAX_RGB)."""
    return random.randrange(MAX_RGB)
def make_population():
    """Return the initial population: POP_SIZE random DNA strands, each a
    list of DNA_SIZE random channel values.

    FIX: the original hard-coded a 3-slot list ([0, 0, 0]) while looping
    DNA_SIZE times, which raised IndexError for any DNA_SIZE > 3.
    """
    return [[random_color() for _ in range(DNA_SIZE)]
            for _ in range(POP_SIZE)]
def mutate(dna_sample):
    """Return a copy of dna_sample where each gene is replaced by a fresh
    random value with probability MUTATE_CHANCE[0]/MUTATE_CHANCE[1]."""
    # Use MUTATE_CHANCE to create randomness in population
    dna = []
    for color in dna_sample:
        # random.random()*200 lands in [1, 2) — i.e. int(...) == 1 — with
        # probability 1/200, matching the configured mutation rate.
        if int(random.random()*(MUTATE_CHANCE[1]/MUTATE_CHANCE[0])) == 1:
            dna.append(random_color())
        else:
            dna.append(color)
    return dna
def fitness(dna):
    """Score how far *dna* is from TARGET (0.0 means an exact match).

    Each of the three channels contributes its absolute difference from
    the corresponding TARGET channel, normalised by the target value and
    scaled by 10.
    """
    total = 0
    for i in range(3):
        # BUG FIX: measure the *difference* from the target as the comment
        # intended; the original used '+', and abs() of a sum of
        # non-negative channels is a no-op, so the score only grew with
        # the gene values instead of their distance from TARGET.
        total += (abs(dna[i] - TARGET[i]) / TARGET[i]) * 10
    return total
def mate(mother, father):
    """Produce a child by taking each gene, independently and uniformly at
    random, from one of the two parents."""
    return [random.choice(pair) for pair in zip(mother, father)]
def weighted_choice(items):
    """
    Pick a random element from *items*, a list of (item, weight) tuples,
    with probability proportional to its weight; the last item is returned
    on floating-point edge cases. (Adapted from ActiveState Recipes.)
    """
    remaining = random.uniform(0, sum(w for _, w in items))
    for candidate, weight in items:
        remaining -= weight
        if remaining < 0:
            return candidate
    return candidate
def main():
    """Run the genetic algorithm for up to GENERATIONS generations,
    printing progress and stopping early if TARGET is bred."""
    # Make an initial population
    population = make_population()
    fittest_fitness = 0
    for generation in range(GENERATIONS):
        weighted_population = []
        # Pair every individual with its fitness score for selection.
        for individual in population:
            fitness_num = fitness(individual)
            pair = (individual, fitness_num)
            weighted_population.append(pair)
        pos_winner, score = weighted_population[0]
        # Track the best candidate seen so far, in case TARGET is never hit.
        if score >= fittest_fitness:
            fittest_fitness = score
            almost_fit = pos_winner
        # BUG FIX: the original used 'pos_winner is TARGET', an *identity*
        # test that is never True for a freshly-built list; compare by
        # value instead.  Also return rather than quit(0) so main() can be
        # called from other code.
        if pos_winner == TARGET:
            print("TARGET found in Gen %s: %s" % (generation, pos_winner))
            return
        else:
            print("Generation %s... Random sample: %s : %s" % (generation, pos_winner, score))
        population = []  # Empty the houses for the kids
        # NOTE(review): this breeds POP_SIZE/2 children per generation, so
        # the population halves after generation 0 — confirm intent.
        for _ in range(int(POP_SIZE/2)):
            # Selection
            person1 = weighted_choice(weighted_population)
            person2 = weighted_choice(weighted_population)
            # Mate
            kid1 = mate(person1, person2)
            # Mutate and add back into the population.
            population.append(mutate(kid1))
    # If no exact match was found:
    print("TARGET not found:")
    print("Fittest String: '%s' at a fitness of %s" % (almost_fit, fittest_fitness))
if __name__ == "__main__": main()
| [
"kilbyjmichael@gmail.com"
] | kilbyjmichael@gmail.com |
86f6f88e0ab0d6da6af0439be4ce6e06014071a7 | b2a60c8776791b25595850706ad6f3334d0aaf22 | /test/create_circuit3.py | 2baf33a3c6ffef9e7314e378f6d0b82efd86637f | [] | no_license | cislab-ntut/Garbled_Circuit_Project2_Team5 | e649b22d6cf8eb5977aa26e9491b0c4782430902 | 82eb819f6951b6706a35689e84a964506b1b3801 | refs/heads/master | 2022-09-14T14:18:12.874486 | 2019-12-31T04:10:06 | 2019-12-31T04:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | import sys
import gate
import random
import enigma_func as enigma
random_table = list()
def exist(value):
    """Return True if *value* has already been handed out as a wire label."""
    return value in random_table
def get_random():
    """Draw a fresh, previously unused wire label in [0, 25] and record it.

    NOTE(review): only 26 distinct values exist; once random_table holds
    all of them, the uniqueness loop below can never terminate — confirm
    callers never request more than 26 labels.
    """
    global random_table
    val = random.randint(0, 25)
    while exist(val):
        val = random.randint(0, 25)
    random_table.append(val)
    return val
def get_out_org(input1_index, input2_index, t_table):
    """Look up a gate's plaintext output for a pair of plaintext inputs.

    *t_table* is a column-wise truth table [inputs1, inputs2, outputs];
    returns the output of the first row matching both inputs, or None if
    no row matches.
    """
    for in1, in2, out in zip(t_table[0], t_table[1], t_table[2]):
        if in1 == input1_index and in2 == input2_index:
            return out
def create_garbled_circuit(input_circuit):
    """Parse a fully-parenthesised boolean expression and build a garbled
    circuit for it.

    input_circuit: space-separated tokens, e.g. "( a and b )", where the
    supported operators are or/and/xor/nand/nor/nimply.

    Returns (circuit, input_to_enc, enc_table, enc_to_org, input_table_num):
      circuit        gate index -> {'input1', 'input2', 'output'} wire ids
      input_to_enc   wire id -> {0: label_for_0, 1: label_for_1}
      enc_table      output wire id -> list of [enigma ciphertext, hash tag]
      enc_to_org     label -> plaintext bit (0 or 1)
      input_table_num  wire ids that are primary circuit inputs
    """
    # Labels are drawn fresh per circuit, so reset the used-label registry.
    global random_table
    random_table = list()
    # Build the output columns of each two-input gate's truth table by
    # evaluating the Gate helper on all four input combinations.
    NANDList = list()
    ANDList = list()
    NORList = list()
    ORList = list()
    XORList = list()
    for i in range(2):
        for j in range(2):
            a = gate.Gate(i,j,'AND')
            ANDList.append(a.call())
            b = gate.Gate(i,j,'OR')
            ORList.append(b.call())
            c = gate.Gate(i,j,'XOR')
            XORList.append(c.call())
            d = gate.Gate(i,j,'NAND')
            NANDList.append(d.call())
            e = gate.Gate(i,j,'NOR')
            NORList.append(e.call())
    # Column-wise truth tables: inputs a, b and the per-op output column.
    a = [0,0,1,1]
    b = [0,1,0,1]
    nimplyList = [0,0,1,0]
    gate_table = { 'and':[a,b,ANDList],
                   'or':[a,b,ORList],
                   'xor':[a,b,XORList],
                   'nand':[a,b,NANDList],
                   'nor':[a,b,NORList],
                   'nimply':[a,b,nimplyList] }
    #input_circuit = input("please input the circuit: ")
    op_table = ['or', 'and', 'xor', 'nand', 'nor', 'nimply']
    lst = input_circuit.split(' ')
    circuit = dict()
    operator = list()
    inp = list()
    truth_table = dict()
    enc_table = dict()
    enc_t_table = dict()
    enc_to_org = dict()
    input_to_enc = dict()
    input_table = dict()
    input_table_num = list()
    count = 1
    # Shunting-style parse: a ')' closes one gate, popping its operator and
    # its two most recent operand wires; every wire gets a sequential id.
    for i in lst:
        if i == "(":
            continue
        elif i == ")":
            index = len(circuit)
            circuit[index] = dict()
            truth_table[index] = dict()
            op = operator.pop()
            circuit[index]['input2'] = inp.pop()
            circuit[index]['input1'] = inp.pop()
            circuit[index]['output'] = count
            truth_table[index]['truth_table'] = gate_table[op]
            inp.append(count)
            count += 1
        elif i in op_table:
            operator.append(i)
        else:
            # A named primary input: allocate it a wire id.
            inp.append(count)
            input_table[i] = count
            input_table_num.append(count)
            count += 1
    # After parsing, inp holds only the final output wire, so inp[0] is the
    # total wire count; give every wire a pair of labels (for bits 0 and 1).
    label = dict()
    for i in range(1, inp[0]+1,1):
        label[i] = [get_random(), get_random()]
        input_to_enc[i] = dict()
        input_to_enc[i][0] = label[i][0]
        input_to_enc[i][1] = label[i][1]
        enc_to_org[label[i][0]] = 0
        enc_to_org[label[i][1]] = 1
    # Garble each gate: for all four label pairs, encrypt the output label
    # under the two input labels and tag it with a hash for detection.
    for index in range(0, len(circuit), 1):
        g = circuit[index]
        input1 = label[g['input1']]
        input2 = label[g['input2']]
        output = label[g['output']]
        org_truth_table = truth_table[index]['truth_table']
        enc_t_table[index] = dict()
        lst = list()
        #ans = list()
        for i in input1:
            for j in input2:
                out_org = get_out_org(enc_to_org[i], enc_to_org[j], org_truth_table)
                out_val = input_to_enc[g['output']][out_org]
                #t2 = (i, j)
                lst.append([enigma.enigma([i, j, 0], out_val), hash((i, j, out_val))])
                #ans.append([enigma.enigma([i, j, 0], lst[len(lst)-1][0]), out_val, out_org])
        enc_table[g['output']] = lst
        #print(lst)
        #print(ans)
    return circuit, input_to_enc, enc_table, enc_to_org, input_table_num
def enc_input(input_value, circuit, input_to_enc, input_table_num):
    """Translate the caller's plain input bits into garbled wire labels.

    Walks the gates in order and, whenever a gate input is a primary input
    wire (listed in input_table_num), consumes the next bit of input_value
    and records that wire's garbled label.  Stops as soon as every bit has
    been assigned.

    Note: circuit and input_to_enc are keyed by *string* indices (they
    appear to have round-tripped through JSON), while the returned dict is
    keyed by integer wire numbers.

    Returns:
        dict mapping wire number -> garbled label for each primary input.
    """
    labelled = dict()
    bit_pos = 0
    for gate_index in range(len(circuit)):
        current_gate = circuit[str(gate_index)]
        # Examine both fan-in wires of this gate in their original order.
        for wire in (current_gate['input1'], current_gate['input2']):
            if wire in input_table_num:
                chosen_bit = input_value[bit_pos]
                labelled[wire] = input_to_enc[str(wire)][str(chosen_bit)]
                bit_pos += 1
        if bit_pos == len(input_value):
            break
    return labelled
def dec(value_list, circuit, enc_table, enc_to_org):
    """Evaluate the garbled circuit gate by gate and decode the final bit.

    Args:
        value_list: dict wire-number -> garbled label, pre-seeded with the
            primary inputs (see enc_input); filled in place with each
            gate's output label as evaluation proceeds.
        circuit: dict (string gate index) -> {'input1','input2','output'}.
        enc_table: dict (string output wire) -> list of
            [encrypted_output_label, hash_tag] rows.
        enc_to_org: dict mapping garbled labels back to original 0/1 bits;
            keys are str()-converted here -- presumably these dicts were
            serialised through JSON; confirm against the caller.

    Returns:
        The plain 0/1 value of the circuit's last wire.
    """
    for index in range(len(circuit)):
        g = circuit[str(index)]
        index1 = value_list[g['input1']]
        index2 = value_list[g['input2']]
        # Try every row of this gate's garbled table; the hash tag tells us
        # which row the current pair of input labels actually decrypts.
        for i in enc_table[str(g['output'])]:
            # NOTE(review): local `dec` shadows this function's name; it
            # holds the candidate decrypted output label.
            dec = enigma.enigma([index1, index2, 0], i[0])
            if hash((index1, index2, dec)) == i[1]:
                value_list[g['output']] = dec
                break
    #print("enc_to_org",enc_to_org)
    # Wires are numbered 1..N, so len(value_list) is the last (output)
    # wire once every gate has been evaluated.
    return enc_to_org[str(value_list[len(value_list)])]
#print(enc_to_org[value_list[len(value_list)]])
'''
for index in range(len(circuit)):
g = circuit[index]
index1 = g['input1']
index2 = g['input2']
value_list[g['output']] = enc_t_table[index][(value_list[index1], value_list[index2])]
#print(enc_to_org)
print(enc_t_table)
#print(value_list)
#print(len(value_list))
print(enc_to_org[value_list[len(value_list)]])
'''
'''
print("encoding to org:")
print(enc_to_org)
print("-------------------------------------------")
for i in enc_t_table:
print(enc_t_table[i])
'''
'''
for i in circuit:
print(circuit[i])
print(input_table)
''' | [
"godofyax@gmail.com"
] | godofyax@gmail.com |
4d33c00a31200e8fa664671bb8c326c32c2bbc54 | 20ccd941834b5fca4b6362290ba5a85cfd867ff3 | /VGG/train.py | c8a8b5573f7ae8e67a315b52017399bace6600d2 | [] | no_license | yjzhao19981027/pytorch-learning | 233c452ae0db9ccaacf59c7dcbe21aae20d3e383 | f9e2c9df0c85a2599701105bd6c5bc9864bf41d9 | refs/heads/master | 2023-07-29T23:22:06.088914 | 2021-09-14T05:27:54 | 2021-09-14T05:27:54 | 406,197,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import time
import torch
from torch import nn, optim
import d2lzh_pytorch as d2l
from model import VGG11
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# After 5 vgg_blocks the 224x224 input is halved 5 times: 224/32 = 7
fc_features = 512 * 7 * 7 # c * w * h
fc_hidden_units = 4096 # arbitrary
# Shrink every channel/feature count by `ratio` to fit modest hardware.
ratio = 8
small_conv_arch = [(1, 1, 64//ratio), (1, 64//ratio, 128//ratio), (2, 128//ratio, 256//ratio),
                   (2, 256//ratio, 512//ratio), (2, 512//ratio, 512//ratio)]
net = VGG11(small_conv_arch, fc_features // ratio, fc_hidden_units // ratio)
batch_size = 64
# If an "out of memory" error occurs, reduce batch_size or the resize value.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
| [
"630877986@qq.com"
] | 630877986@qq.com |
9f7c8499aee2d5981eb0633c90622742aea200a1 | 4b62774fb7958d8917ff0eca29b105c160aa3408 | /pi3ddemos/Jukebox.py | c978dd3b5bb72aa469313994743424357a5c8e92 | [
"MIT"
] | permissive | talhaibnaziz/projects | 1dd6e43c281d45ea29ee42ad93dd7faa9c845bbd | 95a5a05a938bc06a65a74f7b7b75478694e175db | refs/heads/master | 2022-01-21T08:00:37.636644 | 2021-12-30T01:58:34 | 2021-12-30T01:58:34 | 94,550,719 | 0 | 0 | null | 2021-12-26T02:01:24 | 2017-06-16T14:16:24 | Java | UTF-8 | Python | false | false | 4,318 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
""" Music and animation with changing images. Needs some mp3 files in the
music subdirectory and mpg321 installed. You may need to do some tweaking
with the alsa configuration. On jessie I had to edit a line in
/usr/share/alsa/alsa.conf and sudo amixer cset numid=3 1
to get sound from the 3.5mm jack plug. Google "raspberry pi getting audio working"
"""
import math, random, time, glob, threading
from subprocess import Popen, PIPE, STDOUT
import demo
import pi3d
def _tex_load(tex_list, slot, fName):
    """Load image file fName into tex_list[slot] (run on a worker thread
    so texture loading does not stall the render loop)."""
    tex_list[slot] = pi3d.Texture(fName)
# Setup display and initialise pi3d
DISPLAY = pi3d.Display.create(x=50, y=50, frames_per_second=40)
DISPLAY.set_background(0.4, 0.6, 0.8, 1.0) # r,g,b,alpha
#persp_cam = pi3d.Camera.instance() # default instance camera perspecive view
#setup textures, light position and initial model position
pi3d.Light((0, 5, 0))
#create shaders
shader = pi3d.Shader("star")
flatsh = pi3d.Shader("uv_flat")
post = pi3d.PostProcess("shaders/filter_outline")
#Create textures
tFiles = glob.glob("textures/*.*")
nTex = len(tFiles)
slot = 0
tex_list = [pi3d.Texture(tFiles[slot]), None] #ensure first texture there
slot = 1
#next texture load in background
t = threading.Thread(target=_tex_load, args=(tex_list, slot % 2, tFiles[slot % nTex]))
t.daemon = True
t.start()
#Create shape
myshape = pi3d.MergeShape()
asphere = pi3d.Sphere(sides=32, slices=32)
myshape.radialCopy(asphere, step=72)
myshape.position(0.0, 0.0, 5.0)
myshape.set_draw_details(shader, [tex_list[0]], 8.0, 0.1)
mysprite = pi3d.Sprite(w=15.0, h=15.0)
mysprite.position(0.0, 0.0, 15.0)
mysprite.set_draw_details(flatsh, [tex_list[0]])
# Fetch key presses.
mykeys = pi3d.Keyboard()
pic_next = 5.0
pic_dt = 5.0
tm = 0.0
dt = 0.02
sc = 0.0
ds = 0.001
x = 0.0
dx = 0.001
mFiles = glob.glob("music/*.mp3")
random.shuffle(mFiles)
nMusic = len(mFiles)
iMusic = 0
p = Popen([b"mpg321", b"-R", b"-F", b"testPlayer"], stdout=PIPE, stdin=PIPE)
p.stdin.write(bytearray("LOAD {}\n".format(mFiles[iMusic]), "ascii"))
#p.stdin.write(b"LOAD music/60miles.mp3\n")
p.stdin.flush()
rval, gval, bval = 0.0, 0.0, 0.0
mx, my = 1.0, 1.0
dx, dy = 1.0, 1.0
# Display scene and rotate shape
while DISPLAY.loop_running():
tm = tm + dt
sc = (sc + ds) % 10.0
myshape.set_custom_data(48, [tm, sc, -0.5 * sc])
post.start_capture()
# 1. drawing objects now renders to an offscreen texture ####################
mysprite.draw()
myshape.draw()
post.end_capture()
# 2. drawing now back to screen. The texture can now be used by post.draw()
# 3. redraw these two objects applying a shader effect ###############
#read several lines so the video frame rate doesn't restrict the music
flg = True
while flg:
st_read = time.time()
l = p.stdout.readline()
if (time.time() - st_read) > 0.01:# poss pause in animation waiting for mpg321 info
flg = False
if b'@P' in l:
iMusic = (iMusic + 1) % nMusic
p.stdin.write(bytearray("LOAD {}\n".format(mFiles[iMusic]), "ascii"))
p.stdin.flush()
if b'FFT' in l: #frequency analysis
val_str = l.split()
rval = float(val_str[2]) / 115.0
gval = float(val_str[6]) / 115.0
bval = float(val_str[10]) / 115.0
post.draw({48:rval, 49:gval, 50:bval})
if mx > 3.0:
dx = -1.0
elif mx < 0.0:
dx = 1.0
if my > 5.0:
dy = -1.0
elif my < 0.0:
dy = 1.0
mx += dx * gval / 100.0
my += dy * bval / 50.0
myshape.scale(mx, my, mx)
myshape.rotateIncY(0.6471 + rval)
myshape.rotateIncX(0.513 - bval)
mysprite.rotateIncZ(0.5)
if tm > pic_next:
"""change the pictures and start a thread to load into tex_list"""
pic_next += pic_dt
myshape.set_draw_details(shader, [tex_list[slot % 2]])
mysprite.set_draw_details(flatsh, [tex_list[slot % 2]])
slot += 1
t = threading.Thread(target=_tex_load, args=(tex_list, slot % 2, tFiles[slot % nTex]))
t.daemon = True
t.start()
k = mykeys.read()
if k==112:
pi3d.screenshot("post.jpg")
elif k==27:
mykeys.close()
DISPLAY.destroy()
break
p.stdin.write(b'QUIT\n')
| [
"talhaibnaziz6343@gmail.com"
] | talhaibnaziz6343@gmail.com |
b984ae4d09b777cea27b15594325c8577d5b8fa4 | 141130d04641f37380f977a868d1c5211163e2ae | /supermarket/settings.py | 29d40e12ef2f50003f102dc211bd611f7b05119f | [] | no_license | bronejeffries/supermarket-in-Django | fac78184a7cac9849dc8269286f82280edce8465 | ffa90562d66fb6da22be84089391a8d01c4bf231 | refs/heads/master | 2021-06-11T14:02:54.612270 | 2018-07-31T16:14:09 | 2018-07-31T16:14:09 | 140,300,913 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | """
Django settings for supermarket project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cuc@euyvkx-abyp&(0+-ztq39ia73t&t%2bm)svv1yhjf=ej_p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'login.apps.LoginConfig',
'cashier.apps.CashierConfig',
'manager.apps.ManagerConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'supermarket.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'supermarket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'supermarket',
'USER':'root',
'PASSWORD':'1234567890',
'HOST':'localhost',
'PORT':'',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/'
| [
"brianbronej@me.com"
] | brianbronej@me.com |
a6b9dd390dc1e85b83a1d633966da434e5ea4071 | d4072201c7820d9b18e86e77361a21bbc62deaea | /flowviz/migrations/0006_gislayer.py | 201d135afbc5a21bcfa6a44878c6f417cbc3b130 | [
"Apache-2.0"
] | permissive | eleanxr/watersharingdashboard | 6741a056a9a6602a286671b44173b39fffd7f37e | d3e6bb2717e5e32a13dcf37f30271574a5d97666 | refs/heads/master | 2023-02-10T19:45:06.956049 | 2016-08-11T23:14:46 | 2016-08-11T23:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the GISLayer model
    (name/description/url plus a ForeignKey to flowviz.Project)."""

    dependencies = [
        ('flowviz', '0005_auto_20151207_1551'),
    ]
    operations = [
        migrations.CreateModel(
            name='GISLayer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=80)),
                ('description', models.TextField()),
                ('url', models.URLField()),
                ('project', models.ForeignKey(to='flowviz.Project')),
            ],
        ),
    ]
| [
"wdicharry@tnc.org"
] | wdicharry@tnc.org |
41d74c532a58df6abc54c38bc1792a6a2b0a500b | 51468f7f539ec6c0b421b62fd5ed2b99fb6aba6e | /DSblog/dsproject/__init__.py | a0c9978276e3489a038d55f3a22e019d740207d3 | [] | no_license | Monsy-DS/blogDS | 91740f554f2d4a7b8ff9c52b046430d4c34d4fb8 | 38d7261ae329cb251cbad730bdf1068348e5ffc0 | refs/heads/main | 2023-04-17T08:59:13.982493 | 2023-04-05T18:39:22 | 2023-04-05T18:39:22 | 331,106,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from flask import Flask
import os
# Application factory-less setup: a single module-level Flask app.
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- should come from the environment
# in production.
app.config['SECRET_KEY']='SECRET'
app.static_folder = 'static'
#####################################
###### Blueprints registration ######
#####################################
#from folder.subfolder.file import blueprint_name
#app.register_blueprint(blueprint_name)
# Imported late to avoid a circular import (views import `app`).
from dsproject.core.views import core
app.register_blueprint(core)
| [
"noreply@github.com"
] | noreply@github.com |
9f3fe077a1a4c9a540c6e4a89d2367f7c0d04e60 | f5139b36f9ffe06b140a18f0d91988999aecb737 | /modules/WordLSTM.py | 08942506907844907fc4dd4d766e65da5cd63895 | [] | no_license | yunan4nlp/NNEDUseg_conll | 7a71297d478fbc5e15739003021af4c44f8b8498 | 8ad68b1c88c4499eab638dd9c11d3e446d4ebcb7 | refs/heads/master | 2021-01-07T21:24:14.554746 | 2020-02-21T04:39:33 | 2020-02-21T04:39:33 | 241,823,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from modules.Layer import *
class WordLSTM(nn.Module):
    """Bidirectional word-level LSTM encoder built on the project's MyLSTM.

    Consumes pre-computed (external) word embeddings and returns the LSTM
    hidden states for each position.
    """
    def __init__(self, vocab, config):
        # `vocab` is accepted but not used here -- kept for interface
        # parity with sibling encoders; confirm against callers.
        super(WordLSTM, self).__init__()
        self.config = config
        #self.conv = nn.Conv2d(1, config.hidden_size, (config.cnn_window, config.word_dims),
        #padding=(config.cnn_window//2, 0), bias=True)
        self.lstm = MyLSTM(
            input_size=config.word_dims,
            hidden_size=config.lstm_hiddens,
            num_layers=config.lstm_layers,
            batch_first=True,
            bidirectional=True,
            dropout_in = config.dropout_lstm_input,
            dropout_out=config.dropout_lstm_hidden,
        )
    def forward(self, x_extword_embed, masks):
        """Encode a batch of embedded sequences.

        Args:
            x_extword_embed: embedded input; assumed (batch, seq, word_dims)
                given batch_first=True -- TODO confirm.
            masks: sequence masks forwarded to MyLSTM.
        """
        # Word-level dropout is applied only while training.
        if self.training:
            x_extword_embed = drop_sequence_sharedmask(x_extword_embed, self.config.dropout_emb)
        #x_extword_embed = x_extword_embed.unsqueeze(1)
        #hidden = torch.tanh(self.conv(x_extword_embed))
        #hidden = hidden.squeeze(-1).transpose(1, 2)
        outputs, _ = self.lstm(x_extword_embed, masks, None)
        # MyLSTM appears to return time-major output; swap back to
        # batch-major -- confirm against MyLSTM's definition.
        outputs = outputs.transpose(1, 0)
        #if self.training:
        #outputs = drop_sequence_sharedmask(outputs, self.config.dropout_mlp)
        return outputs # batch, EDU_num, EDU_len, hidden
| [
"yunan@MacBook-Pro.local"
] | yunan@MacBook-Pro.local |
012c3371720c7a822dba4444f606fbcdffb1a5ce | 560a56dcd1292ad1733ebd72d9cf1398ed13fa8c | /responses/models.py | 6c83f40d0f5041ad934d2688d41e94348afa8e4d | [] | no_license | reactionpowerorg/utils | 655e0f3be256f84f17253ebd8d81cf851cd7ca49 | c9ad36bf79171dd89d91910d81538eb4f4f63515 | refs/heads/main | 2023-04-17T07:35:25.683528 | 2021-05-07T06:41:11 | 2021-05-07T06:41:11 | 365,138,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.db.models.signals import post_save
from django.dispatch import receiver
class Responses(models.Model):
    """A periodic check-in survey response submitted by an employee."""

    # Candidate 1-5 scale for `morale`, in Django's (value, label) order.
    # Fix: the original pairs were reversed (('1', 1), ...), i.e.
    # (label, value) with a string value for an integer field.
    # NOTE(review): still not wired to the field via choices=CHOICES --
    # doing so would change form validation, so it is left as-is.
    CHOICES = [(1, '1'),
               (2, '2'),
               (3, '3'),
               (4, '4'),
               (5, '5'), ]
    # 1-5 morale score. Fix: dropped max_length=1 -- it is ignored on
    # IntegerField and triggers Django system check warning fields.W122.
    morale = models.IntegerField(null=True)
    top_goal = models.CharField(max_length=200, null=True)    # main goal for the period
    highlights = models.CharField(max_length=200, null=True)  # what went well
    lowlights = models.CharField(max_length=200, null=True)   # what went poorly
    w_load = models.CharField(max_length=500, null=True)      # workload description
    goal_obs = models.CharField(max_length=100, null=True)    # obstacles observed
    m_tip = models.CharField(max_length=500, null=True)       # tip/suggestion for management
    # Timestamp refreshed on every save (auto_now), not only on creation.
    date = models.DateTimeField(auto_now=True, null=False)
    # Survey author; the row is kept (employee set to NULL) if the user
    # account is deleted.
    employee = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
class Profile(models.Model):
    """One-to-one extension of Django's User with HR-style details;
    created/saved automatically by the post_save receivers below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # NOTE(review): field names use PascalCase, diverging from Django's
    # snake_case convention -- renaming would require a migration.
    Surname = models.CharField(max_length=120, null=True)
    FirstName = models.CharField(max_length=120, null=True)
    LastName = models.CharField(max_length=120, null=True)
    Role = models.CharField(max_length=20, null=True)
    Gender = models.CharField(max_length=10, choices=[
        ('M', 'Male'),
        ('F', 'Female'),
    ])
    # NOTE(review): "ProilePics/" looks like a typo for "ProfilePics/", but
    # changing it would move the upload directory, so it is preserved.
    ProfilePic = models.ImageField(upload_to="ProilePics/", blank=True, null=True)
    # def __str__(self):
    #     return self.user
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Auto-create an empty Profile the first time a User row is saved.
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist the related Profile whenever its User is saved.
    instance.profile.save()
| [
"77322089+Reactionpowerdev@users.noreply.github.com"
] | 77322089+Reactionpowerdev@users.noreply.github.com |
8f7d2e670202fe46834fd31c9e7eaf218bed9b04 | ca3d6e6683f4736792fc93352424c6e6d216ab4d | /chapter9/chapter9_app_external_api_test.py | ccdbb3f2d629abb50083ae1be6495e4b66566be2 | [
"MIT"
] | permissive | msg4rajesh/Building-Data-Science-Applications-with-FastAPI | 11ac071583002b15bc955fc3bc72ab86d2800222 | 99b472d8295a57c5a74a63d8184ac053dc4012f2 | refs/heads/main | 2023-07-16T09:48:48.536002 | 2021-08-26T05:02:39 | 2021-08-26T05:02:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import asyncio
from typing import Any, Dict
import httpx
import pytest
from asgi_lifespan import LifespanManager
from fastapi import status
from chapter9.chapter9_app_external_api import app, external_api
class MockExternalAPI:
    """Async callable standing in for the real external_api dependency;
    always returns the same canned employee payload."""
    # Canned response mirroring the external employees API's envelope.
    mock_data = {
        "data": [
            {
                "employee_age": 61,
                "employee_name": "Tiger Nixon",
                "employee_salary": 320800,
                "id": 1,
                "profile_image": "",
            }
        ],
        "status": "success",
        "message": "Success",
    }
    async def __call__(self) -> Dict[str, Any]:
        # Returns the shared class-level dict (not a copy).
        return MockExternalAPI.mock_data
@pytest.fixture(scope="session")
def event_loop():
    # Session-scoped loop so async fixtures/tests share one event loop.
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()
@pytest.fixture
async def test_client():
    # Swap the real external_api dependency for the canned mock, then run
    # the app's lifespan events and yield an in-process HTTP client.
    app.dependency_overrides[external_api] = MockExternalAPI()
    async with LifespanManager(app):
        async with httpx.AsyncClient(app=app, base_url="http://app.io") as test_client:
            yield test_client
@pytest.mark.asyncio
async def test_get_employees(test_client: httpx.AsyncClient):
    # /employees should proxy the (mocked) external API verbatim.
    response = await test_client.get("/employees")
    assert response.status_code == status.HTTP_200_OK
    json = response.json()
    assert json == MockExternalAPI.mock_data
| [
"fvoron@gmail.com"
] | fvoron@gmail.com |
13b72516be454ffcf0392b2cc7c088e9f1a73bd5 | 6854464ef0805c10ca73ba4c2220ce4f42ee4243 | /auto_wjx_answer.py | d1717eddf9b28191f313084be037c26ec988952a | [] | no_license | tea321000/Auto-Wjx-Answer | dd9c64e612768236d69116d7bd06e5e8e082d9d7 | cf2ec6a49cc87b94453ad0092d779bab911d62e3 | refs/heads/master | 2021-01-02T19:09:17.086168 | 2020-02-08T14:20:50 | 2020-02-08T14:20:50 | 239,758,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,492 | py |
# coding: utf-8
# In[1]:
# get_ipython().system('pip install selenium')
# In[2]:
import argparse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import random
import time
import re
from datetime import datetime
# In[3]:
# fill the first 4 questions
def answer_personal_questions(driver, grade, stunum, name, dormitory):
    """Fill the survey's first four personal-information questions.

    Args:
        driver: an active selenium WebDriver on the questionnaire page.
        grade: 1-based option index for the grade radio question (q1).
        stunum: student number text for q2.
        name: student name text for q3.
        dormitory: dormitory text for q4.
    """
    grade_choice = driver.find_element_by_xpath('//*[@id="divquestion1"]/ul/li[{}]/a'.format(grade))
    grade_choice.click()
    stunum_input = driver.find_element_by_id('q2')
    stunum_input.clear()
    stunum_input.send_keys(stunum)
    name_input = driver.find_element_by_id('q3')
    name_input.clear()
    name_input.send_keys(name)
    dormitory_input = driver.find_element_by_id('q4')
    dormitory_input.clear()
    dormitory_input.send_keys(dormitory)
# In[4]:
def parse_questionnaire(driver_path, url, answer_dict):
    """Open the survey at `url` in Chrome and submit one filled answer sheet.

    Args:
        driver_path: path to the chromedriver executable.
        url: the wjx.cn questionnaire address.
        answer_dict: question number -> answer text; keys 1-4 fill the
            personal-information questions.

    Bug fix: the third parameter was previously misspelled ``asnwer_dict``
    and never used -- the body silently read the module-level
    ``answer_dict`` global instead.  The parameter is now actually
    consumed (the only visible caller passes it positionally).
    """
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.get(url)
    # Dismiss the "questionnaire already finished" dialog if present.
    try:
        driver.execute_script('window.parent.PDF_close()')
    except Exception:  # narrowed from a bare except
        pass
    answer_personal_questions(driver, answer_dict[1], answer_dict[2],
                              answer_dict[3], answer_dict[4])
    # Answer "no" (option 2) to every Hubei-contact question.
    for q_num in [5, 7, 8, 15, 17, 19, 21, 22, 23, 28]:
        driver.find_element_by_xpath(f'//*[@id="divquestion{q_num}"]/ul/li[2]/a').click()
    # Keep a screenshot of the filled form for auditing.
    driver.save_screenshot('填写结果.png')
    # Submit the questionnaire.
    submit = driver.find_element_by_id('submit_button')
    submit.click()
    # driver.quit()
# In[ ]:
if __name__ == '__main__':
    # Read the answer sheet: each line is "<question number> ... <answer>".
    answer_dict = {}
    with open('./answer-sheet.txt', 'r', encoding='utf-8') as file:
        for line in file.readlines():
            q_num = int(line.split()[0])
            answer = line.split()[-1]
            answer_dict[q_num] = answer
    # CLI: positional questionnaire URL, optional chromedriver path.
    parser = argparse.ArgumentParser()
    parser.add_argument('url', type=str,
                        help="the questionary website")
    parser.add_argument('-d', '--driver-path', action='store', dest="driver",
                        default='./chromedriver', help="the driver path")
    args = parser.parse_args()
    url = args.url
    driver_path = args.driver
    parse_questionnaire(driver_path, url, answer_dict)
    # Keep the browser window open briefly so the result can be inspected.
    time.sleep(30)
"2633129202@qq.com"
] | 2633129202@qq.com |
df1c243259bb643cec2b5c3ab1980bd98628a229 | 78860e7facd130ccc4b7b6226dc19fce672d8d81 | /bikeshare.py | 0adcd08260c17919d4461c6c293deaf60248faab | [] | no_license | rishabh-bisht2302/Rishabh-udacity-project | d2b7d3f3de5c3363be9abb178f6ee31873c54226 | 94a0a723a1721182a444079996b4148cdf718b38 | refs/heads/master | 2020-06-07T08:55:34.038052 | 2019-06-20T21:38:13 | 2019-06-20T21:38:13 | 192,978,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,293 | py | # External sources used
# --> https://pandas.pydata.org/pandas-docs/stable/reference/frame.html
# --> http://unicode.org/emoji/charts/emoji-list.html#1f92d
# --> https://www.geeksforgeeks.org/python-program-to-print-emojis/
import time
import numpy as np
import pandas as pd
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.
    Input: Function takes nothing and returns a tuple.
    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    All three values are returned lower-cased.
    """
    # Asks the user to enter the name of city for which they want to explore the data.
    # A while loop is used to make sure that the cityname entered is correct and if not,user is asked again to enter the correct city name.
    # An emoji is also used source for which is provided at top.
    city_name = input('Enter the city name to start exploring(CHICAGO / WASHINGTON / NEW YORK CITY) : ')
    while city_name.lower() not in ('chicago','washington','new york city'):
        city_name = input('OOPS..!!\U0001F605 Please type CITY NAME from (CHICAGO / WASHINGTON / NEW YORK CITY)..!! Lets start again.')
    # Asks the user to enter the month of year for which they want to explore the data.
    # A while loop is used to make sure that the month name entered is correct and if not,user is asked again to enter the correct month name.
    month_name = input('looking for a particular month or for all months,type(\'all\') ? Enter the name of month or type \'all\' for no filter(in words) : ')
    while month_name.lower() not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):
        month_name = input('OOPS..!!\U0001F605 Please type correct MONTH NAME..!! Enter again.')
    # Asks the user to enter the month of year for which they want to explore the data.
    # A while loop is used to make sure that the month name entered is correct and if not,user is asked again to enter the correct month name.
    day = input('For a particular day or all days(\'all\' for all days) ? Enter response(in words) : ')
    while day.lower() not in ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all'):
        day = input('OOPS..!!\U0001F605 Please enter correct DAY (eg. \'sunday\'). Enter again.')
    print('\n'*2)
    # Function returns the tuple of strings.
    return city_name.lower(),month_name.lower(),day.lower()
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze (maps to "<city>.csv",
            spaces replaced by underscores)
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day,
             with extra helper columns Week_day, Month and Hours.
    """
    # Month-name -> month-number lookup. Bug fix: the original dict spelled
    # January as 'januray', so filtering by January raised a NameError
    # (req_month was never assigned).
    months = {'january': 1, 'february': 2, 'march': 3,
              'april': 4, 'may': 5, 'june': 6}

    # Read the city's CSV into a DataFrame and neutralise missing values.
    file = pd.read_csv(city.replace(' ', '_').lower() + '.csv')
    file = file.fillna(0)

    # Derive the helper columns used by the statistics functions.
    file['Start Time'] = pd.to_datetime(file['Start Time'])
    # Fix: dt.day_name() replaces dt.weekday_name, which was removed in
    # pandas 1.0; both yield names like 'Thursday'.
    file['Week_day'] = file['Start Time'].dt.day_name()
    file['Month'] = file['Start Time'].dt.month
    file['Hours'] = file['Start Time'].dt.hour

    # Apply the optional month and weekday filters.
    if month != 'all':
        file = file[file['Month'] == months[month.lower()]]
    if day != 'all':
        file = file[file['Week_day'] == day.title()]

    # Filtered DataFrame is returned.
    return file
def time_stats(df, month, day):
    """Displays statistics on the most frequent times of travel.

    Args:
        df - filtered DataFrame produced by load_data() (needs the helper
             columns Month, Week_day and Hours)
        month - the month filter the user chose ('all' means no filter)
        day - the weekday filter the user chose ('all' means no filter)
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    # Marking time as start time in order to calculate the time taken by function to execute.
    start_time = time.time()

    # Only report the most popular month when no month filter is active.
    if month == 'all':
        most_pop_month_count = df['Month'].value_counts().max()
        most_pop_month = df['Month'].mode()[0]
        # Converting the integer representation of month to its name.
        # Bug fix: the array previously spelled January as 'Januray'.
        date = np.array(['January', 'February', 'March', 'April', 'May', 'June'])
        most_pop_month_new = date[most_pop_month - 1]
        print('Most popular month :', most_pop_month_new, '\tcount :', most_pop_month_count)

    # Only report the most popular weekday when no day filter is active.
    if day == 'all':
        most_pop_weekday_count = df['Week_day'].value_counts().max()
        most_pop_weekday = df['Week_day'].mode()[0]
        print('Most popular weekday :', most_pop_weekday, '\tcount :', most_pop_weekday_count)

    # The most common start hour is always reported.
    most_pop_hour_count = df['Hours'].value_counts().max()
    most_pop_hour = df['Hours'].mode()[0]
    print('Most popular hour :', most_pop_hour, '\tcount :', most_pop_hour_count)

    # Marking the time as end time for calculation of time taken for execution of above code.
    print("\nThis took {} seconds.".format(time.time() - start_time))
    # Prints empty space to seperate data printed by function.
    print('\n' * 2)
def station_stats(df):
    """Takes a DataFrame and displays statistics on the most popular stations and trip.

    Side effect: adds a 'journey' column ("<start> TO <end>") to df in place.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    # Marking time as start time in order to calculate the time taken by function to execute.
    start_time = time.time()
    # Counts the number of times an unique value appears in the Start Station column and prints the name of Start Station appearing the most and its count in the column.
    pop_start_station_count = df['Start Station'].value_counts().max()
    pop_start_station = df['Start Station'].mode()[0]
    print('Most popular Start Station :',pop_start_station,'\tcount:',pop_start_station_count)
    # Counts the number of times an unique value appears in the End Station column and prints the name of End Station appearing the most and its count in the column.
    pop_end_station_count = df['End Station'].value_counts().max()
    pop_end_station = df['End Station'].mode()[0]
    print('Most popular End Station :',pop_end_station,'\tcount:',pop_end_station_count)
    # Combines two columns in DataFrame and prints the most frequent value in this column along with its count in column
    df['journey'] = df['Start Station'] + ' TO ' + df['End Station']
    most_pop_trip = df['journey'].mode()[0]
    max_freq_count = df.groupby(['Start Station','End Station'])['Start Time'].count().max()
    print('The most popular frequent trip :',most_pop_trip,'\tcount :',max_freq_count)
    # Marking the time as end time for calculation of time taken for execution of above code.
    print("\nThis took {} seconds.".format(time.time() - start_time))
    # Prints empty space to seperate data printed by function.
    print('\n'*2)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration (seconds)."""
    print('\nCalculating Trip Duration...\n')
    # Marking time as start time in order to calculate the time taken by function to execute.
    start_time = time.time()
    # Using numpy's sum function to calculate total of trip durations.
    total_time = np.sum(df['Trip Duration'])
    print('Total travel time duration :',total_time)
    # Using numpy's mean function to calculate average trip duration.
    mean_time = np.mean(df['Trip Duration'])
    print('Mean travel time duration :',mean_time)
    # Marking the time as end time for calculation of time taken for execution of above code.
    print("\nThis took {} seconds.".format(time.time() - start_time))
    # Prints empty space to seperate data printed by function.
    print('\n'*2)
def user_stats(df,city):
    """Displays statistics on bikeshare users.

    Args:
        df - filtered DataFrame produced by load_data()
        city - lower-cased city name; Washington's CSV lacks the Gender
               and Birth Year columns, so those stats are skipped for it.
    """
    print('\nCalculating User Stats...\n')
    # Marking time as start time in order to calculate the time taken by function to execute.
    start_time = time.time()
    # Calculating the number of unique rows in 'User_type' column and printing each type along with their respective count.
    user_type = df['User Type'].value_counts()
    print('Total SUBSCRIBERS :',user_type['Subscriber'],'\tTotal CUSTOMERS:',user_type['Customer'])
    # Conditional statement to check if the city entered by user is 'washington'.If yes,then it displays no data as the columns dont exist in washingtons csv file.If not,then it displays the result.
    if city == 'washington':
        print('Sorry..!!There is no Birth year and Gender information for Washington city')
    else:
        # Counts number of rows for each unique value in 'Gender' column and prints each type along with its count in this column.
        gender = df['Gender'].value_counts()
        print('Total MALES :',gender['Male'],'\tTotal FEMALES :',gender['Female'])
        # prints most frequently occuring value in 'Birth Year' as most common birth year,minimum of those as earliest birth year and maximum of those as latest birth year
        # NOTE(review): replace([0,None]) is called without a replacement
        # value -- presumably intended to drop the fillna(0) placeholders
        # before taking min/mode; verify this works on the pinned pandas
        # version.
        latest_birth_year = df['Birth Year'].max()
        earliest_birth_year = df['Birth Year'].replace([0,None]).min(skipna= True)
        common_birth_year = df['Birth Year'].replace([0,None]).mode()[0]
        print('Most common birth year :',common_birth_year,'\nEarliest birth year :',earliest_birth_year,'\nLatest birth year :',latest_birth_year)
    # Marking the time as end time for calculation of time taken for execution of above code.
    print("\nThis took {} seconds.".format(time.time() - start_time))
    # Prints empty space to seperate data printed by function.z
    print('\n'*2)
def raw_data(df,city):
""" Takes DataFrame and returns row by row data as per input provided by user. """
# Asks user if user want to print individual data and keeps on printing data row by row until user types 'No'.
response = input('Do you want to see individual data ? ')
i = 0
while response.lower() == 'yes' or response.lower() == 'y':
# creates a dictionary to represent the individual data
user = {'id' : df.loc[i][0],
'Start Time' : df['Start Time'][i],
'End Time' : df['End Time'][i],
'Trip Duration(sec)' : df['Trip Duration'][i],
'Start Station' : df['Start Station'][i],
'End Station' : df['End Station'][i],
'User Type' : df['User Type'][i] }
# Ensures that the code prints all information about every user for each city
if city != 'washington':
user['Gender'] = df['Gender'][i]
user['Birth Year'] = df['Birth Year'][i]
# prints information for each raw in the frame as a dictionary
print(user)
response = input('\nDo you want to print individual data ? Enter yes or no : ')
i += 1
return
def main():
# Asks the user if user wants to explore bikeshare and if the input is 'yes',it executes the code,prints the result and asks again until user enters 'no'.
restart = input('\nWould you like to expolre bikeshare data? Enter yes or no.\n')
while restart.lower() == 'yes' or restart.lower() == 'y': # conditional statement to start execution
city, month, day = get_filters() # function is called and returned values are assigned to variables
df = load_data(city, month, day) # function is called and returned value is assigned to variable
time_stats(df,month,day) # function called with three arguments
station_stats(df) # function called
trip_duration_stats(df) # function called
user_stats(df,city) # function called with two argumnets
raw_data(df,city) # function called with two argumnets
restart = input('\nWould you like to restart? Enter yes or no.\n')
if __name__ == '__main__':
# Check if the code is running as main or as imported.If its main,the main function is called.
main() | [
"Rishabh.bisht2302@gmail.com"
] | Rishabh.bisht2302@gmail.com |
ff96d11ffbd2b28520d2005e0854bde218bbd134 | 2dc38cc5ebade80e3b59c4d802271c4edff114e2 | /pyBattleship.py | 7cee55623a5f32b72912eff42963feba1522d80a | [
"Apache-2.0"
] | permissive | awhittle3/pyBattleship | 60db294e3ef807d2eaab6575f0e4bd43a12f9457 | be3b779654e68d355344258a334fcd0bc303d223 | refs/heads/master | 2020-05-20T11:11:01.968796 | 2014-05-30T23:13:16 | 2014-05-30T23:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | import boards
import enemyAI
import playAnchors
def main():
enemyDead = False
playerDead = False
targetingMode = False
target = [99,99] #99, 99 is the null target value
turn = 1
n = boards.selectRand() #Select two random boards
enemyBoard = boards.makeEnemyBoard()
enemyLocations = boards.selectBoard(n[0])
playerBoard = boards.selectBoard(n[1])
#Print legend
print("~ is empty ocean")
print("o are your ships")
print("X are misses")
print("* are hits")
input("Press enter to continue")
print("\n----BATTLESHIP----")
playAnchors.playSong()
while not playerDead and not enemyDead:
boards.printBoards(enemyBoard, playerBoard)
print("Turn " + str(turn))
i = userInput()
#Player choice evaluated
if(i[0] < 0 or i[0] > boards.BOARD_SIZE - 1) or (i[1] < 0 or i[1] > boards.BOARD_SIZE - 1):
print("Oops, that's not even in the ocean.")
elif(enemyBoard[i[0]][i[1]] == "X" or enemyBoard[i[0]][i[1]] == "*"):
print("You guessed that one already.")
elif enemyLocations[i[0]][i[1]] == "o":
#It's a hit!
enemyBoard[i[0]][i[1]] = "*"
else:
#It's a miss
enemyBoard[i[0]][i[1]] = "X"
#Enemy turn
if targetingMode == False:
#Guess a random location
target = enemyAI.guessing(playerBoard)
if target != [99,99]:
targetingMode = True #Ship was hit, target ship
else:
#Try to sink a targeted ship
target = enemyAI.targeting(playerBoard, target)
if target == [99,99]:
targetingMode = False #Set to null, ship was sunk
#Check if either player is dead
if not isAlive(enemyBoard):
enemyDead = True
elif not isAlive(playerBoard):
playerDead = True
turn += 1
#Print game result
if enemyDead:
print("YOU WIN!")
else:
print("YOU LOSE!")
def userInput():
try:
row = int(input("Guess row: ")) - 1
col = int(input("Guess column: ")) - 1
n = [row,col]
except(ValueError):
print("Error: Input is not a number")
n = userInput()
return n
def isAlive(board):
MAX_HITS = 17
count = 0
for line in board:
for space in line:
if space == "*":
count += 1
if count == MAX_HITS:
return False
else:
return True
main()
| [
"awhittle@ualberta.ca"
] | awhittle@ualberta.ca |
51d5cfa038f52b53a7268bc01fad5b2b06dbf3b3 | 5fc947320389121abe4e0052614b7a23e0cf8226 | /pbcommand/schemas/__init__.py | 5f238df1c931e425457d19691407723684f0979f | [] | no_license | tperelmuter/pbcommand | 3e36c5d5a26bdb014e6986126148b946e8db9f3e | ebad4cf0504efa30fab64c4538022f77855d353d | refs/heads/master | 2020-04-07T18:49:27.901173 | 2016-02-09T21:59:00 | 2016-02-09T21:59:00 | 51,413,965 | 0 | 0 | null | 2016-02-10T01:23:31 | 2016-02-10T01:23:31 | null | UTF-8 | Python | false | false | 954 | py | import os
import functools
import avro.schema
from avro.io import validate
SCHEMA_REGISTRY = {}
__all__ = ['validate_pbreport',
'validate_tc',
'validate_rtc',
'SCHEMA_REGISTRY']
def _load_schema(idx, name):
d = os.path.dirname(__file__)
schema_path = os.path.join(d, name)
with open(schema_path, 'r') as f:
schema = avro.schema.parse(f.read())
SCHEMA_REGISTRY[idx] = schema
return schema
RTC_SCHEMA = _load_schema("resolved_tool_contract", "resolved_tool_contract.avsc")
PBREPORT_SCHEMA = _load_schema("pbreport", "pbreport.avsc")
TC_SCHEMA = _load_schema("tool_contract", "tool_contract.avsc")
def _validate(schema, d):
"""Validate a python dict against a avro schema"""
return validate(schema, d)
validate_rtc = functools.partial(_validate, RTC_SCHEMA)
validate_pbreport = functools.partial(_validate, PBREPORT_SCHEMA)
validate_tc = functools.partial(_validate, TC_SCHEMA) | [
"michael.kocher@me.com"
] | michael.kocher@me.com |
ff7e5353de2674b363d6503c65205bd258975026 | dfff7fef4d49266db475856d4c0afef8ca672e00 | /tests/cantfit.py | 54f692c8b57158768e4561a4098cae020b3eafbe | [
"MIT"
] | permissive | funilrys/black | 70a5a251338ab67fed0771ab6ec97cca03aa378b | b4cee97c99d5513ef81fdf2bff1809721662f87d | refs/heads/master | 2020-03-17T14:41:13.259870 | 2018-05-16T05:15:28 | 2018-05-16T05:15:28 | 133,682,656 | 1 | 0 | null | 2018-05-16T14:57:35 | 2018-05-16T14:57:35 | null | UTF-8 | Python | false | false | 2,983 | py | # long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1, 2, 3
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function()
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
# output
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
0
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
1
) # with a comment
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1, 2, 3
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
function()
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = (
but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
| [
"lukasz@langa.pl"
] | lukasz@langa.pl |
a89b72dfd3f638a33e4b454b8f2ae58e5cd9155c | ae7b27c5bfabad681f8838b8268cdddb6048cd65 | /Src/correct.py | 5f71bca8b53882c53e93ee7feaf10ae8b48f566e | [] | no_license | Our-NLP/ChinesePronounse | f09fd4433a30662e14fedca8c8f6191b53384db5 | 7f513bdce47eff7a0e542d7dad030daf310d826e | refs/heads/master | 2021-01-10T16:00:05.077703 | 2015-12-11T08:38:52 | 2015-12-11T08:38:52 | 43,517,881 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
import os
class Correct:
def __init__(self):
self.data_dir=os.path.realpath(os.path.dirname(os.path.realpath("__file__"))+"/../Data")
self.meta_dir=self.data_dir+"/MetaData/"
self.output_dir=self.data_dir+"/new_Data/"
self.postaged_dir=self.data_dir+"/postaged/"
def multi_task(self):
for file in os.listdir(self.postaged_dir):
with open(self.meta_dir+file) as correct,open(self.postaged_dir+file) as wrong,open(self.output_dir+file,'w') as out:
correct_list=[line for line in correct]
wrong_list=[line for line in wrong]
if len(correct_list) != len(wrong_list):
print file
for i in range(len(correct_list)):
out.write(correct_list[i][:-1]+'\t'+wrong_list[i].split('\t')[-1])
if __name__=="__main__":
ct=Correct()
#print ct.postaged_dir
ct.multi_task()
| [
"ljfnwpusife@gmail.com"
] | ljfnwpusife@gmail.com |
7b0dbc63cff82f92ee55451cbdba8607fbeb356c | 65a784ef70fce0502a5711201639be5e80908d5b | /exp/tester/run.py | fb0c9b03cf114398651c833a1ccecdfe38561fe7 | [] | no_license | yunshengb/research | 5a93d6b3bc9e60fec152d817a17e9d4864ffdbd5 | 83e726678acaaf7116af5573a8419dc91a843df0 | refs/heads/master | 2020-09-11T01:24:28.235871 | 2016-12-05T05:51:33 | 2016-12-05T05:51:33 | 67,900,433 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | from config import *
from os import system
from glob import glob
import pandas as pd
cmd = ''
if APP_TYPE == 'qa':
data = pd.read_csv(FILE_PATH)
for line in data[FIELD_NAME]:
cmd += 'tq\n'
cmd += line.replace('"', '').strip()
cmd += '\n'
elif APP_TYPE == 'asr':
files = glob(DIR_PATH + '/*.wav')
for i, file_path in enumerate(files):
cmd += 'ta\n'
cmd += file_path
cmd += '\n'
elif APP_TYPE == 'cl':
data = pd.read_csv(FILE_PATH)
for line in data[FIELD_NAME]:
cmd += 'tcl\n'
cmd += line.replace('"', '').strip()
cmd += '\n\n' # no image at this point
else:
raise RuntimeError('Unrecognized app type')
cmd += 'qq'
print(cmd)
system('echo "{}" | ./X'.format(cmd))
| [
"yba@umich.edu"
] | yba@umich.edu |
00b59084a85b1d55078321d160edd0d1e0677e4f | 0b7333006b9aa8ebf2497880baee5a333e8b9ef0 | /cnn/fer2013_to_file.py | 241b5fb5c2eff8c76e21b7d1bfed7afeb82ddf36 | [] | no_license | punkrocker/Deeplearning | 3c6066bc9e437aab0a0601b3ccc1069162803536 | 2d64eadce77aaac5664371593e44b73cee40bad8 | refs/heads/master | 2020-06-28T05:06:17.206052 | 2019-10-22T01:52:50 | 2019-10-22T01:52:50 | 200,148,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | # encoding:utf-8
import pandas as pd
import numpy as np
import scipy.misc as sm
import os
emotions = {
'0': 'anger', # 生气
'1': 'disgust', # 厌恶
'2': 'fear', # 恐惧
'3': 'happy', # 开心
'4': 'sad', # 伤心
'5': 'surprised', # 惊讶
'6': 'normal', # 中性
}
# 创建文件夹
def createDir(dir):
if os.path.exists(dir) is False:
os.makedirs(dir)
def saveImageFromFer2013(file):
# 读取csv文件
faces_data = pd.read_csv(file)
imageCount = 0
# 遍历csv文件内容,并将图片数据按分类保存
for index in range(len(faces_data)):
# 解析每一行csv文件内容
emotion_data = faces_data.loc[index][0]
image_data = faces_data.loc[index][1]
usage_data = faces_data.loc[index][2]
# 将图片数据转换成48*48
data_array = list(map(float, image_data.split()))
data_array = np.asarray(data_array)
image = data_array.reshape(48, 48)
# 选择分类,并创建文件名
dirName = os.path.join('Origin', usage_data)
emotionName = emotions[str(emotion_data)]
# 图片要保存的文件夹
imagePath = os.path.join(dirName, emotionName)
# 创建“用途文件夹”和“表情”文件夹
createDir(dirName)
createDir(imagePath)
# 图片文件名
imageName = os.path.join(imagePath, str(index) + '.jpg')
sm.toimage(image).save(imageName)
imageCount = index
print('总共有' + str(imageCount) + '张图片')
if __name__ == '__main__':
saveImageFromFer2013('fer2013.csv')
| [
"punkrock_2000@163.com"
] | punkrock_2000@163.com |
32ccc301818cba58bc8652cc445d8944ea3d305b | 968df4b59df49396714d46d438057460ea8fea8b | /homeassistant/components/bond/utils.py | 48fbcd8021083d9beb0c69824137731e8342644d | [
"Apache-2.0"
] | permissive | dakesson/core | a71660e265d0742db0d0842f01ac4f3ddf3e074c | c9380d4972e4ec41991eca6e2bc0e4df99b763e7 | refs/heads/master | 2022-11-24T02:14:56.167005 | 2020-07-22T15:45:03 | 2020-07-22T15:45:03 | 282,056,305 | 0 | 0 | Apache-2.0 | 2020-07-23T21:12:02 | 2020-07-23T21:12:02 | null | UTF-8 | Python | false | false | 2,824 | py | """Reusable utilities for the Bond component."""
from typing import List, Optional
from bond import Actions, Bond
class BondDevice:
"""Helper device class to hold ID and attributes together."""
def __init__(self, device_id: str, attrs: dict, props: dict):
"""Create a helper device from ID and attributes returned by API."""
self.device_id = device_id
self.props = props
self._attrs = attrs
@property
def name(self) -> str:
"""Get the name of this device."""
return self._attrs["name"]
@property
def type(self) -> str:
"""Get the type of this device."""
return self._attrs["type"]
def supports_speed(self) -> bool:
"""Return True if this device supports any of the speed related commands."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Actions.SET_SPEED]])
def supports_direction(self) -> bool:
"""Return True if this device supports any of the direction related commands."""
actions: List[str] = self._attrs["actions"]
return bool(
[
action
for action in actions
if action in [Actions.SET_DIRECTION, Actions.TOGGLE_DIRECTION]
]
)
def supports_light(self) -> bool:
"""Return True if this device supports any of the light related commands."""
actions: List[str] = self._attrs["actions"]
return bool(
[
action
for action in actions
if action in [Actions.TURN_LIGHT_ON, Actions.TOGGLE_LIGHT]
]
)
class BondHub:
"""Hub device representing Bond Bridge."""
def __init__(self, bond: Bond):
"""Initialize Bond Hub."""
self.bond: Bond = bond
self._version: Optional[dict] = None
def setup(self):
"""Read hub version information."""
self._version = self.bond.getVersion()
def get_bond_devices(self) -> List[BondDevice]:
"""Fetch all available devices using Bond API."""
device_ids = self.bond.getDeviceIds()
devices = [
BondDevice(
device_id,
self.bond.getDevice(device_id),
self.bond.getProperties(device_id),
)
for device_id in device_ids
]
return devices
@property
def bond_id(self) -> str:
"""Return unique Bond ID for this hub."""
return self._version["bondid"]
@property
def target(self) -> str:
"""Return this hub model."""
return self._version.get("target")
@property
def fw_ver(self) -> str:
"""Return this hub firmware version."""
return self._version.get("fw_ver")
| [
"noreply@github.com"
] | noreply@github.com |
7ef51710bf0d1477fb373d524df4209cae76669a | 38c5417c67c323de11feb03d5b90bdaa28012c87 | /open_canvas.py | 9173d87a5c52fbd35bc7c913c974310ebf9ace85 | [] | no_license | wongu0905/Class | c06145068fcc3951d211635ac3d7a25b62b9b64c | 93c25edd91ed6cfefedf974230deb78ba9e59f10 | refs/heads/master | 2022-12-16T20:17:07.110361 | 2020-09-22T14:45:16 | 2020-09-22T14:45:16 | 294,040,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | import pico2d
pico2d.open_canvas()
| [
"noreply@github.com"
] | noreply@github.com |
f2ed1999ab2fe5b10597c440649a3b93645b82d3 | c1e305171afcd18fdd66a46cbcf81d8dbcc3fd0c | /PyTorch/Py09_dropout.py | 447dd6e594089cf2812c287b60aa6a968b1ae24c | [] | no_license | ANRhine/PyTorch_Tutorial | 2f0d9fcc94dfec37a352b5dcb37fc66738abc37d | 378d03d2f2cfa08ff2040096218078a2e3cd659a | refs/heads/master | 2021-04-07T06:24:28.608860 | 2018-03-16T14:43:03 | 2018-03-16T14:43:03 | 125,291,327 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
-------------------------------------
File name: Py09_dropout.py
Author: Ruonan Yu
Date: 18-1-30
-------------------------------------
"""
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn as nn
torch.manual_seed(1)
N_SAMPLES = 20
N_HIDDEN = 300
LR = 0.001
# fake data
# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
x, y = Variable(x), Variable(y)
# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
test_x, test_y = Variable(test_x, volatile=True), Variable(test_y, volatile=True)
# overfitting network
net_overfitting = nn.Sequential(
nn.Linear(1, N_HIDDEN),
nn.ReLU(),
nn.Linear(N_HIDDEN, N_HIDDEN),
nn.ReLU(),
nn.Linear(N_HIDDEN, 1)
)
# dropout network
net_dropouted = nn.Sequential(
nn.Linear(1, N_HIDDEN),
nn.Dropout(0.5), # drop 50% of neuron
nn.ReLU(),
nn.Linear(N_HIDDEN, N_HIDDEN),
nn.Dropout(0.5), # drop 50% of neuron
nn.ReLU(),
nn.Linear(N_HIDDEN, 1)
)
print(net_overfitting)
print(net_dropouted)
# training
optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=LR)
optimizer_drop = torch.optim.Adam(net_dropouted.parameters(), lr=LR)
loss_func = nn.MSELoss()
plt.ion()
for t in range(500):
pred_ofit = net_overfitting(x)
pred_drop = net_dropouted(x)
loss_ofit = loss_func(pred_ofit, y)
loss_drop = loss_func(pred_drop, y)
optimizer_ofit.zero_grad()
optimizer_drop.zero_grad()
loss_ofit.backward()
loss_drop.backward()
optimizer_ofit.step()
optimizer_drop.step()
if t % 10 == 0: # 每10步画一次图
# 将神经网络转换test形式,画好图之后改回训练形式
net_overfitting.eval()
net_dropouted.eval() # 因为drop网络在train的时候和test的时候参数不一样
plt.cla()
test_pred_ofit = net_overfitting(test_x)
test_pred_drop = net_dropouted(test_x)
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(), 'r-', lw=3, label='overfitting')
plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
plt.text(0, -1.2, r'$overfitting loss=%.4f$' % loss_func(test_pred_ofit, test_y).data[0],
fontdict={'size': 10, 'color': 'red'})
plt.text(0, -1.5, r'$dropout loss=%.4f$' % loss_func(test_pred_drop, test_y).data[0],
fontdict={'size': 10, 'color': 'red'})
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.pause(0.1)
# 将两个网络改回train形式
net_overfitting.train()
net_dropouted.train()
plt.ioff()
plt.show()
| [
"you@example.com"
] | you@example.com |
9393d21961b0043d35b932fd166c21ca22c72e0c | e456cdf76c1419413931d218317d44ea4b7c3fb7 | /demo/django/pokedex/admin.py | fd91861f03cd25790e7dec41cc349aba98f35f27 | [
"MIT"
] | permissive | Nekmo/angular-django | cbbd8bb0c6baeea6e788c5623fb98102b443f1e9 | 0464747806ce4e79571d3a72db0f04e15f0c6e5e | refs/heads/master | 2023-08-27T16:03:10.006482 | 2021-11-08T23:15:14 | 2021-11-08T23:15:14 | 298,419,330 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.contrib import admin
# Register your models here.
from pokedex.models import Specie
@admin.register(Specie)
class SpecieAdmin(admin.ModelAdmin):
pass
| [
"contacto@nekmo.com"
] | contacto@nekmo.com |
3438306ad8e8827a648efd0fd5e6720bccebc58f | 80eef13ae9bcf5116c0b40ff2c6eef5655b8ebd5 | /sharingCode/metaclass/metaclass_login.py | ba778eaaafae527c22106108b5cb97242cdb5899 | [] | no_license | shmily-xiao/python_study | 8c53ff4c8f4bf7cd4990a7dc08a65adb300e7683 | 5bd1f7cf0e11345e18938b8c4439cca578e7d7d6 | refs/heads/master | 2021-01-24T07:55:03.872890 | 2019-03-20T13:04:38 | 2019-03-20T13:04:38 | 93,362,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from types import FunctionType
# 登录装饰器
def login_required(func):
print 'login check logic here'
def wrapper(*args, **kwargs):
print ("i get you", args, kwargs)
# 拿到这些值之后就可以去处理了return func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
class LoginDecorator(type):
def __new__(cls, name, bases, dct):
for name, value in dct.iteritems():
if name not in ('__metaclass__', '__init__', '__module__') and type(value) == FunctionType:
# 除了这几个方法都需要登录的验证
value = login_required(value)
dct[name] = value
return type.__new__(cls, name, bases, dct)
class Operation(object):
__metaclass__ = LoginDecorator
# 需要登陆验证的方法
def delete(self, x):
print 'deleted %s' % str(x)
def add(self, a):
print 'add %s' % str(a)
def main():
op = Operation()
op.delete('test')
print type(op)
if __name__ == '__main__':
main() | [
"wangzaijun1234@126.com"
] | wangzaijun1234@126.com |
6e8cba24b2548bcd9d286dde17fd656f2d2b9c88 | a8b023b73a400e7a7c6bb0d156e011df87b0b0b4 | /src/main/resources/ReadEDA.py | 65a1142d3388b46e0824deb1a5113d676bc0e5e2 | [] | no_license | makifcetinkaya/storm-simple | a5c998be004ea3ede2f05cd2f9b42a906d6f47e4 | 92f5098544a9de2365580cd9be0c98901289579a | refs/heads/master | 2021-01-06T20:45:36.261344 | 2013-02-20T16:55:11 | 2013-02-20T16:55:11 | 6,944,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | from edatoolkit import qLogFile
import os, sys
rpf = 5000 # read per file
edafile = qLogFile(sys.argv[1])
eda = edafile.EDA()
acc_x = edafile.ACC_X()
acc_y = edafile.ACC_Y()
acc_z = edafile.ACC_Z()
temp = edafile.Temperature()
# assume the lengths are the same
num_reads = len(eda)
num_files = num_reads/read_per_file
index = 0
while index < num_reads:
limit = index+rpf if index+rpf < num_reads else num_reads-1
eda_c = eda[index:limit]
acc_x_c = acc_x[index:limit]
acc_y_c = acc_y[index:limit]
acc_z_c = acc_z[index:limit]
temp_c = temp[index:limit]
f_name =
index = index + rpf
print "|".join([str(x) for x in edafile.EDA()]) #Access list of float values for EDA
print "|".join([str(x) for x in edafile.ACC_X()]) #Access list of float values for X
print "|".join([str(x) for x in edafile.ACC_Y()]) #Access list of float values for Y
print "|".join([str(x) for x in edafile.ACC_Z()]) #Access list of float values for Z
print "|".join([str(x) for x in edafile.Temperature()]) #Access list of float values for Temperature
print "Start: " + edafile.startTime.isoformat(" ")
print "End: " + edafile.endTime.isoformat(" ")
edafile.save("Copy.eda")
| [
"makif@mit.edu"
] | makif@mit.edu |
368438d6bd6eb2e764a63f7c2983f6a8380944e8 | 80775c192c7084171a0371b0fe14330b8cd89f0f | /stickerizer/emojis.py | 7070fbc525378011ade618d9b44d039bdcc88f9a | [
"MIT"
] | permissive | vanyakosmos/face2sticker | 5435ddbbc123c782a6501a78f6142e1ce88f9bc7 | 7b82eb12dd3e4c54c5033caee77f57b751f637b8 | refs/heads/master | 2021-09-13T07:40:51.156215 | 2018-04-26T17:16:24 | 2018-04-26T17:16:24 | 105,321,918 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | import numpy as np
from emotion_clf.emotion import load_clf, vectorize, emotions
clf = load_clf('emotion_clf/clf2.pkl')
def associate_emojis(face_landmarks):
emotions_probs = predict_probabilities(face_landmarks)
emoji = map_emoji(emotions_probs)
return emoji
def predict_probabilities(face_landmarks: dict):
landmarks = []
for points in face_landmarks.values():
landmarks.extend(points)
vector = vectorize(landmarks)
data = np.array([vector])
res = clf.predict_proba(data)[0]
probs = {}
for i, e in enumerate(emotions):
probs[e] = res[i]
return probs
def map_emoji(emotions_prob: dict):
emojis = {
'😡': {
'anger': 10,
},
'😒': {
'contempt': 10,
},
'😣': {
'disgust': 10,
},
'😱': {
'fear': 10,
},
'😀': {
'happiness': 10,
},
'😢': {
'sadness': 10,
},
'😮': {
'surprise': 10,
},
}
max_s = None
result = '🌚'
for emoji, ems in emojis.items():
s = sum([ems.get(e, 1) * emotions_prob[e] for e in emotions])
if not max_s or s > max_s:
max_s = s
result = emoji
return result
| [
"bachynin.i@gmail.com"
] | bachynin.i@gmail.com |
89e026a18c52f389d46597ba589fee07cc32a352 | d44d33899aaab3d2a8b693b648701d49810aca12 | /cip5-multiprofile-wave.py | e001a331576902efdc7df62b78d3e40a59f81237 | [] | no_license | izham-sugita/CIP | 208eee2e108a910abd3a137083638244b8f60303 | a0cd77531a34ad32a0cebeb6069123e89aceb0b5 | refs/heads/master | 2021-06-27T14:51:45.696969 | 2021-01-07T11:44:04 | 2021-01-07T11:44:04 | 204,810,048 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,796 | py | import numpy as np
import matplotlib.pyplot as plt
#Changing the default size
#fig_size = plt.rcParams["figure.figsize"]
#fig_size[0] = 20
#fig_size[1] = 16
#plt.rcParams["figure.figsize"] = fig_size
imax = 2001
imax = int( input("Enter imax ") )
length = 2.0 #-1<=x<=1
dx = length/(imax-1)
u = np.ndarray((imax),dtype=np.float64)
un = np.ndarray((imax),dtype=np.float64)
ud1 = np.zeros_like(u)
ud1n = np.zeros_like(u)
ud2 = np.zeros_like(u)
ud2n = np.zeros_like(u)
x = np.ndarray((imax),dtype=np.float64)
'''
for i in range(imax):
x[i] = i*dx
u[i] = 0.0
un[i] =0.0
if x[i] >= 4.0 and x[i] <= 6.0:
u[i] = 1.0
un[i]=1.0
'''
u[:] = 0.0
un[:] = 0.0
#multiple wave profile
for i in range(imax):
x[i] = -1.0 + i*dx
if x[i] >=-0.8 and x[i] <=-0.6:
u[i] = np.exp( -np.log(2.0)*(x[i]+0.7)**2 / 0.0009 )
un[i] = u[i]
elif x[i] >=-0.5 and x[i] <=-0.2:
u[i] = 1.0
un[i] = u[i]
elif x[i] >=0.0 and x[i] <=0.2:
u[i] = 1.0 - abs(10.0*x[i] - 1.0)
un[i] = u[i]
elif x[i] >=0.4 and x[i] <=0.6:
u[i] = np.sqrt( 1.0 - 100.0*(x[i] - 0.5)**2 )
un[i] = u[i]
#Initiate derivatives value
for i in range( 1, imax-1 ):
ud1[i] = 0.5*(u[i+1] - u[i-1])/dx
for i in range( 1, imax-1 ):
ud2[i] = 0.5*(ud1[i+1] - ud1[i-1])/dx
dt = np.float64(input("Enter dt, dx=%s\n "%dx ))
elapsed = 10.0
itermax = int( elapsed/dt )-int(elapsed/2.0) #adjusted timestep; don't know why
print("Maximum iteration: ", itermax)
c = 1.0
c = float(input("Enter c, +1.0 or -1.0 "))
alpha = c*dt/dx
eps = 1.0e-6
uexact = np.zeros_like(u)
'''
#calculating exact solution
for i in range(imax):
r1 = itermax*dt + 4.0
r2 = r1 + (6.0 - 4.0) #did this on purpose, a reminder
if x[i] >=r1 and x[i] <= r2:
uexact[i] = 1.0
'''
uexact[:] = u[:]
#matrix A
up = -np.sign(c)
A = np.array( [ [ (up*dx)**5, (up*dx)**4, (up*dx)**3],
[5.0*(up*dx)**4, 4.0*(up*dx)**3, 3.0*(up*dx)**2],
[20.0*(up*dx)**3, 12.0*(up*dx)**2, 6.0*up*dx] ] )
coef = np.array( [0.0, 0.0, 0.0] )
b = np.array( [0.0, 0.0, 0.0] )
xx = -c*dt
steps = 1
eps = 1.0e-8
phi = np.zeros_like(u)
for iter in range(itermax):
for i in range(1,imax-1):
up = -np.sign(c)
iup = i + int(up)
xx = -c*dt
b[0] = ( u[iup] - u[i] ) -0.5*ud2[i]*dx*dx - ud1[i]*up*dx
b[1] = ( ud1[iup] - ud1[i] ) - ud2[i]*up*dx
b[2] = ud2[iup] - ud2[i]
coef = np.linalg.solve(A, b)
a0 = coef[0]
a1 = coef[1]
a2 = coef[2]
a3 = ud2[i]*0.5
a4 = ud1[i]
#limiter
udif = ( u[iup] - u[i] )/dx*up
#minmod limiter
ratio = (u[i] - u[i-1]) / (u[i+1] - u[i] + eps)
phi0 = min(10.0*dx, ratio) #default is 1.0
phi[iup] = max(0.0, phi0)
#phi[iup] = 0.0
#van Leer (continuous function) #very diffusive
#ratio = (u[i] - u[i-1]) / (u[i+1] - u[i] + eps)
#phi[iup] = (ratio + abs(ratio)) / (1.0 + ratio)
#un[i] = a0*xx**5 + a1*xx**4 + a2*xx**3 + a3*xx**2 + a4*xx + u[i]
un[i] = u[i] + (1.0-phi[iup])*(a4*xx + a3*xx**2 + a2*xx**3 + a1*xx**4 + a0*xx**5) \
+ phi[iup]*(udif*xx)
ud1n[i] = (1.0 - phi[iup])*( 5.0*a0*xx**4 + 4.0*a1*xx**3 + 3.0*a2*xx**2 + 2.0*a3*xx \
+ ud1[i] ) + phi[iup]*udif
# weight 0.98, 0.01 is the least diffusive
#putting weight only on the first derivative
#un[i] = u[i] + (1.0 - phi[iup])*(a4*xx) + a3*xx**2 + a2*xx**3 + a1*xx**4 + a0*xx**5 \
# + phi[iup]*(udif*xx)
#ud1n[i] = 5.0*a0*xx**4 + 4.0*a1*xx**3 + 3.0*a2*xx**2 + 2.0*a3*xx \
# + (1.0 - phi[iup])*ud1[i] + phi[iup]*udif
#the second derivative is not affected
ud2n[i] = 20.0*a0*xx**3 + 12.0*a1*xx**2 + 6.0*a2*xx + ud2[i]
#update periodic BC
u[0] = un[imax-2]
ud1[0] = ud1n[imax-2]
ud2[0] = ud2n[imax-2]
u[imax-1] = un[imax-2]
ud1[imax-1] = ud1n[imax-2]
ud2[imax-1] = ud2n[imax-2]
for i in range(1, imax-1):
u[i] = un[i]
ud1[i] = ud1n[i]
ud2[i] = ud2n[i]
#update periodic BC
#u[imax-1] = un[imax-2]
#ud1[imax-1] = ud1n[imax-2]
#ud2[imax-1] = ud2n[imax-2]
#u[0] = un[imax-2]
#ud1[0] = ud1n[imax-2]
#ud2[0] = ud2n[imax-2]
'''
#update
u[:] = un[:]
ud1[:] = ud1n[:]
ud2[:] = ud2n[:]
'''
#if iter%steps == 0:
# num = str(iter)
# filename = "./data1D/f"+num.zfill(5)+".csv"
# fp = open(filename, "w")
# fp.write("x, u\n")
# for i in range(imax):
# str1 = str(x[i])
# str2 = str(u[i])
# comma = ","
# nextline = "\n"
# strall = str1+comma+str2+nextline
# fp.write(strall)
# fp.close()
current = iter*dt + dt
display = "t = %.4f"%(current)
phi[:] = 0.0
current = iter*dt + dt
display = "t = %.4f"%(current)
#plt.axis([0.0, 10.0, -0.5, 1.5 ] )
plt.axis([-2.0, 2.0, -0.5, 1.5 ] )
plt.title(display)
plt.ylabel("U")
plt.xlabel("x")
plt.plot(x,u,'bo-')
plt.pause(0.001)
plt.clf() #clear drawing
filename = "final.png"
#plt.axis([0.0, 10.0, -0.5, 1.5 ] )
plt.axis([-2.0, 2.0, -0.5, 1.5 ] )
plt.plot(x,u, 'bo-', x, uexact,'kv-')
plt.title(display)
plt.ylabel("U")
plt.xlabel("x")
plt.savefig(filename)
plt.show()
#plt.show(block=False)
#plt.pause(3)
#plt.close()
filename = "cip5-final.csv"
fp = open(filename, "w")
fp.write("x, u\n")
for i in range(imax):
str1 = str(x[i])
str2 = str(u[i])
comma = ","
nextline = "\n"
strall = str1+comma+str2+nextline
fp.write(strall)
fp.close()
| [
"sugita5019@gmail.com"
] | sugita5019@gmail.com |
ba555204a96171028853421c9e88db084f7bdb1e | cd5f75c051975e888cb86fea472b0108c747ba44 | /singlepage2/singlepage2/settings.py | 25c361d2e03441a6ebc0df3b84ef1423cb4f4499 | [] | no_license | astafanous/User-Interfaces | 68a53364c783f1a1d9e6a25a7d2c40b740f568f6 | 8ec84424394f14bf27228d02dda24944641557a4 | refs/heads/master | 2023-04-02T02:15:28.474982 | 2021-04-08T09:41:43 | 2021-04-08T09:41:43 | 348,271,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,095 | py | """
Django settings for singlepage2 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3rw$v0j9g)dm*g_vlf=0q4)wkog96=7bd24pd2afo@at7f%n9w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'singlepage',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'singlepage2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'singlepage2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"astafanousf@yahoo.com"
] | astafanousf@yahoo.com |
73d632b8868d43e2ed4fc736ba9368f1e9914594 | a16999aa79d39b1f6a1b9997cf7e2c8347a9b9e3 | /module.py | 84f6cd567fa0495a03905521f45c7454560f4392 | [] | no_license | song-ll/snippet | fa1d1b0a59c3b14bd5e744a4431e86cc4f7c0bf3 | b07a5fcdb00f15ac9fe345a548eaf7255c75a8df | refs/heads/master | 2021-07-14T03:50:41.278098 | 2017-10-19T23:05:56 | 2017-10-19T23:05:56 | 107,595,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | # -*- coding: utf-8 -*-
# @Author: Song
# @Date: 2017-06-24 16:21:15
# @Last Modified by: Song
# @Last Modified time: 2017-07-09 02:59:59
import os
from datetime import datetime
#print (dir(os))
print("current working directory:")
print (os.getcwd())
print("changed working directory:")
os.chdir ('C:/Intel/cc_android_2016.0.036/bin/intel64')
print (os.getcwd())
print (os.listdir('C:/Intel/cc_android_2016.0.036/bin'))
os.chdir('C:/Python36/')
os.mkdir ('osModule/file')
os.rmdir ('osModule/file')
os.removedirs ('osModule/file')
os.makedirs ('osModule/file')
os.listdir ('C:/Python34/osModule')
os.chdir('C:/Python34/osModule')
print (os.getcwd())
print(os.listdir())
#os.rename('C:\Python34\intel.PNG','C:\Python34\intel.JPG')
print ('Logo file : intel.JPG has properties as:')
print (os.stat('C:\Python36\intel.JPG'))
modify_time = os.stat('C:\Python36\intel.JPG').st_mtime
create_time = os.stat('C:\Python36\intel.JPG').st_ctime
print (datetime.fromtimestamp(modify_time))
print (datetime.fromtimestamp(create_time))
print (modify_time)
print (create_time)
for dirpath, dirname, filenames in os.walk('C:/Users/Song/Desktop'):
print ('Current path:', dirpath)
print ('list directories: ', dirname)
print ('list files under the directories:', filenames)
os.chdir ('C:/Users/Song/Desktop')
print (os.environ.get('Desktop'))
#print (os.environ.get('HOME'))
#file_path = os.path.join(os.environ.get(os.getcwd()), 'Meastro.sln')
#file_path = os.path.join(os.environ.get(os.getcwd()), 'Meastro.sln')
#print (file_path)
print (os.path.splitext('C:/Ruby23/bin/comics.txt'))
print (dir(os.path))
| [
"noreply@github.com"
] | noreply@github.com |
35514cba717b052fe2dd9fe516d0221dc51e4212 | 19422d728737921856cada03941a7b802569b9e0 | /src/extract_ngrams.py | 2147944240d15ccba23a0c7e581de6b6f5e45e07 | [] | no_license | eneskeles/re-comma | 1d63ecad274788c258fffc91febc43a6a4ffbf70 | a5c42fd6ed53687ec5bce6a29877afa575db794a | refs/heads/master | 2020-04-19T13:41:14.015342 | 2019-01-29T20:41:03 | 2019-01-29T20:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from modify_ngrams import modify_ngrams
from nltk.corpus import stopwords
from nltk import ngrams
import os,re
import string
def get_words(base_path, filename):
#print("inside get_words")
a = set(get_ngrams(base_path, filename, 1))
return list(a)
def get_ngram_lists(base_path, modified_movies, modification_rate, N=3):
#print("inside get_ngram_lists")
filenames = os.listdir(base_path)
ngram_lists = []
for count, filename in enumerate(filenames):
if count in modified_movies:
ngram_lists.append(get_ngrams(base_path, filename, N, modification_rate))
ngram_lists.append(get_ngrams(base_path, filename, N))
return ngram_lists
def get_ngrams(base_path, filename, N=3, modification_rate=0):
#print("inside get_ngrams")
with open(os.path.join(base_path, filename), 'r') as myfile:
data = myfile.read().lower()
data = re.sub('[' + string.punctuation + ']+', '', data)
data = re.sub('[\s]+',' ', data)
data = data.strip().split(' ')
stop_words = set(stopwords.words('english'))
sw_removed = []
for word in data:
if word not in stop_words:
sw_removed.append(word)
data = sw_removed
ngram_tuples = list(ngrams(data, N))
movie_ngrams = []
for ngram_tuple in ngram_tuples:
movie_ngrams.append(" ".join([i for i in ngram_tuple]))
if modification_rate != 0:
return modify_ngrams(modification_rate, movie_ngrams)
return movie_ngrams | [
"noreply@github.com"
] | noreply@github.com |
8a16b83756f3b6f2cc7e3bf1f7c8a50c5bc43058 | f171072a6390997ea4cf398f32ff3c7b7680af3a | /bumerang/apps/video/albums/migrations/0005_auto__add_field_videoalbum_created.py | 598fadaa544415799e09f3de533bda105c00bcce | [] | no_license | onewaterdrop/bumerang | f1ab153d033072c49de2edb976fa761bd4293bba | 0466c8b37ad1073d7ba4fc4dc00a5c6debb343a7 | refs/heads/master | 2021-01-15T23:41:23.379010 | 2014-06-23T07:57:48 | 2014-06-23T07:57:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,622 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VideoAlbum.created'
db.add_column('albums_videoalbum', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'VideoAlbum.created'
db.delete_column('albums_videoalbum', 'created')
models = {
'albums.photoalbum': {
'Meta': {'object_name': 'PhotoAlbum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['albums.PhotoCategory']", 'null': 'True', 'blank': 'True'}),
'cover': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photo.Photo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'published_in_archive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'albums.photocategory': {
'Meta': {'ordering': "('sort_order', 'title')", 'object_name': 'PhotoCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'albums.videoalbum': {
'Meta': {'object_name': 'VideoAlbum'},
'cover': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.Video']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'photo.photo': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Photo'},
'access': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'album': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '255', 'to': "orm['albums.PhotoAlbum']", 'null': 'True', 'blank': 'True'}),
'authors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'festivals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photo.PhotoGenre']", 'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'published_in_archive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'teachers': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2011', 'null': 'True', 'blank': 'True'})
},
'photo.photogenre': {
'Meta': {'ordering': "('sort_order', 'title')", 'object_name': 'PhotoGenre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'video.video': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Video'},
'access': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'album': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '255', 'to': "orm['albums.VideoAlbum']", 'null': 'True', 'blank': 'True'}),
'authors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.VideoCategory']", 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'festivals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'genre': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['video.VideoGenre']", 'null': 'True', 'blank': 'True'}),
'hq_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_broadcast_lists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'published_in_archive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'teachers': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2012', 'null': 'True', 'blank': 'True'})
},
'video.videocategory': {
'Meta': {'ordering': "('sort_order', 'title')", 'object_name': 'VideoCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'video.videogenre': {
'Meta': {'ordering': "('sort_order', 'title')", 'object_name': 'VideoGenre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['albums'] | [
"va.bolshakov@gmail.com"
] | va.bolshakov@gmail.com |
c44a67a3eaabc76d6e5635f62a79a69aa80faa77 | e5a511e346f5be8a82fe9cb2edf457aa7e82859c | /PythonNEW/Practice/StringRemoveExistingIdentitaion.py | f66992651f064d1098bc0a3e95b04ea1ee0ff896 | [] | no_license | nekapoor7/Python-and-Django | 8397561c78e599abc8755887cbed39ebef8d27dc | 8fa4d15f4fa964634ad6a89bd4d8588aa045e24f | refs/heads/master | 2022-10-10T20:23:02.673600 | 2020-06-11T09:06:42 | 2020-06-11T09:06:42 | 257,163,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | """ Write a Python program to remove existing indentation from all of the lines in a given text."""
import textwrap
sample_text = '''
Python is a widely used high-level, general-purpose, interpreted,
dynamic programming language. Its design philosophy emphasizes
code readability, and its syntax allows programmers to express
concepts in fewer lines of code than possible in languages such
as C++ or Java.
'''
text = textwrap.dedent(sample_text)
print(text) | [
"neha.kapoor070789@gmail.com"
] | neha.kapoor070789@gmail.com |
f6d6555a8ba6236ab372c46d3874d38c6e764625 | db7660d3541c26b418ea84ca08fbf41f9ebd726b | /brax/jumpy.py | 7486fd1ac0b8ac9b44e57f3f0ed88c4c3ca5f7a4 | [
"Apache-2.0"
] | permissive | proceduralia/brax | 8727ada08184fe9f60356d17a15ea671df0906d6 | d54dc479a32e5e99641cde921c7988d69cd5bb7b | refs/heads/main | 2023-08-27T15:39:44.618414 | 2021-11-08T21:35:42 | 2021-11-08T21:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,680 | py | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=redefined-builtin
"""Numpy backend for JAX that is called for non-jit/non-jax arrays."""
from typing import Any, Callable, List, Optional, Sequence, Tuple, TypeVar, Union
import jax
from jax import core
from jax import numpy as jnp
import numpy as onp
ndarray = Union[onp.ndarray, jnp.ndarray] # pylint:disable=invalid-name
tree_map = jax.tree_map # works great with jax or numpy as-is
pi = onp.pi
inf = onp.inf
float32 = onp.float32
int32 = onp.int32
def _in_jit() -> bool:
"""Returns true if currently inside a jax.jit call."""
return core.cur_sublevel().level > 0
def _which_np(*args):
"""Returns np or jnp depending on args."""
for a in args:
if isinstance(a, jnp.ndarray):
return jnp
return onp
F = TypeVar('F', bound=Callable)
def vmap(fun: F, include: Optional[Sequence[bool]] = None) -> F:
"""Creates a function which maps ``fun`` over argument axes."""
if _in_jit():
in_axes = 0
if include:
in_axes = [0 if inc else None for inc in include]
return jax.vmap(fun, in_axes=in_axes)
def _batched(*args):
args_flat, args_treedef = jax.tree_flatten(args)
vargs, vargs_idx = [], []
rets = []
if include:
for i, (inc, arg) in enumerate(zip(include, args_flat)):
if inc:
vargs.append(arg)
vargs_idx.append(i)
else:
vargs, vargs_idx = list(args_flat), list(range(len(args_flat)))
for zvargs in zip(*vargs):
for varg, idx in zip(zvargs, vargs_idx):
args_flat[idx] = varg
args_unflat = jax.tree_unflatten(args_treedef, args_flat)
rets.append(fun(*args_unflat))
return jax.tree_map(lambda *x: onp.stack(x), *rets)
return _batched
Carry = TypeVar('Carry')
X = TypeVar('X')
Y = TypeVar('Y')
def scan(f: Callable[[Carry, X], Tuple[Carry, Y]],
init: Carry,
xs: X,
length: Optional[int] = None,
reverse: bool = False,
unroll: int = 1) -> Tuple[Carry, Y]:
"""Scan a function over leading array axes while carrying along state."""
if _in_jit():
return jax.lax.scan(f, init, xs, length, reverse, unroll)
else:
xs_flat, xs_tree = jax.tree_flatten(xs)
carry = init
ys = []
maybe_reversed = reversed if reverse else lambda x: x
for i in maybe_reversed(range(length)):
xs_slice = [x[i] for x in xs_flat]
carry, y = f(carry, jax.tree_unflatten(xs_tree, xs_slice))
ys.append(y)
stacked_y = jax.tree_map(lambda *y: onp.vstack(y), *maybe_reversed(ys))
return carry, stacked_y
def take(tree: Any, i: Union[ndarray, Sequence[int]], axis: int = 0) -> Any:
"""Returns tree sliced by i."""
np = _which_np(i)
if isinstance(i, list) or isinstance(i, tuple):
i = np.array(i, dtype=int)
return jax.tree_map(lambda x: np.take(x, i, axis=axis, mode='clip'), tree)
def norm(x: ndarray,
axis: Optional[Union[Tuple[int, ...], int]] = None) -> ndarray:
"""Returns the array norm."""
return _which_np(x, axis).linalg.norm(x, axis=axis)
def index_update(x: ndarray, idx: ndarray, y: ndarray) -> ndarray:
"""Pure equivalent of x[idx] = y."""
if _which_np(x) is jnp:
return x.at[idx].set(y)
x = onp.copy(x)
x[idx] = y
return x
def safe_norm(x: ndarray,
axis: Optional[Union[Tuple[int, ...], int]] = None) -> ndarray:
"""Calculates a linalg.norm(x) that's safe for gradients at x=0.
Avoids a poorly defined gradient for jnp.linal.norm(0) see
https://github.com/google/jax/issues/3058 for details
Args:
x: A jnp.array
axis: The axis along which to compute the norm
Returns:
Norm of the array x.
"""
np = _which_np(x)
if np is jnp:
is_zero = jnp.allclose(x, 0.)
# temporarily swap x with ones if is_zero, then swap back
x = jnp.where(is_zero, jnp.ones_like(x), x)
n = jnp.linalg.norm(x, axis=axis)
n = jnp.where(is_zero, 0., n)
else:
n = onp.linalg.norm(x, axis=axis)
return n
def any(a: ndarray, axis: Optional[int] = None) -> ndarray:
"""Test whether any array element along a given axis evaluates to True."""
return _which_np(a).any(a, axis=axis)
def all(a: ndarray, axis: Optional[int] = None) -> ndarray:
"""Test whether all array elements along a given axis evaluate to True."""
return _which_np(a).all(a, axis=axis)
def mean(a: ndarray, axis: Optional[int] = None) -> ndarray:
"""Compute the arithmetic mean along the specified axis."""
return _which_np(a).mean(a, axis=axis)
def arange(start: int, stop: int) -> ndarray:
"""Return evenly spaced values within a given interval."""
return _which_np().arange(start, stop)
def dot(x: ndarray, y: ndarray) -> ndarray:
"""Returns dot product of two arrays."""
return _which_np(x, y).dot(x, y)
def outer(a: ndarray, b: ndarray) -> ndarray:
"""Compute the outer product of two vectors."""
return _which_np(a, b).outer(a, b)
def matmul(x1: ndarray, x2: ndarray) -> ndarray:
"""Matrix product of two arrays."""
return _which_np(x1, x2).matmul(x1, x2)
def inv(a: ndarray) -> ndarray:
"""Compute the (multiplicative) inverse of a matrix."""
return _which_np(a).linalg.inv(a)
def square(x: ndarray) -> ndarray:
"""Return the element-wise square of the input."""
return _which_np(x).square(x)
def repeat(a: ndarray, repeats: Union[int, ndarray]) -> ndarray:
"""Repeat elements of an array."""
return _which_np(a, repeats).repeat(a, repeats=repeats)
def floor(x: ndarray) -> ndarray:
"""Returns the floor of the input, element-wise.."""
return _which_np(x).floor(x)
def cross(x: ndarray, y: ndarray) -> ndarray:
"""Returns cross product of two arrays."""
return _which_np(x, y).cross(x, y)
def sin(angle: ndarray) -> ndarray:
"""Returns trigonometric sine, element-wise."""
return _which_np(angle).sin(angle)
def cos(angle: ndarray) -> ndarray:
"""Returns trigonometric cosine, element-wise."""
return _which_np(angle).cos(angle)
def arctan2(x1: ndarray, x2: ndarray) -> ndarray:
"""Returns element-wise arc tangent of x1/x2 choosing the quadrant correctly."""
return _which_np(x1, x2).arctan2(x1, x2)
def arccos(x: ndarray) -> ndarray:
"""Trigonometric inverse cosine, element-wise."""
return _which_np(x).arccos(x)
def logical_not(x: ndarray) -> ndarray:
"""Returns the truth value of NOT x element-wise."""
return _which_np(x).logical_not(x)
def multiply(x1: ndarray, x2: ndarray) -> ndarray:
"""Multiply arguments element-wise."""
return _which_np(x1, x2).multiply(x1, x2)
def minimum(x1: ndarray, x2: ndarray) -> ndarray:
"""Element-wise minimum of array elements."""
return _which_np(x1, x2).minimum(x1, x2)
def amin(x: ndarray) -> ndarray:
"""Returns the minimum along a given axis."""
return _which_np(x).amin(x)
def exp(x: ndarray) -> ndarray:
"""Returns the exponential of all elements in the input array."""
return _which_np(x).exp(x)
def sign(x: ndarray) -> ndarray:
"""Returns an element-wise indication of the sign of a number."""
return _which_np(x).sign(x)
def sum(a: ndarray, axis: Optional[int] = None):
"""Returns sum of array elements over a given axis."""
return _which_np(a).sum(a, axis=axis)
def random_prngkey(seed: int) -> ndarray:
"""Returns a PRNG key given a seed."""
if _which_np() is jnp:
return jax.random.PRNGKey(seed)
else:
rng = onp.random.default_rng(seed)
return rng.integers(low=0, high=2**32, dtype='uint32', size=2)
def random_uniform(rng: ndarray,
shape: Tuple[int, ...] = (),
low: Optional[float] = 0.0,
high: Optional[float] = 1.0) -> ndarray:
"""Sample uniform random values in [low, high) with given shape/dtype."""
if _which_np(rng) is jnp:
return jax.random.uniform(rng, shape=shape, minval=low, maxval=high)
else:
return onp.random.default_rng(rng).uniform(size=shape, low=low, high=high)
def random_split(rng: ndarray, num: int = 2) -> ndarray:
"""Splits a PRNG key into num new keys by adding a leading axis."""
if _which_np(rng) is jnp:
return jax.random.split(rng, num=num)
else:
rng = onp.random.default_rng(rng)
return rng.integers(low=0, high=2**32, dtype='uint32', size=(num, 2))
def segment_sum(data: ndarray,
segment_ids: ndarray,
num_segments: Optional[int] = None) -> ndarray:
"""Computes the sum within segments of an array."""
if _which_np(data, segment_ids) is jnp:
s = jax.ops.segment_sum(data, segment_ids, num_segments)
else:
if num_segments is None:
num_segments = onp.amax(segment_ids) + 1
s = onp.zeros((num_segments,) + data.shape[1:])
onp.add.at(s, segment_ids, data)
return s
def top_k(operand: ndarray, k: int) -> ndarray:
"""Returns top k values and their indices along the last axis of operand."""
if _which_np(operand) is jnp:
return jax.lax.top_k(operand, k)
else:
ind = onp.argpartition(operand, -k)[-k:]
return operand[ind], ind
def stack(x: List[ndarray], axis=0) -> ndarray:
"""Join a sequence of arrays along a new axis."""
return _which_np(*x).stack(x, axis=axis)
def concatenate(x: Sequence[ndarray], axis=0) -> ndarray:
"""Join a sequence of arrays along an existing axis."""
return _which_np(*x).concatenate(x, axis=axis)
def sqrt(x: ndarray) -> ndarray:
"""Returns the non-negative square-root of an array, element-wise."""
return _which_np(x).sqrt(x)
def where(condition: ndarray, x: ndarray, y: ndarray) -> ndarray:
"""Return elements chosen from `x` or `y` depending on `condition`."""
return _which_np(condition, x, y).where(condition, x, y)
def diag(v: ndarray, k: int = 0) -> ndarray:
"""Extract a diagonal or construct a diagonal array."""
return _which_np(v).diag(v, k)
def clip(a: ndarray, a_min: ndarray, a_max: ndarray) -> ndarray:
"""Clip (limit) the values in an array."""
return _which_np(a, a_min, a_max).clip(a, a_min, a_max)
def eye(n: int) -> ndarray:
"""Return a 2-D array with ones on the diagonal and zeros elsewhere."""
return _which_np().eye(n)
def zeros(shape, dtype=float) -> ndarray:
"""Return a new array of given shape and type, filled with zeros."""
return _which_np().zeros(shape, dtype=dtype)
def zeros_like(a: ndarray) -> ndarray:
"""Return an array of zeros with the same shape and type as a given array."""
return _which_np(a).zeros_like(a)
def ones(shape, dtype=float) -> ndarray:
"""Return a new array of given shape and type, filled with ones."""
return _which_np().ones(shape, dtype=dtype)
def ones_like(a: ndarray) -> ndarray:
"""Return an array of ones with the same shape and type as a given array."""
return _which_np(a).ones_like(a)
def reshape(a: ndarray, newshape: Union[Tuple[int, ...], int]) -> ndarray:
"""Gives a new shape to an array without changing its data."""
return _which_np(a).reshape(a, newshape)
def array(object: Any, dtype=None) -> ndarray:
"""Creates an array given a list."""
try:
np = _which_np(*object)
except TypeError:
np = _which_np(object) # object is not iterable (e.g. primitive type)
return np.array(object, dtype)
| [
"erikfrey@google.com"
] | erikfrey@google.com |
38319b02bd4e57cd29c0ab49c5da043995369dde | 1cee2094b52f23172f262d4290011840066fcfc1 | /cw/cw_proj/model_hyperparameters.py | a4c6efaa929655998fa365b62d7ab3d9597918bc | [] | no_license | VariableDeclared/uop-computer-vision | 3210db7fd8797e0a94b807ee4449b73351b1ef0e | 59c07b9b25bd020409b28f88bb4aa7b9a1e543ca | refs/heads/master | 2020-03-30T14:01:43.298122 | 2019-05-07T08:17:48 | 2019-05-07T08:17:48 | 151,297,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,380 | py | from tensorflow.keras.layers import Dense, LSTM, Dropout
import tensorflow as tf
import pprint
import os
import datetime
import main
import numpy as np
import cv2 as cv
def modelv1(num_frames):
    """Build and compile the baseline LSTM classifier.

    The network reads ``num_frames`` feature vectors of width 1000 per
    sample and predicts one of 60 classes via a softmax head.
    """
    layers = [
        LSTM(100, input_shape=(num_frames, 1000)),
        Dropout(0.5),
        Dense(100, activation="relu"),
        Dense(60, activation="softmax"),
    ]
    model = tf.keras.Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(
        loss=tf.losses.CategoricalCrossentropy(),
        optimizer=tf.optimizers.Adam(),
        metrics=['accuracy'],
    )
    return model
def modelv2_dyn(*args, **kwargs):
    """Build and compile an LSTM classifier with a configurable LSTM width.

    Required keyword arguments:
        ltsm_units: number of units in the LSTM layer.
        num_frames: number of time steps per sample.
    """
    net = tf.keras.Sequential()
    net.add(LSTM(kwargs["ltsm_units"], input_shape=(kwargs["num_frames"], 1000)))
    net.add(Dropout(0.5))
    net.add(Dense(100, activation="relu"))
    net.add(Dense(60, activation="softmax"))
    net.compile(
        loss=tf.losses.CategoricalCrossentropy(),
        optimizer=tf.optimizers.Adam(),
        metrics=['accuracy'],
    )
    return net
# ltsm_units, dense_units=50, learning_rate=0.001, num_frames=38
def modelv3_dyn(*args, **kwargs):
    """Build and compile a deeper LSTM classifier with a tunable learning rate.

    Required keyword arguments:
        ltsm_units: number of units in the LSTM layer.
        dense_units: width of the two hidden Dense layers.
        learning_rate: Adam learning rate.
        num_frames: number of time steps per sample.
    """
    net = tf.keras.Sequential()
    net.add(LSTM(kwargs["ltsm_units"], input_shape=(kwargs["num_frames"], 1000)))
    net.add(Dropout(0.5))
    net.add(Dense(kwargs["dense_units"]))
    net.add(Dense(kwargs["dense_units"], activation="relu"))
    net.add(Dense(60, activation="softmax"))
    net.compile(
        loss=tf.losses.CategoricalCrossentropy(),
        optimizer=tf.optimizers.Adam(learning_rate=kwargs["learning_rate"]),
        metrics=['accuracy'],
    )
    return net
from sklearn.metrics import accuracy_score
def eval_svm(train, val, *args, **kwargs):
    """Train a one-class SVM on ``train`` and return its accuracy on ``val``.

    Args:
        train: ``(features, labels)`` numpy arrays, one sample per row, or
            ``None`` to load the default split via ``main.testv3()``.
        val: ``(features, labels)`` evaluation split, or ``None`` (see above).
        **kwargs: optional ``svm_kernel`` / ``kernel_type`` (a ``cv.ml.SVM_*``
            kernel id; default ``cv.ml.SVM_LINEAR``) and ``nu`` (one-class
            margin fraction in (0, 1]; default 0.2).

    Returns:
        float: accuracy of the SVM predictions against the ``val`` labels.
    """
    if train is None or val is None:
        train, val = main.testv3()
    # The debug print must come after the None fallback; printing first (the
    # old order) dereferenced train[0] on a None argument and crashed.
    print("[DEBUG] Train Shape: {} Val shape: {}".format(train[0].shape, train[1].shape))
    # Accept both the historical "svm_kernel" key and the "kernel_type" key
    # that run_svm()/run_suite() actually pass.  Using get() with a default
    # (instead of a truthiness test) keeps kernel id 0 (cv.ml.SVM_LINEAR)
    # selectable.
    kernel = kwargs.get("svm_kernel", kwargs.get("kernel_type", cv.ml.SVM_LINEAR))
    # Keep the historical falsy fallback: nu=0 is invalid for a one-class SVM.
    nu = kwargs.get("nu") or 0.2
    svm = cv.ml.SVM_create()
    svm.setNu(nu)
    svm.setType(cv.ml.SVM_ONE_CLASS)
    svm.setKernel(kernel)
    svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
    svm.train(train[0].astype("float32"), cv.ml.ROW_SAMPLE, train[1].astype("int32"))
    predicted = svm.predict(val[0].astype("float32"))
    print("[DEBUG] {}.{}".format(predicted[1].shape, val[1].shape))
    # https://stackoverflow.com/questions/19629331/python-how-to-find-accuracy-result-in-svm-text-classifier-algorithm-for-multil
    return accuracy_score(val[1], predicted[1])
def perform_modelv2_run(train=None, validation=None, info=None):
    """Train and evaluate the LSTM model variants over several LSTM widths.

    Each model in the suite is built at unit sizes 32/64/256/512 and
    evaluated 10 times; per-run accuracies are collected and written to a
    JSON file via save_results().

    Args:
        train: optional (x, y) training tuple; when either argument is None
            both splits are loaded via main.testv3().
        validation: optional (x, y) validation tuple.
        info: arbitrary metadata dict stored alongside the results.

    Returns:
        The (train, validation) tuples actually used, so callers can reuse
        the loaded data.
    """
    # Epoch count is overridable through the EPOCHS environment variable.
    epochs = int(os.environ.get("EPOCHS")) if os.environ.get("EPOCHS") else 15
    results = {}
    train_tuple = None
    val_tuple = None
    if train is None or validation is None:
        train_tuple, val_tuple = main.testv3()
    else:
        train_tuple = train
        val_tuple = validation
    num_frames = 38
    dense_units = 100
    #unit_size, 100, 0.005, num_frames
    #(unit_size, 100, 0.010, num_frames)
    # Model suite: "fn" is the builder, "opt_args" the learning rate passed
    # to it (modelv2_dyn's builder ignores the learning_rate keyword).
    models = {
        "modelv2": {
            "fn": modelv2_dyn,
            "opt_args": None
        },
        "modelv3": {
            "fn": modelv3_dyn,
            "opt_args": 0.005
        },
        "modelv4": {
            "fn": modelv3_dyn,
            "opt_args": 0.010
        }#(unit_size, 100, 0.010, num_frames)
    }
    model_count = 1
    for model in models:
        # NOTE(review): dir_prefix is recomputed every iteration but only the
        # final value reaches save_results() after the loop, so all models'
        # results land in the last model's directory — confirm intent.
        dir_prefix = "./runs/{}.{}".format(
            model,
            datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        )
        model_index = "Model:{}".format(model)
        for unit_size in [32, 64, 256, 512]:
            # Build a fresh model for this (model variant, LSTM width) pair.
            _model = models[model]["fn"](
                ltsm_units=unit_size,
                dense_units=dense_units,
                learning_rate=models[model]["opt_args"],
                num_frames=num_frames
            )
            # NOTE(review): results[unit_size] is reset on every model pass,
            # so a later model overwrites an earlier model's entry for the
            # same unit size — only the last model's runs survive per key.
            results[unit_size] = {}
            results[unit_size][model_index] = {}
            # Persist the keras summary for this configuration.
            with open("model_summaries/{}.{}".format(model_index, unit_size), "w") as fh:
                _model.summary(print_fn=lambda x: fh.write(x + "\n"))
            runs = {}
            for run in range(0,10):
                accuracy = main.evaluate_model(
                    _model,
                    train_tuple[0],
                    train_tuple[1],
                    val_tuple[0],
                    val_tuple[1],
                    num_frames,
                    epochs
                )
                print("[DEBUG] Accuracy: {}".format(accuracy[1]))
                runs[run] = float(accuracy[1])
            results[unit_size][model_index]["runs"] = runs
        model_count += 1
        # Replace the builder with its name so the dict is JSON-serializable.
        models[model]["fn"] = models[model]["fn"].__name__
    results.update({
        "epochs": epochs,
        "train_folder_len": train_tuple[0].shape[0],
        "val_folder_len": val_tuple[0].shape[0],
        "info": info,
        "model": model  # NOTE(review): loop variable read after the loop — records the last model only
    })
    save_results(dir_prefix, results)
    return train_tuple, val_tuple
def print_results_dict(dictionary):
    """Pretty-print a results dictionary to stdout."""
    pprint.PrettyPrinter().pprint(dictionary)
def print_results(results):
    """Print the accuracy (index 1) of every run entry as a percentage."""
    for entry in results:
        print("{}".format(entry[1] * 100))
def load_data():
    """Load the cached train/validation feature arrays from disk.

    The directory is taken from the SAVED_TRAIN_DATA environment variable,
    falling back to "./data".

    Returns:
        ((x_train, y_train), (x_val, y_val)) as numpy arrays.
    """
    data_dir = os.environ.get("SAVED_TRAIN_DATA") or "./data"
    arrays = {
        name: np.load("{}/{}.npy".format(data_dir, name))
        for name in ("x_train", "x_val", "y_train", "y_val")
    }
    return (arrays["x_train"], arrays["y_train"]), (arrays["x_val"], arrays["y_val"])
def save_results(prefix="run", results=None):
    """Serialize a results dict to a timestamped JSON file and echo it.

    Args:
        prefix: directory (relative to CWD) to write the run file into;
            created — including missing parents — when absent.
        results: results dictionary to persist; defaults to empty.
    """
    import json
    from pathlib import Path
    if results is None:
        # Avoid the shared mutable-default pitfall of results={}.
        results = {}
    path = Path("./{}".format(prefix))
    # makedirs handles nested prefixes such as "./runs/modelv2.<timestamp>"
    # (passed by perform_modelv2_run) where the parent directory may not
    # exist yet; the old os.mkdir raised FileNotFoundError in that case.
    os.makedirs(path, exist_ok=True)
    # https://stackoverflow.com/questions/7999935/python-datetime-to-string-without-microsecond-component
    file_name = Path(
        "./{}/run:{}.json".format(
            prefix,
            datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        )
    )
    # Use a context manager so the file handle is closed deterministically
    # (the old code leaked the handle returned by open()).
    with open(file_name, "w") as fh:
        json.dump(results, fh)
    print_results_dict(results)
    print("[INFO]: Value Saved to {}".format(file_name))
def run_svm(train, val, info, **kwargs):
    """Run repeated one-class SVM evaluations and persist their accuracies.

    Args:
        train: (features, labels) training split.
        val: (features, labels) validation split.
        info: metadata dict merged into the saved results.
        **kwargs: SVM options forwarded to eval_svm (e.g. kernel_type, nu).
    """
    svm_results = {}
    for run in range(1, 10):
        # Forward the options as keyword arguments.  The old call passed the
        # kwargs dict positionally into eval_svm's *args, which silently
        # dropped every SVM option (kernel, nu, C).
        accuracy = eval_svm(train, val, **kwargs)
        svm_results[run] = accuracy
    svm_results.update({
        "info": info,
    })
    save_results("SVM", svm_results)
def run_suite(train=None, val=None):
    """Run the full evaluation suite: K-fold SVM sweeps plus LSTM training.

    For every K-fold split of the training data, sweeps SVM kernels and C
    values via run_svm(), then trains the LSTM model suite via
    perform_modelv2_run() for each configured epoch count.

    Args:
        train: optional (x, y) training tuple; when either argument is None
            both splits are loaded from disk via load_data().
        val: optional (x, y) validation tuple.

    Returns:
        The (train, val) tuples used.
    """
    # Fold count is overridable through the K_FOLD environment variable.
    K_FOLD = int(os.environ.get("K_FOLD")) if os.environ.get("K_FOLD") else 10
    if train is None or val is None:
        train_tuple, val_tuple = load_data()
        train, val = train_tuple, val_tuple
    from sklearn.model_selection import KFold
    kf = KFold(n_splits=K_FOLD)
    kf.get_n_splits(train)
    info = {
        "KFold": K_FOLD
    }
    for train_index, test_index in kf.split(train[1]):
        X_train, X_test = train[0][train_index], train[0][test_index]
        y_train, y_test = train[1][train_index], train[1][test_index]
        # run_epochs = ["60", "120", "240", "480"]
        # run_epochs = ["120", "240", "480", "920"]
        run_epochs = ["240"]
        # Sweep SVM kernel types and C values for this fold.
        for kernel in [cv.ml.SVM_LINEAR, cv.ml.SVM_INTER, cv.ml.SVM_CHI2]:
            for c in [0, 0.001, 0.01]:
                i = 0
                info.update({
                    "C": c,
                    "kernel": kernel
                })
                # NOTE(review): the loop bound is X_train[0].shape[0] (the
                # first sample's leading dimension) while the index selects
                # samples X_train[i] — confirm whether X_train.shape[0] was
                # intended as the bound.
                while i < X_train[0].shape[0]:
                    run_svm(
                        # (X_train, y_train),
                        # (X_test, y_test),
                        (X_train[i], np.full((X_train[i].shape[0], 1), np.argmax(y_train[i]))),
                        # NOTE(review): X_test[1].shape[0] below looks like a
                        # typo for X_test[i%X_test.shape[0]].shape[0] — the
                        # label vector length is taken from sample 1 while the
                        # features come from sample i%len. Confirm intent.
                        (X_test[i%X_test.shape[0]], np.full((X_test[1].shape[0], 1), np.argmax(y_test[i%y_test.shape[0]]))),
                        info,
                        C=c,
                        kernel_type=kernel
                    )
                    i += 1
        # Train the LSTM suite on this fold for each configured epoch count;
        # perform_modelv2_run reads the epoch count from the environment.
        for epoch_count in run_epochs:
            os.environ["EPOCHS"] = epoch_count
            print("[INFO] Epoch Count: {}".format(epoch_count))
            perform_modelv2_run(
                (X_train, y_train),
                (X_test, y_test),
                info
            )
    return train, val
| [
"petedes@live.com"
] | petedes@live.com |
2edcd56053e680eecd41a98f819562aeef6b12f5 | 2b238627de066356b54503bf762ec6dfd6d26e9a | /BinarySearchTrees/KthLargestValueBST.py | de73ae5a609b561064528d9f98de1cd8e2ef71b7 | [] | no_license | LKhushlani/leetcode | 601e1efa8ae6dbfdd4c3b49710f82cf68b9fa54a | 0a4643ca38224bd4cb4a101a6be3161f35979911 | refs/heads/master | 2023-04-28T08:17:38.286004 | 2023-04-20T02:13:52 | 2023-04-20T02:13:52 | 237,696,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | # This is an input class. Do not edit.
class BST:
    """Binary-search-tree node: a value plus optional left/right subtrees."""

    def __init__(self, value, left=None, right=None):
        # A leaf is simply a node whose children were left as None.
        self.value, self.left, self.right = value, left, right
class TreeNode:
    """Traversal bookkeeping: visit count plus the value seen most recently."""

    def __init__(self, noOfVisitedNodes, latestVisitedNode):
        self.noOfVisitedNodes, self.latestVisitedNode = (
            noOfVisitedNodes,
            latestVisitedNode,
        )
def findKthLargestValueInBst(tree, k):
    """Return the k-th largest value stored in the BST rooted at ``tree``.

    Performs a reverse in-order walk that stops after k visits — O(h + k)
    time and O(h) space for a tree of height h.  Returns -1 when the tree
    holds fewer than k nodes.
    """
    state = TreeNode(0, -1)
    reverseInorderTraversal(tree, state, k)
    return state.latestVisitedNode
# O(n+k) time , O h space
def reverseInorderTraversal(node, treeInfo, k):
    """Walk the subtree right-to-left, recording values until k nodes are seen.

    Mutates ``treeInfo`` in place: increments ``noOfVisitedNodes`` and stores
    each visited value in ``latestVisitedNode``; stops descending once k
    nodes have been counted.
    """
    if node is None:
        return
    if treeInfo.noOfVisitedNodes >= k:
        return
    # Largest values first: right subtree, then this node, then the left.
    reverseInorderTraversal(node.right, treeInfo, k)
    if treeInfo.noOfVisitedNodes < k:
        treeInfo.noOfVisitedNodes += 1
        treeInfo.latestVisitedNode = node.value
    reverseInorderTraversal(node.left, treeInfo, k)
| [
"lavinakhushlani@gmail.com"
] | lavinakhushlani@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.