blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4cc3eae8ce8dc40427cfc6263c0d8d9207e33ce | e2590e0a78046a22131b69c76ebde21bf042cdd1 | /ABC201_300/ABC275/A.py | 6bc5a95d16891d1502a3adf5fbd2ff8aa0b3a6a3 | [] | no_license | masato-sso/AtCoderProblems | b8e23941d11881860dcf2942a5002a2b19b1f0c8 | fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8 | refs/heads/main | 2023-01-22T23:57:58.509585 | 2023-01-21T14:07:47 | 2023-01-21T14:07:47 | 170,867,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py |
N = int(input())
H = list(map(int, input().split()))
maxValue = max(H)
ans = 0
for idx,h in enumerate(H):
if(h == maxValue):
ans = idx + 1
break
print(ans) | [
"masato@seijinnoMacBook-Pro-2.local"
] | masato@seijinnoMacBook-Pro-2.local |
9418bf8162cced953666e74e72750c54214a25e4 | ca87c047f49a4aa893224466c4ea54e1801e0de2 | /code/pywin32/excel/extract_excel_data.py | 0659280bc294925a0cfacd0f9fb16e9b812f4540 | [] | no_license | jpereiran/jpereiran-blog | a46504871dfbd1a007090d4a39fe51ddced0032c | 08385e2e8b0a0440d1fda81293f8692c923174a1 | refs/heads/master | 2021-10-11T06:51:28.190808 | 2021-10-04T00:19:49 | 2021-10-04T00:19:49 | 192,151,831 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,971 | py | import win32com.client
import glob
import sys, io
# Open up Excel and make it visible (actually you don't need to make it visible)
excel = win32com.client.Dispatch('Excel.Application')
excel.Visible = True
# Select the path of the folder with all the files
files = glob.glob("folder_path/*.xlsx")
# Redirect the stdout to a file
orig_stdout = sys.stdout
bk = io.open("Answers_Report.txt", mode="w", encoding="utf-8")
sys.stdout = bk
# Go through all the files in the folder
for file in files:
print(file.split('\\')[1])
wb_data = excel.Workbooks.Open(file)
# Get the answers to the Q1A
mission=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C6")
vision =wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C7")
print("Question 1A")
print("Mission:",mission)
print("Vision:" ,vision)
print()
# Get the answers to the Q1B
oe1=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C14")
ju1=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D14")
oe2=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C15")
ju2=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D15")
print("Question 1B")
print("OEN1:",oe1, "- JUSTIF:",ju1)
print("OEN2:",oe2, "- JUSTIF:",ju2)
print()
# Get the answers to the Q2A
mision=wb_data.Worksheets("2a_MisionyVisionSI").Range("C6")
vision=wb_data.Worksheets("2a_MisionyVisionSI").Range("C7")
print("Question 2A")
print("Mission SI:",mision)
print("Vision SI:",vision)
print()
# Get the answers to the Q3A
print("Question 3A")
for i in range(5,13):
proy=wb_data.Worksheets("3a_ProySI").Range("B"+str(i))
desc=wb_data.Worksheets("3a_ProySI").Range("D"+str(i))
mcfr=wb_data.Worksheets("3a_ProySI").Range("E"+str(i))
tipo=wb_data.Worksheets("3a_ProySI").Range("F"+str(i))
print("\tProyect:",proy)
print("\tDesc:",desc)
print("\tMacFarlan:",mcfr,"- Tipo",tipo)
print()
# Close the file without saving
wb_data.Close(True)
# Restoring the stdout
sys.stdout = orig_stdout
bk.close()
# Create a new Excel file for the grading template
wb_template = excel.Workbooks.Add()
# Headers of the template
wb_template.Worksheets(1).Range("A1").Value = 'File'
wb_template.Worksheets(1).Range("B1").Value = 'Q1A'
wb_template.Worksheets(1).Range("C1").Value = 'C1A'
wb_template.Worksheets(1).Range("D1").Value = 'Q1B'
wb_template.Worksheets(1).Range("E1").Value = 'C1A'
wb_template.Worksheets(1).Range("F1").Value = 'Q2A'
wb_template.Worksheets(1).Range("G1").Value = 'C2A'
wb_template.Worksheets(1).Range("H1").Value = 'Q3A'
wb_template.Worksheets(1).Range("I1").Value = 'C3A'
# Add the path of each file into the template
for idx, arch in enumerate(files):
wb_template.Worksheets(1).Range("A"+str(idx+2)).Value = arch.replace('\\','/')
# Save the grading template without alerts
excel.DisplayAlerts = False
wb_template.SaveAs(r'folder_path\Grades_Template.xlsx')
# Close the file and the program
wb_template.Close()
excel.DisplayAlerts = True
excel.Quit()
| [
"noreply@github.com"
] | jpereiran.noreply@github.com |
5e759521921a5fbee942af6ff03899975bbd0b35 | 84ab518741695c4cdaaaaad7aacd242a48542373 | /practicePrograms2.py | 2a9bca47df4a22c71dce1bbf0feda7d31fd35710 | [] | no_license | Aakashgarg743/Learn-Python | 755818988dc391dc9cdea7091a6488fdb39b0b3d | b5e832146845ed140e63f7f2151af70d21e44003 | refs/heads/master | 2023-08-11T08:14:00.474551 | 2021-09-29T13:52:33 | 2021-09-29T13:52:33 | 409,439,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | # DICTIONARY
# Simple interactive dictionary: show the available terms, then print the
# definition of the term the user types in (case-insensitive).
user = input("Welcome To My Dictionary\n['python', 'pip', 'functions']\nEnter any word that are listed above to get the meaning....\n").lower()
# Bug fix: the key for 'functions' was misspelled 'funcitons', so the term
# advertised in the prompt could never be looked up successfully.
dic = {"python": "it is a programming language..", "pip": "it is used to install packages", "functions": "it is a block of code that only runs when it is called..."}
if user in dic.keys():
    print(dic[user])
else:
    print("You entered wrong input...")
# FAULTY - CALCULATOR
def add(num1, num2):
    """Add two numbers given as strings and return the sum.

    Part of a deliberately 'faulty calculator' exercise: for the inputs
    "56" and "9" it prints the wrong answer "77" and returns None instead
    of computing the real sum.
    """
    if num1=="56" and num2=="9":
        print("77")
    else:
        # Ask the user whether the result should be a float or an int.
        user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
        if user == 1:
            val = float(num1) + float(num2)
        else:
            val = int(num1) + int(num2)
        return val
def sub(num1, num2):
    """Subtract num2 from num1 (both given as strings) and return the result.

    The user chooses whether the result is computed as a float ('Decimal')
    or as an int ('Integer').
    """
    user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
    if user == 1:
        val = float(num1) - float(num2)
    else:
        val = int(num1) - int(num2)
    return val
def mul(num1, num2):
    """Multiply two numbers given as strings and return the product.

    Part of the deliberately 'faulty calculator' exercise: for the inputs
    "45" and "3" it prints the wrong answer "555" and returns None instead
    of computing the real product.
    """
    if num1=="45" and num2=="3":
        print("555")
    else:
        # Ask the user whether the result should be a float or an int.
        user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
        if user == 1:
            val = float(num1) * float(num2)
        else:
            val = int(num1) * int(num2)
        return val
def div(num1, num2):
    """Divide num1 by num2 (both given as strings) and return the quotient.

    'Decimal' mode uses true division; 'Integer' mode uses floor
    division (//), so remainders are discarded there.
    """
    user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
    if user == 1:
        val = float(num1) / float(num2)
    else:
        val = int(num1) // int(num2)
    return val
if __name__=='__main__':
    # Entry point: read two numbers, then dispatch to the chosen operation.
    n1 = input("Enter 1st number...\n")
    n2 = input("Enter 2nd number...\n")
    # NOTE: isdigit() rejects negative numbers and decimals, so only
    # non-negative integer input is accepted here.
    if n1.isdigit() and n2.isdigit():
        inpu = int(input("What operation you want to perform\nType- \n1. Addition\n2. Subtraction\n3. Multiplication\n4. Division\n"))
        if inpu == 1:
            print(add(n1, n2))
        elif inpu == 2:
            print(sub(n1, n2))
        elif inpu == 3:
            print(mul(n1, n2))
        elif inpu ==4:
            print(div(n1, n2))
        else:
            print("Wrong Input...")
    else:
        print("You enter wrong input")
"91084902+Aakashgarg743@users.noreply.github.com"
] | 91084902+Aakashgarg743@users.noreply.github.com |
31e5d88aad90549955249b4cb57b003d157e5527 | 620ca56701bce0add202f3cbe7c62036e4b1e359 | /Course_3/Week_1/validations2.py | 77d004fd5a9ff1e3999c8772722fd59fa072d73d | [
"MIT"
] | permissive | gpastor3/Google-ITAutomation-Python | 1f52dbff0b8f0832ab3fea4ac9c468c667363e1a | 6027750a33e8df883d762223bb0c4a5a95395bc0 | refs/heads/main | 2023-04-05T00:29:10.902116 | 2021-02-04T02:08:06 | 2021-02-04T02:08:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | #!/usr/bin/env python3
"""
This script is used for course notes.
Author: Erick Marin
Date: 01/06/2020
"""
def validate_user(username, minlen):
    """Return True if *username* is a valid username of at least *minlen* chars.

    A valid username is purely alphanumeric and does not begin with a digit.

    Raises:
        TypeError: if username is not a string.
        ValueError: if minlen is less than 1.
    """
    # Argument validation uses raise (not assert) so the checks survive
    # python -O and callers get a precise exception type.
    if type(username) is not str:
        raise TypeError("username must be a string")
    if minlen < 1:
        raise ValueError("minlen must be at least 1")
    # All validity rules expressed as one short-circuiting conjunction;
    # the length check runs first, so username[0] is never touched on "".
    return (
        len(username) >= minlen
        and username.isalnum()
        and not username[0].isnumeric()
    )
"emarin.iot@gmail.com"
] | emarin.iot@gmail.com |
a0d3caee1fbf6c2afadd6139c75f0fb247dbe328 | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/.history/views_20191102195546.py | 879e6589a3c510e2404c8ff9b59bed87520c898f | [] | no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 3,630 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.tokens import RefreshToken
from .models import Usuario
from .serializers import UsuarioSerializer
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
    """Return the serialized Usuario matching *identificador*, or 404."""
    try:
        usuario = Usuario.objects.get(identificador = identificador)
    # Bug fix: the exception class must come from the model class 'Usuario';
    # the original referenced the local variable 'usuario', which is unbound
    # exactly when objects.get() raises, producing a NameError instead of 404.
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = UsuarioSerializer(usuario)
        return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
    """Update the Usuario matching *identificador* from the request payload.

    Returns 404 if the user does not exist, 400 on validation errors.
    """
    try:
        usuario = Usuario.objects.get(identificador = identificador)
    # Bug fix: catch Usuario.DoesNotExist (on the model class); the original
    # referenced the unbound local 'usuario', raising NameError instead of 404.
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'PUT':
        serializer = UsuarioSerializer(usuario, data=request.data)
        data = {}
        if serializer.is_valid():
            serializer.save()
            data[SUCCESS] = UPDATE_SUCCESS
            return Response(data=data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
    """Delete the Usuario matching *identificador*; 404 if absent."""
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    # Bug fix: catch Usuario.DoesNotExist (on the model class); the original
    # referenced the unbound local 'usuario', raising NameError instead of 404.
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        operation = usuario.delete()
        data = {}
        if operation:
            data[SUCCESS] = DELETE_SUCCESS
            return Response(data=data)
@api_view(['POST',])
@permission_classes([AllowAny,])
def api_create_usuario_view(request):
    """Register a new Usuario from the POSTed payload.

    Returns 201 with the serialized user on success, 400 on validation errors.
    """
    if request.method == 'POST':
        serializer = UsuarioSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            usuario = serializer.save()
            # NOTE(review): 'data' is populated here but the response below
            # returns serializer.data instead -- confirm which payload the
            # client actually expects.
            data['response'] = "se registró de forma exitosa"
            data['nombre'] = usuario.nombre
            data['usuario'] = usuario.usuario
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
@permission_classes([AllowAny,])
def api_login(request):
usuario = request.data.get("usuario")
contraseña = request.data.get("contraseña")
if usuario is None or contraseña is None:
return Response({'error': 'No existen contraseña ni usuario'},
status=HTTP_400_BAD_REQUEST)
usuario = authenticate(usuario=usuario, contraseña=contraseña)
get_tokens_for_user(usuario)
return {
'refresh': str(token),
'access': str(token.access_token),
}
def for_user(cls, user):
    """
    Returns an authorization token for the given user that will be provided
    after authenticating the user's credentials.
    """
    # NOTE(review): this looks like a copy of SimpleJWT's
    # RefreshToken.for_user pasted at module level; 'cls' and 'api_settings'
    # are never supplied here, so calling it would fail -- confirm whether
    # this function is actually used anywhere.
    user_id = getattr(user, api_settings.USER_ID_FIELD)
    if not isinstance(user_id, int):
        user_id = str(user_id)
    token = cls()
    token[api_settings.USER_ID_CLAIM] = user_id
    return token

# NOTE(review): module-level statement referencing an undefined name 'user';
# this raises NameError at import time -- likely leftover from editing.
refresh = RefreshToken.for_user(user)
def authenticate(usuario, contraseña):
    """Look up a Usuario by username and password; return it if found.

    NOTE(review): objects.get() raises Usuario.DoesNotExist (uncaught) when
    no row matches, so the 'if not usuario' branch below appears unreachable;
    also this compares plaintext passwords -- confirm both with the team.
    """
    usuario = Usuario.objects.get(usuario= usuario, contraseña=contraseña)
    if not usuario:
        raise serializers.ValidationError({'error': 'Usuario no existe'},
            status=HTTP_404_NOT_FOUND)
    return usuario
"slalbertojesus@gmail.com"
] | slalbertojesus@gmail.com |
361e4d07975ca9bfa13fde5395e05cfab57a2474 | 847b39a71c85aeea7e3812f15f9bd5811edbec4d | /main2.py | b9ce4d61cef2890774a5093914c407f43e1f4fa5 | [] | no_license | arsalansaad/webcrawler | 559eee2c95e0e4e3699e7788958e57f57c6378ed | 2c70b769ce572d010bb1314303ad786de2304bac | refs/heads/master | 2021-01-19T15:02:52.725367 | 2017-08-21T10:00:27 | 2017-08-21T10:00:27 | 100,939,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import requests
from bs4 import BeautifulSoup
url = "http://www.hindustantimes.com/editorials/"
sourcecode = requests.get(url).text
soup = BeautifulSoup(sourcecode, "html.parser")
for link in soup.findAll("div",{ "class": "media-heading headingfour"}):
print(link.text)
for item in link.findAll('a'):
print(item.get('href'))
# for link in soup.findAll("div",class_="media-heading headingfour"):
# print(link.get('href')) | [
"arsalansaad.iitkgp@gmail.com"
] | arsalansaad.iitkgp@gmail.com |
561487ce846747b6d7fb0034befaeceaa9bf589e | 4ae6e54a01e25d370929b49bbaa91c51b003d41a | /wwwroot/app/cgi-bin/AutograderBackEnd.py | a5ec7da5cc2a9cd932b912fdd77e998cb02ccbfb | [] | no_license | rdasxy/programming-autograder | 8197a827236dc5384f6f3ceeaf2fbadefdd5506c | f885c1cd37721e1cd0b3bf3b49cc44b9adb64d92 | refs/heads/master | 2021-01-22T05:33:28.971055 | 2012-12-27T21:53:24 | 2012-12-27T21:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,367 | py |
# 4th major iteration - refactoring to deal with changed authentication procedures
# and to deal with each problem in parallel.
import os, sys
import subprocess
import random
import string
import winprocess
import win32pipe
import win32file
import pickle
import autograde_utilities
import thread
import Queue
import time
import datetime
import smtplib
import collections
import zipfile
import autograder
def ArchiveResults(JobTriple):
    ''' Record this attempt in archive.
    Gets 3-tuple: Job (itself a named tuple), result (string), error (string, possibly empty)
    '''
    # Flatten the job fields plus the grading result into a plain dict so it
    # can be pickled without depending on the namedtuple class definition.
    D = dict()
    D['UserID'] = JobTriple[0].UserID
    D['CourseNum'] = JobTriple[0].CourseNum
    D['ProblemNum'] = JobTriple[0].ProblemNum
    D['ProblemID']= JobTriple[0].ProblemID
    D['Timestamp'] = JobTriple[0].Timestamp
    D['Files']= JobTriple[0].Files
    D['Result'] = JobTriple[1]
    Path = 'c:/users/public/archive'
    # Build a filename unique per user/course/problem/timestamp by stripping
    # the characters that are illegal in Windows filenames.
    Fname = JobTriple[0].UserID + JobTriple[0].CourseNum + "%04d"%JobTriple[0].ProblemID + str(JobTriple[0].Timestamp).replace(' ', '').replace(':','')
    Fname = Fname +'.pkl'
    Fullname = os.path.join(Path, Fname)
    Zipname = os.path.join(Path, 'archive.zip')
    # Pickle the record to disk, append it to the rolling zip archive, then
    # delete the loose pickle so only the zip copy remains.
    F = open(Fullname, 'wb')
    pickle.dump(D, F)
    F.close()
    Z = zipfile.ZipFile(Zipname, 'a', zipfile.ZIP_DEFLATED)
    Z.write(Fullname, os.path.basename(Fullname))
    Z.close()
    os.remove(Fullname)
def EmailResults(AJob, Result, Error):
    """Email the grading result for AJob to the student's university address."""
    # includes code from: http://www.mkyong.com/python/how-do-send-email-in-python-via-smtplib/
    # setup login information
    #print "Emailing results."
    # Instructors get the @umkc.edu domain; students get @mail.umkc.edu.
    prefix = AJob.UserID
    if prefix in ('hareb', 'spatzs'):
        suffix = '@umkc.edu'
    else:
        suffix = '@mail.umkc.edu'
    Addy = prefix + suffix
    # NOTE(review): credentials hard-coded in source -- should be moved to
    # configuration / a secrets store.
    gmail_acct = 'umkcautograder@gmail.com'
    gmail_pwd = 'SaulAndBrian'
    # build message
    Body = "\nThis is an automatically generated email from the autograder. Do not reply to this address. "
    Body += "Contact the course instructor if you have questions."
    Body += "\nHere are the results from your submission for problem %s, %s:\n" % (AJob.ProblemNum, AJob.CourseNum)
    Body += Result + '\n' + Error + '\n'
    header = 'To:' + Addy + '\n' + 'From: ' + gmail_acct + '\n' + 'Subject:Autograder results \n'
    msg = header + Body
    # Now deal with the smtp server
    smtpserver = smtplib.SMTP("smtp.gmail.com",587)
    smtpserver.ehlo()
    smtpserver.starttls()
    # NOTE(review): missing call parentheses -- this line is a no-op; the
    # post-STARTTLS EHLO is never actually sent.
    smtpserver.ehlo
    smtpserver.login(gmail_acct, gmail_pwd)
    #print header
    smtpserver.sendmail(gmail_acct, Addy, msg)
    #print 'done!'
    smtpserver.close()
def PostResults(ResultQueue):
    ''' pull results from queue, deal w/ logging etc.
    This function is called as a separate thread. It blocks waiting for things to
    be added to the queue; if nothing is added, it blocks until the main process
    dies after a 30-sec or so timeout, taking this thread with it.
    Queue contains 3-tuples: Job (namedtuple), Result (str), ErrMsg (str, may be empty)'''
    # collections.namedtuple(JobType, ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files'])
    # NOTE(review): the docstring describes blocking-forever behavior, but
    # this loop exits as soon as the queue is empty -- confirm which is the
    # intended contract now that Grade() calls this synchronously.
    while not ResultQueue.empty():
        #print "Posting results, line 90"
        # TODO: Add code to save student's submission in archive.
        NextJob = ResultQueue.get() # this blocks as long as necessary.
        # Archive the attempt, record the grade, then email the student.
        ArchiveResults(NextJob)
        # NextJob[0].Files = None
        autograder.ReportGradeStatus(NextJob[0], NextJob[1])
        EmailResults(NextJob[0], NextJob[1], NextJob[2])
def Grade(JobList):
    ''' called by chron job--gets a named tuple representing the list of pending jobs.
    Spins off new threads for dealing with each job. Snoozes a bit, then dies.'''
    # Results of each job are pushed onto this queue; sandboxes created along
    # the way are remembered so they can be cleaned up at the end.
    ResultsQueue = Queue.Queue()
    SandboxList = list()
    while JobList:
        Settings = dict()
        ProblemDict = dict()
        Job = JobList.pop(0)
        if not Job.Files: # Student didn't turn anything in
            ResultsQueue.put( (Job, 'SubmissionError', 'No files submitted'))
        Settings, ProblemDict = SetUpSubmission(Job)
        SandboxList.append(ProblemDict['SandboxDir'])
        if not Settings: # Can't set up the problem
            ResultsQueue.put( (Job, 'SystemError', "Can't set up problem; see administrator"))
            return # and we're out of here.
        # Otherwise paths are set up & sandbox is ready.
        # Map course numbers to the handler for that course's language.
        Settings['cs101'] = HandleSubmission
        Settings['cs282'] = HandleMARSSubmission
        try:
            #IOFiles = ProblemDict['IOPairs']
            ProblemDict['FileToRun']=os.path.join(ProblemDict['SandboxDir'], ProblemDict['Run'])
            if 'ExtraFiles' in ProblemDict:
                Extras = ProblemDict['ExtraFiles']
            else:
                Extras = []
        except KeyError:
            os.rmdir(ProblemDict['SandboxDir'])
            ResultsQueue.put( (Job, 'SystemError', 'Misread configuration data; see administrator'))
            return
        #NextJob = JobList.pop(0)
        ## ReportGradeStatus(NextJob.UserID, NextJob.CourseNum, NextJob.ProblemNum,
        ##                   NextJob.Timestamp, 'Submitted')
        # NOTE(review): keys above are lowercase ('cs101') but test data uses
        # 'CS101'; a case mismatch leaves FuncToRun unbound below -- confirm.
        try:
            FuncToRun = Settings[Job.CourseNum]
        except KeyError:
            print "Course number not found, don't know which language to run."
            print "Grade, line 138"
        FuncToRun(Job, Settings, ProblemDict, ResultsQueue)
        #thread.start_new_thread(HandleSubmission, (Job, Settings, ProblemDict, ResultQueue))
    # HandleSubmission will post results to queue. Start 1 thread to handle
    # results by pulling them off queue & dealing with them.
    PostResults(ResultsQueue)
    #thread.start_new_thread(PostResults, (ResultQueue,))
    #time.sleep(15) # which should be more than enough for everything to finish.
    # When this function ends, all threads and the queue they're operating on
    # go away. In the vast majority of cases, they're long since done anyway;
    # the producer threads (HandleSubmission) are done and the consumer
    # (PostResults) is waiting for results that will never come. But just in case
    # something was left over & blocked, the end of function will clean them up.
    # Best-effort removal of every sandbox directory created above; any
    # stragglers are left for the periodic utility script to delete.
    for Dir in SandboxList:
        try:
            autograde_utilities.Cleanup(Dir)
            os.rmdir(Dir)
        except Exception, e: # if anything goes wrong, ignore it; utility script will fix later.
            #print e
            os.chdir('..')
            try:
                os.rmdir(Dir)
            except Exception, e:
                pass #print "Still didn't work.", e
def ReadSystemConfig():
    """Read the system-wide configuration file c:/autograder.ini.

    The file holds one 'Key = Value' pair per line.  Returns a dict of the
    settings on success, or None if the file cannot be read.
    """
    Stuff = dict()
    try:
        F = open('c:/autograder.ini')
    except IOError:
        # Bug fix: the original fell through and returned the then-undefined
        # 'Stuff' variable, raising NameError instead of signalling failure.
        return None
    try:
        for line in F:
            Setting = line.split('=')
            # Bug fix: skip blank or malformed (no '=') lines instead of
            # crashing with IndexError on Setting[1].
            if len(Setting) >= 2 and Setting[0].strip():
                Key = Setting[0].strip()
                Val = Setting[1].strip()
                Stuff[Key] = Val
    finally:
        # Always release the file handle, even if parsing blows up.
        F.close()
    return Stuff
def ReadProblemINI(ProblemPath):
    """Parse <ProblemPath>/template.txt into a dict of problem settings.

    Lines are 'Key: Value' pairs.  A few keys are post-processed: file lists
    are split/lowercased, 'IOPairs' becomes a list of (input, output) name
    tuples, and 'IOPath' is resolved relative to ProblemPath.  Returns False
    if the template file cannot be opened.
    """
    try:
        F=open(os.path.join(ProblemPath, 'template.txt'))
    except IOError:
        return False
    ProblemDict=dict()
    # Lines shorter than 3 chars (blank / stray newlines) are skipped.
    for line in F:
        if len(line) > 2:
            thingy = line.split(':')
            if thingy[0]:
                Key = thingy[0].strip()
                Val=thingy[1].strip()
                ProblemDict[Key]=Val
    F.close()
    # Note: Some things might be lists. Convert them.
    try:
        SubmitList=[F.lower().strip() for F in ProblemDict['SubmissionFiles'].split()]
        ProblemDict['SubmissionFiles']=SubmitList
    except KeyError:
        pass
    try:
        # Extra (system-supplied) files live under <ProblemPath>/ExtraFiles.
        ExtraList=[F.lower().strip() for F in ProblemDict['ExtraFiles'].split()]
        ExtraPath=os.path.join(ProblemPath, 'ExtraFiles')
        Extras = [os.path.join(ExtraPath, F) for F in ExtraList]
        ProblemDict['ExtraFiles']=Extras
    except KeyError:
        pass
    try:
        # Pair up the whitespace-separated names two at a time:
        # input1 output1 input2 output2 ...
        SubmitList=[F.lower().strip() for F in ProblemDict['IOPairs'].split()]
        TupList = list()
        # NOTE(review): if the list has an odd length, the IndexError branch
        # never shrinks SubmitList, so this while loop never terminates --
        # confirm templates always list an even number of names.
        while SubmitList:
            try:
                (i, o) = SubmitList[0], SubmitList[1]
                SubmitList.pop(0)
                SubmitList.pop(0)
            except IndexError:
                pass
            else:
                TupList.append((i, o))
        ProblemDict['IOPairs']=TupList
    except KeyError:
        pass
    try:
        IOPath=ProblemDict['IOPath']
    except KeyError:
        IOPath=''
    ProblemDict['IOPath'] = os.path.join(ProblemPath, IOPath)
    return ProblemDict
def SetUpSubmission(Job):
    """Resolve paths and create the sandbox directory for one job.

    Returns (Settings, ProblemDict) on success, or (False, error-message)
    when configuration cannot be read or the sandbox cannot be created.
    """
    Settings = ReadSystemConfig()
    if not Settings:
        return False, "Can't read system configuration"
    # Problem directories are laid out as <ProblemPath>/<course>/<0-padded id>.
    ProblemPath=os.path.join(Settings['ProblemPath'], Job.CourseNum, '%04d' % Job.ProblemID)
    if not os.path.isdir(ProblemPath):
        return False, "Can't find problem directory"
    else:
        Settings['ProblemPath'] = ProblemPath
    ProblemDict=ReadProblemINI(ProblemPath)
    if not ProblemDict:
        return False, "Can't read problem configuration"
    TimeStr = str(Job.Timestamp)
    # Sandbox dir looks something like:
    # Sandbox\abcxyz02072012-01-17120102030000\stuff goes here
    # for problem 0207 submitted by student 'abcxyz' on 2012-01-17 at 12:01:02.030000 PM
    # Timestamp is a datetime object, and the string version of it has characters
    # that can't be part of a directory path. So fix it.
    TempDir = Job.UserID + ('%04d' % Job.ProblemNum) + TimeStr
    for ch in ' :.,':
        TempDir = TempDir.replace(ch, '')
    ProblemDict['SandboxDir'] = os.path.join(Settings['SandboxDir'], TempDir)
    try:
        os.mkdir(ProblemDict['SandboxDir'])
    except WindowsError:
        ProblemDict['SandboxDir'] = None
        return False, "Can't configure problem."
    return Settings, ProblemDict
def HandleSubmission(Job, Settings, ProblemDict, ResultsQueue):
    ''' handle the traffic-cop aspects of a submission.
    Parameters:
        Job : The job that we're about to process. a named tuple
        ResultsQueue: The queue that we should post results to for later processing.
    Actions:
        For this problem, retrieve the list of system supplied files (if any) and list of (input,output) tuples.
        Feed the HandleFile function the problem, submission, and single (i, o) pairs until either:
            All input cases have been handled successfully; or
            Any submission has returned anything other than 'Correct.'
        If any case returned anything other than 'Correct':
            Post this job, Status, ErrMsg to results queue.
                Example: job, 'SyntaxError', traceback
                or: job, 'OutputError', 'Excessive output detected.'
        otherwise:
            Post this job, 'Correct', '' to results queue
    Returns: Nothing
    '''
    #InputDir = ProblemDict['IOPath']
    # Now process each set of I/O files; continue until all done, or an error is hit.
    for IOTuple in ProblemDict['IOPairs']:
        if 'Extras' not in ProblemDict:
            ProblemDict['Extras'] = None
        # Run the student's code against this one (input, expected-output) pair.
        Res, Err = HandleFile(Job,
                              os.path.join(ProblemDict['IOPath'], IOTuple[0]),
                              os.path.join(ProblemDict['IOPath'], IOTuple[1]),
                              ProblemDict)
        if Res != 'Correct':
            ResultsQueue.put((Job, Res, Err)) # Post results & exit early
            #os.rmdir(ProblemDict['SandboxDir'])
            return
    # If we're here, then all files were processed correctly.
    #autograde_utilities.ReportGradeStatus(StudentID, ProblemID, Res)
    ResultsQueue.put( (Job, 'Correct', ''))
    #os.rmdir(ProblemDict['SandboxDir'])
    return
def HandleMARSSubmission(Job, Settings, ProblemDict, ResultsQueue):
    '''
    Process one student's submission on one set of input data using MARS.
    Parameters:
        Job: The named tuple containing, among other things, the files submitted by the student and their contents.
        InputFileName: The name (including path if needed) of the ONE file with sample input for this test.
        CorrectOutputFileName: The name (including path if needed) of the ONE file with correct output for
            the specified input.
        FileNameToRun: The name (excluding path) of the ONE file that is to run
            to test the student's code. This must be present in Job or
            SystemSuppliedFileList.
        SystemSuppliedFileList: The (possibly empty or missing) list of other
            files (including paths) which are needed to run this problem's code
            (class files, driver programs, etc)
    Returns:
        tuple of strings (Res, Err). Res is a brief description ('Correct',
        'Runtime exceeded', etc), and Err is an error message (possibly empty
        string).
    '''
    # NOTE(review): this appears to be a copy of HandleFile() adapted for
    # MARS but not finished: the docstring's InputFileName /
    # CorrectOutputFileName are NOT parameters here, yet both names are used
    # below (NameError at runtime), and nothing is ever posted to
    # ResultsQueue -- confirm intended behavior before relying on cs282 path.
    # set up some labels for later (exit codes)
    ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \
               4:'Excessive Output', 5:'Submission Error', 6:'Assembly Error',\
               7:'Runtime Error'}
    # Make sure we've got everything we're expecting; if we don't, skip all this.
    ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
    try:
        ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList
    except (TypeError, KeyError): # if there was no list of other needed files.
        pass
    Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
    if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
        Res = "File " + ProblemDict['Run'] + " was expected, but not found."
        Err = ExitMsg[5]
        return Err, Res
    # even if we're going ahead, we can free up some memory
    del(ExpectedFiles)
    del(Expected)
    # Create working (temporary) directory, copy files into it
    ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun)
    try:
        # Write each submitted file, then any system-supplied extras, into
        # the sandbox working directory.
        for f in Job.Files:
            Fname = f[0]
            Code = f[1]
            open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code)
        try:
            if ProblemDict['Extras']: # SystemSuppliedFileList:
                for f in ProblemDict['Extras']:
                    Code = open(f).read()
                    open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code)
        except KeyError:
            pass
    except IOError:
        return ('SystemError', 'Contact Administrator or Instructor')
    # Setup I/O for program we're testing.
    Input = open(InputFileName).read()
    os.chdir(ProblemDict['WritePath'])
    open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input)
    In = open('input.txt')
    Out = open('output.txt', 'w')
    #Err = open('error.txt', 'w')
    # Run that sucker!
    try:
        # MARS flags: nc=no copyright, p=project mode, sm=start at main,
        # ae6/se7 map assembly/runtime errors onto exit codes 6 and 7.
        ExitCode = winprocess.run('java -jar c:\\Mars.jar nc p sm ae6 se7 %s' % ProblemDict['Run'], stdin=In, \
                                  stdout=Out, mSec=5000, desktop='')
    except WindowsError, msg:
        if 'timeout exceeded' in str(msg):
            ExitCode = 2 # time out
        # NOTE(review): if the exception was not a timeout, ExitCode may be
        # referenced here before assignment -- confirm.
        elif ExitCode not in (0, 6, 7):
            ExitCode = 3 # some other Windows error
    # Exit code of 0 indicates no error, as usual.
    # Exit code 6 indicates assembly error
    # Exit code 7 indicates runtime error
    #Done with files.
    In.close()
    Out.close()
    #Err.close()
    # Grab output
    if os.path.getsize('output.txt') < 5.0e6:
        Out = open('output.txt').read()
    else: # more than 5 megabytes output, something's wrong
        ExitCode = 4 # so set error flag
        Out = '' # & set Out to a safe value, but don't touch file.
    # grab error message if any.
    #Err = open('error.txt').read()
    # Cleanup temporary directory
    autograde_utilities.Cleanup(ProblemDict['WritePath'])
    #os.chdir(StartPath)
    # os.rmdir(WritePath)
    # Check output for validity.
    Correct = str(open(CorrectOutputFileName).read())
    # Normalize Windows line endings before comparing.
    Out = Out.replace('\r', '')
    Correct = Correct.replace('\r', '')
    try:
        Result = ExitMsg[ExitCode]
    except KeyError:
        Result = autograde_utilities.CompareWithFormatting(Correct, Out)
    return Result, ''
def HandleFile(Job, InputFileName, CorrectOutputFileName, ProblemDict): #FileNameToRun, SystemSuppliedFileList=None):
    '''
    Process one student's submission on one set of input data.
    Parameters:
        Job: The named tuple containing, among other things, the files submitted by the student and their contents.
        InputFileName: The name (including path if needed) of the ONE file with sample input for this test.
        CorrectOutputFileName: The name (including path if needed) of the ONE file with correct output for
            the specified input.
        FileNameToRun: The name (excluding path) of the ONE file that is to run
            to test the student's code. This must be present in Job or
            SystemSuppliedFileList.
        SystemSuppliedFileList: The (possibly empty or missing) list of other
            files (including paths) which are needed to run this problem's code
            (class files, driver programs, etc)
    Returns:
        tuple of strings (Res, Err). Res is a brief description ('Correct',
        'Runtime exceeded', etc), and Err is an error message (possibly empty
        string).
    '''
    # set up some labels for later (exit codes)
    ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \
               4:'Excessive Output', 5:'Submission Error'}
    # Make sure we've got everything we're expecting; if we don't, skip all this.
    ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
    try:
        ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList
    except (TypeError, KeyError): # if there was no list of other needed files.
        pass
    Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
    if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
        Res = "File " + ProblemDict['Run'] + " was expected, but not found."
        Err = ExitMsg[5]
        return Err, Res
    # even if we're going ahead, we can free up some memory
    del(ExpectedFiles)
    del(Expected)
    # Create working (temporary) directory, copy files into it
    ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun)
    try:
        # Write each submitted file, then any system-supplied extras, into
        # the sandbox working directory.
        for f in Job.Files:
            Fname = f[0]
            Code = f[1]
            open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code)
        if ProblemDict['Extras']: # SystemSuppliedFileList:
            for f in ProblemDict['Extras']:
                Code = open(f).read()
                open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code)
    except IOError:
        return ('SystemError', 'Contact Administrator or Instructor')
    # Setup I/O for program we're testing.
    Input = open(InputFileName).read()
    os.chdir(ProblemDict['WritePath'])
    open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input)
    In = open('input.txt')
    Out = open('output.txt', 'w')
    Err = open('error.txt', 'w')
    # Run that sucker!
    try:
        # Run the student's program with a 5-second wall-clock limit, stdin
        # redirected from the sample input and stdout/stderr captured.
        ExitCode = winprocess.run('python %s' % ProblemDict['Run'], stdin=In, \
                                  stdout=Out, stderr=Err, mSec=5000, desktop='')
    except WindowsError, msg:
        if 'timeout exceeded' in str(msg):
            ExitCode = 2 # time out
        else:
            ExitCode = 3 # some other Windows error
    # Exit code of 0 indicates no error, as usual.
    #Done with files.
    In.close()
    Out.close()
    Err.close()
    # Grab output
    if os.path.getsize('output.txt') < 5.0e6:
        Out = open('output.txt').read()
    else: # more than 5 megabytes output, something's wrong
        ExitCode = 4 # so set error flag
        Out = '' # & set Out to a safe value, but don't touch file.
    # grab error message if any.
    Err = open('error.txt').read()
    # Cleanup temporary directory
    autograde_utilities.Cleanup(ProblemDict['WritePath'])
    #os.chdir(StartPath)
    # os.rmdir(WritePath)
    # Check output for validity.
    Correct = str(open(CorrectOutputFileName).read())
    # Normalize Windows line endings before comparing.
    Out = Out.replace('\r', '')
    Correct = Correct.replace('\r', '')
    try:
        # A recognized non-zero exit code wins; otherwise compare outputs.
        Result = ExitMsg[ExitCode]
    except KeyError:
        Result = autograde_utilities.CompareWithFormatting(Correct, Out)
    return Result, Err
def RunTest():
    """Manual smoke test: builds two hard-coded Jobs and feeds them to Grade().

    Not run by default (see the commented-out call in __main__); paths are
    machine-specific Windows locations -- TODO confirm before reuse.
    """
    JobType = collections.namedtuple('JobType', ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files'])
    JobList = list()
    UserID = 'hareb'
    CourseNum="CS101"
    ProblemNum='1'
    ProblemID='0102'
    Timestamp=str(time.localtime())
    # First job: sample solution for problem 0102, submitted under the
    # file name the grader expects ('solution.py').
    f = open('c:/users/public/problems/cs101/0102/example0102.py').read()
    Files = list()
    Files.append( ('solution.py', f))
    Job = JobType(UserID, CourseNum, ProblemNum, ProblemID, Timestamp, Files)
    JobList.append(Job)
    # Second job: sample solution for problem 0103 under its own name.
    f = open('c:/users/public/problems/cs101/0103/example0103.py').read()
    Files = list()
    Files.append( ('example0103.py', f) )
    Timestamp = str(time.localtime())
    Job = JobType(UserID, CourseNum, '002', '0103', Timestamp, Files)
    JobList.append(Job)
    Grade( JobList )
    # print "Done."
if __name__ == '__main__':
    # Reset job 21 to 'pending' so it is picked up again, then grade the
    # current queue. NOTE(review): the hard-coded SequenceNumber = 21 looks
    # like a leftover debugging aid -- confirm before production use.
    connection = autograder.getConnection()
    Cursor = connection.cursor()
    cmd = """UPDATE Jobs SET Status = 'pending' WHERE SequenceNumber = 21"""
    Cursor.execute(cmd)
    connection.commit()
    connection.close()
    Jobs = autograder.getJobs()
    Grade(Jobs)
    #RunTest()
##
## OK, Res, Err = HandleSubmission(1, '0102', ['example0102.py'])
## print "Your result:", Res
## if Err:
## print "Error message:\n", Err
##
## if OK:
## print '\tNeed to update database if this is first success on this problem.'
## else:
## print '\tNeed to update database if this is first attempt on this problem.'
##
| [
"rdasxy@gmail.com"
] | rdasxy@gmail.com |
16468fa7074d1375000d5ddc4377969b545f6089 | 2e9589362c3f53841c101de62e714a5bac3d8096 | /dataset_generator/word_embeddings/document_featurizer.py | b2abcd83bcf17e7f6052f59c79b1425136fb9bf4 | [] | no_license | nikhilsu/CitationRecommender | 8b61bd44c3884de010d698dd013938d6cc13a6dc | 44666e57664980ab6476182aa9a572b7ab68fa07 | refs/heads/master | 2023-04-11T01:07:05.379559 | 2021-05-21T14:31:53 | 2021-05-31T06:15:45 | 183,876,181 | 0 | 0 | null | 2023-03-25T01:08:58 | 2019-04-28T07:59:19 | Python | UTF-8 | Python | false | false | 4,672 | py | import numpy as np
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm
class DocumentFeaturizer(object):
    """Converts paper titles/abstracts into padded word-index feature arrays.

    The vocabulary is learned by a CountVectorizer fitted on the training
    split; indices are shifted by one so index 0 stays free for padding
    (as produced by keras ``pad_sequences``).
    """

    # Common English / boilerplate words excluded from the vocabulary.
    # ('the' appears twice -- harmless in a set literal.)
    STOPWORDS = {
        'abstract', 'about', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for',
        'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the',
        'this', 'to', 'was', 'what', 'when', 'where', 'who', 'will', 'with',
        'the', 'we', 'our', 'which'
    }

    def __init__(self, raw_dataset, opts):
        # `opts` supplies max_abstract_len / max_title_len / max_features /
        # train_split and receives n_features back -- TODO confirm contract.
        self.raw_dataset = raw_dataset
        self.max_abstract_len = opts.max_abstract_len
        self.max_title_len = opts.max_title_len
        title_abstract_of_training_data = self.raw_dataset.fetch_collated_training_text(opts.train_split)
        # Drop words occurring in more than 90% of training documents.
        max_df_frac = 0.90
        self.count_vectorizer = CountVectorizer(
            max_df=max_df_frac,
            max_features=opts.max_features,
            stop_words=self.STOPWORDS
        )
        self.count_vectorizer.fit(tqdm(title_abstract_of_training_data, desc='Building Count-Vectorizer'))
        # Shift every vocabulary index by one: 0 is reserved for padding.
        self.word_to_index = dict((word, index + 1) for index, word in enumerate(self.count_vectorizer.vocabulary_))
        self.n_features = 1 + len(self.word_to_index)
        opts.n_features = self.n_features

    def __index_of_word(self, word):
        # None (falsy) signals an out-of-vocabulary word.
        return self.word_to_index[word] if word in self.word_to_index else None

    def __word_to_index_features(self, document):
        # Map each token sequence to vocabulary indices, silently dropping
        # out-of-vocabulary tokens.
        # NOTE(review): if an element of `document` is a plain string this
        # iterates characters, not words -- callers appear to pass
        # pre-tokenised sequences; confirm upstream.
        x_indexes = []
        for words in document:
            indexes = []
            for word in words:
                index = self.__index_of_word(word)
                if index:
                    indexes.append(index)
            x_indexes.append(indexes)
        return x_indexes

    def __extract_textual_features(self, text, max_len):
        # Left-pad / truncate the index sequence to a fixed length.
        return np.asarray(pad_sequences(self.__word_to_index_features([text]), max_len)[0], dtype=np.int32)

    @staticmethod
    def __extract_citation_features(documents):
        # Log-scaled incoming-citation counts; the max(..., 0) + 1 shift keeps
        # the argument of log at >= 1 (so the feature is >= 0).
        return np.log([max(doc['in_citation_count'] - 1, 0) + 1 for doc in documents])

    @staticmethod
    def __extract_common_types_features(d_qs, candidates):
        # Word indices shared between each query/candidate pair, left-aligned
        # and zero-padded to the query feature width.
        common_types = [np.intersect1d(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)]
        common_types_features = np.zeros_like(d_qs)
        for i, intersection in enumerate(common_types):
            common_types_features[i, :len(intersection)] = intersection
        return common_types_features

    @staticmethod
    def __extract_sim_scores(d_qs, candidates, candidate_selector):
        # Pairwise cosine similarity, delegated to the candidate selector.
        return np.asarray(
            [candidate_selector.cosine_similarity(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)])

    def featurize_documents(self, documents):
        # Fixed-length index arrays for every document's title and abstract.
        features = {
            'title':
                np.asarray([self.__extract_textual_features(doc['title'], self.max_title_len) for doc in documents]),
            'abstract':
                np.asarray(
                    [self.__extract_textual_features(doc['abstract'], self.max_abstract_len) for doc in documents])
        }
        return features

    def extract_features(self, d_qs, candidates, candidate_selector=None):
        """Builds the model input dict for query/candidate document pairs.

        When `candidate_selector` is given, the extra hand-crafted ranking
        features (citations, overlaps, similarity scores) are added too.
        """
        for_nn_rank = candidate_selector is not None
        d_q_features = self.featurize_documents(d_qs)
        candidate_features = self.featurize_documents(candidates)
        features = {
            'query-title-text':
                d_q_features['title'],
            'query-abstract-text':
                d_q_features['abstract'],
            'candidate-title-text':
                candidate_features['title'],
            'candidate-abstract-text':
                candidate_features['abstract']
        }
        if for_nn_rank:
            citation_features = DocumentFeaturizer.__extract_citation_features(candidates)
            common_title = DocumentFeaturizer.__extract_common_types_features(d_q_features['title'],
                                                                              candidate_features['title'])
            common_abstract = DocumentFeaturizer.__extract_common_types_features(d_q_features['abstract'],
                                                                                 candidate_features['abstract'])
            similarity_score_features = DocumentFeaturizer.__extract_sim_scores(d_qs, candidates, candidate_selector)
            features['query-candidate-common-title'] = common_title
            features['query-candidate-common-abstract'] = common_abstract
            features['candidate-citation-count'] = citation_features
            features['similarity-score'] = similarity_score_features
        return features
| [
"nikhilsulegaon@gmail.com"
] | nikhilsulegaon@gmail.com |
3ce6b9f20d08c14c582b9278fa91e5bb702c29b2 | e3472add507c7fc16d013c2e318ca4e28158a13a | /tcc_tf/deterministic_alignment.py | 4e8c87dc31aabe92912c8f4e9dd78be57da37059 | [
"Apache-2.0"
] | permissive | JiaHeng-DLUT/tcc_Temporal_Cycle_Consistency_Loss.pytorch | ebd5a9eba26a2332d81743c95a460eef2c690cb4 | 61490f457b406366f847822962f607e4c3d3e1bd | refs/heads/main | 2022-12-27T20:33:20.462667 | 2020-10-11T11:12:05 | 2020-10-11T11:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,375 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deterministic alignment between all pairs of sequences in a batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from .losses import classification_loss
from .losses import regression_loss
def pairwise_l2_distance(embs1, embs2):
  """Returns the [M, N] matrix of squared L2 distances between row pairs."""
  # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, evaluated for every row pair.
  sq_norms_1 = tf.reshape(tf.reduce_sum(tf.square(embs1), 1), [-1, 1])
  sq_norms_2 = tf.reshape(tf.reduce_sum(tf.square(embs2), 1), [1, -1])
  cross_terms = tf.matmul(embs1, embs2, transpose_a=False, transpose_b=True)
  # Clamp at zero: floating-point rounding can make the expression slightly
  # negative for near-identical rows.
  return tf.maximum(sq_norms_1 + sq_norms_2 - 2.0 * cross_terms, 0.0)
def get_scaled_similarity(embs1, embs2, similarity_type, temperature):
  """Returns the [M, N] similarity matrix between rows of embs1 and embs2.

  Scores are scaled by the embedding dimensionality (helps optimization)
  and by the softmax temperature (controls how soft/hard the alignment is).

  Args:
    embs1: Tensor of shape [M, D].
    embs2: Tensor of shape [N, D].
    similarity_type: 'cosine' (dot product) or 'l2' (negative squared L2).
    temperature: Float, softmax temperature used to scale the logits.

  Raises:
    ValueError: If similarity_type is neither 'l2' nor 'cosine'.
  """
  if similarity_type == 'cosine':
    raw_similarity = tf.matmul(embs1, embs2, transpose_b=True)
  elif similarity_type == 'l2':
    raw_similarity = -1.0 * pairwise_l2_distance(embs1, embs2)
  else:
    raise ValueError('similarity_type can either be l2 or cosine.')
  num_channels = tf.cast(tf.shape(embs1)[1], tf.float32)
  # Normalize by embedding size, then soften/sharpen with the temperature.
  return raw_similarity / num_channels / temperature
def align_pair_of_sequences(embs1,
                            embs2,
                            similarity_type,
                            temperature):
  """Align a given pair embedding sequences.

  Args:
    embs1: Tensor, Embeddings of the shape [M, D] where M is the number of
      embeddings and D is the embedding size.
    embs2: Tensor, Embeddings of the shape [N, D] where N is the number of
      embeddings and D is the embedding size.
    similarity_type: String, Either one of 'l2' or 'cosine'.
    temperature: Float, Temperature used in scaling logits before softmax.

  Returns:
    logits: Tensor, Pre-softmax similarity scores after cycling back to the
      starting sequence.
    labels: Tensor, One hot labels containing the ground truth. The index where
      the cycle started is 1.
  """
  max_num_steps = tf.shape(embs1)[0]
  # Find distances between embs1 and embs2.
  sim_12 = get_scaled_similarity(embs1, embs2, similarity_type, temperature)
  # Softmax the distance.
  softmaxed_sim_12 = tf.nn.softmax(sim_12, axis=1)
  # Calculate soft-nearest neighbors.
  nn_embs = tf.matmul(softmaxed_sim_12, embs2)
  # Find distances between nn_embs and embs1.
  sim_21 = get_scaled_similarity(nn_embs, embs1, similarity_type, temperature)
  # Cycle-consistency ground truth: after going embs1 -> embs2 -> back,
  # step i should land on step i again, hence the one-hot identity labels.
  logits = sim_21
  labels = tf.one_hot(tf.range(max_num_steps), max_num_steps)
  return logits, labels
def compute_deterministic_alignment_loss(embs,
                                         steps,
                                         seq_lens,
                                         num_steps,
                                         batch_size,
                                         loss_type,
                                         similarity_type,
                                         temperature,
                                         label_smoothing,
                                         variance_lambda,
                                         huber_delta,
                                         normalize_indices):
  """Compute cycle-consistency loss for all steps in each sequence.

  This aligns each pair of videos in the batch except with itself.
  When aligning it also matters which video is the starting video. So for N
  videos in the batch, we have N * (N-1) alignments happening.
  For example, a batch of size 3 has 6 pairs of sequence alignments.

  Args:
    embs: Tensor, sequential embeddings of the shape [N, T, D] where N is the
      batch size, T is the number of timesteps in the sequence, D is the size
      of the embeddings.
    steps: Tensor, step indices/frame indices of the embeddings of the shape
      [N, T] where N is the batch size, T is the number of the timesteps.
    seq_lens: Tensor, Lengths of the sequences from which the sampling was
      done. This can provide additional information to the alignment loss.
    num_steps: Integer/Tensor, Number of timesteps in the embeddings.
    batch_size: Integer, Size of the batch.
    loss_type: String, This specifies the kind of loss function to use.
      Currently supported loss functions: 'classification', 'regression_mse',
      'regression_mse_var', 'regression_huber'.
    similarity_type: String, Currently supported similarity metrics: 'l2' ,
      'cosine' .
    temperature: Float, temperature scaling used to scale the similarity
      distributions calculated using the softmax function.
    label_smoothing: Float, Label smoothing argument used in
      tf.keras.losses.categorical_crossentropy function and described in this
      paper https://arxiv.org/pdf/1701.06548.pdf.
    variance_lambda: Float, Weight of the variance of the similarity
      predictions while cycling back. If this is high then the low variance
      similarities are preferred by the loss while making this term low
      results in high variance of the similarities (more uniform/random
      matching).
    huber_delta: float, Huber delta described in tf.keras.losses.huber_loss.
    normalize_indices: Boolean, If True, normalizes indices by sequence
      lengths. Useful for ensuring numerical instabilities doesn't arise as
      sequence indices can be large numbers.

  Returns:
    loss: Tensor, Scalar loss tensor that imposes the chosen variant of the
      cycle-consistency loss.
  """
  labels_list = []
  logits_list = []
  steps_list = []
  seq_lens_list = []
  for i in range(batch_size):
    for j in range(batch_size):
      # We do not align the sequence with itself.
      if i != j:
        logits, labels = align_pair_of_sequences(embs[i],
                                                 embs[j],
                                                 similarity_type,
                                                 temperature)
        logits_list.append(logits)
        labels_list.append(labels)
        # Replicate sequence i's step indices and length once per timestep so
        # they line up row-for-row with the [num_steps, T] logits block.
        steps_list.append(tf.tile(steps[i:i+1], [num_steps, 1]))
        seq_lens_list.append(tf.tile(seq_lens[i:i+1], [num_steps]))
  # Stack the N*(N-1) alignment problems into one big batch.
  logits = tf.concat(logits_list, axis=0)
  labels = tf.concat(labels_list, axis=0)
  steps = tf.concat(steps_list, axis=0)
  seq_lens = tf.concat(seq_lens_list, axis=0)
  if loss_type == 'classification':
    loss = classification_loss(logits, labels, label_smoothing)
  elif 'regression' in loss_type:
    loss = regression_loss(logits, labels, num_steps, steps, seq_lens,
                           loss_type, normalize_indices, variance_lambda,
                           huber_delta)
  else:
    raise ValueError('Unidentified loss_type %s. Currently supported loss '
                     'types are: regression_mse, regression_huber, '
                     'classification.' % loss_type)
  return loss
| [
"noreply@github.com"
] | JiaHeng-DLUT.noreply@github.com |
ae8caa3e5755b5b934074980647e9b8a044a2e9a | 2d930aadf19b2ad6ea49725099d2f37475cd57f8 | /test/functional/wallet-dump.py | c3f723a19bbd46584fb33bce6dba37487abcdcbe | [
"MIT"
] | permissive | stratton-oakcoin/oakcoin | ea83774c9f6ea64adb8832770e6219ffb31edef6 | fe53193a50bd3674211448f1dcc39c6f9f042bb2 | refs/heads/master | 2021-01-20T13:22:05.877005 | 2017-05-07T10:09:57 | 2017-05-07T10:09:57 | 90,477,972 | 1 | 2 | null | 2017-05-07T10:09:57 | 2017-05-06T16:58:05 | C++ | UTF-8 | Python | false | false | 4,770 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (start_nodes, start_node, assert_equal, oakcoind_processes)
def read_dump(file_name, addrs, hd_master_addr_old):
    """
    Read the given dump, count the addrs that match, count change and reserve.
    Also check that the old hd_master is inactive.

    Returns (found_addr, found_addr_chg, found_addr_rsv, new_hd_master_addr).
    """
    with open(file_name, encoding='utf8') as inputfile:
        found_addr = 0
        found_addr_chg = 0
        found_addr_rsv = 0
        hd_master_addr_ret = None
        for line in inputfile:
            # only read non comment lines
            if line[0] != "#" and len(line) > 10:
                # Dump line shape: "<key> <timestamp> <keytype> # addr=<addr> hdkeypath=<path>"
                # split out some data
                key_label, comment = line.split("#")
                # key = key_label.split(" ")[0]
                keytype = key_label.split(" ")[2]
                if len(comment) > 1:
                    addr_keypath = comment.split(" addr=")[1]
                    addr = addr_keypath.split(" ")[0]
                    keypath = None
                    if keytype == "inactivehdmaster=1":
                        # ensure the old master is still available
                        assert(hd_master_addr_old == addr)
                    elif keytype == "hdmaster=1":
                        # ensure we have generated a new hd master key
                        assert(hd_master_addr_old != addr)
                        hd_master_addr_ret = addr
                    else:
                        keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
                    # count key types
                    for addrObj in addrs:
                        if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
                            found_addr += 1
                            break
                        elif keytype == "change=1":
                            found_addr_chg += 1
                            break
                        elif keytype == "reserve=1":
                            found_addr_rsv += 1
                            break
        return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(OakcoinTestFramework):
    """Functional test for the dumpwallet RPC, before and after encryption."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1
        # Small keypool so the dump is quick to generate and to count.
        self.extra_args = [["-keypool=90"]]

    def setup_network(self, split=False):
        # Use 1 minute timeout because the initial getnewaddress RPC can take
        # longer than the default 30 seconds due to an expensive
        # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
        # the test often takes even longer.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)

    def run_test (self):
        """Dump the wallet, encrypt it, dump again, and verify the counts."""
        tmpdir = self.options.tmpdir
        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0,test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 50)  # 50 blocks where mined
        assert_equal(found_addr_rsv, 90*2)  # 90 keys plus 100% internal keys
        #encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        oakcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
        # Encryption rotates the HD master key; the dump must show the old one
        # as inactive and a different one as active (checked inside read_dump).
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg, 90*2 + 50)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90*2)
if __name__ == '__main__':
    # Standard functional-test entry point.
    WalletDumpTest().main ()
| [
"s.matthew.english@gmail.com"
] | s.matthew.english@gmail.com |
ba730b6a4b4982aa4ff13b5059b8122ad718b1b3 | 9d615b7174eecd4c8401513ca8cc21fc498fef5a | /api/views.py | 457826ebef472c327e82113a59326e527f03c40c | [] | no_license | AlexeySub/prephack | ef13e637da181cd6e46d0ace20bd79c9438fba21 | fe971421438d66f59cb2ce977dc736573c2e9ea3 | refs/heads/master | 2020-05-05T12:31:41.571292 | 2019-04-13T08:00:28 | 2019-04-13T08:00:28 | 180,032,825 | 0 | 0 | null | 2019-04-07T22:45:46 | 2019-04-07T22:45:46 | null | UTF-8 | Python | false | false | 2,920 | py | from api.models import User, UserAuthen, Message
from rest_framework import renderers, parsers
from django.views import View
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password
from django.core import exceptions
from django import db
import jwt, time
from django.shortcuts import render
class UserRegister(View):
    """Creates a User from a JSON body (username/password/email/usertype)."""

    def post(self, request):
        payload = parsers.JSONParser().parse(request)
        hashed_password = make_password(payload['password'], salt='123')
        new_user = User(
            name=payload['username'].lower(),
            password=hashed_password,
            email=payload['email'].lower(),
            userType=payload['usertype'],
        )
        try:
            new_user.save()
        except db.IntegrityError:
            # A unique constraint (duplicate name/email) was violated.
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)
class UserAuth(View):
    """Validates credentials from a JSON body and issues a JWT auth token.

    Expects ``{"login": ..., "password": ...}``; on success persists an
    authenticated UserAuthen row and returns ``{"auth_token": ...}``.
    """

    def post(self, request):
        import hmac  # local import: only needed for the constant-time compare

        data = parsers.JSONParser().parse(request)
        try:
            user = User.objects.get(name=data['login'].lower())
        except exceptions.ObjectDoesNotExist:
            return HttpResponse('Unauthorized', status=401)
        # Hash with the same fixed salt used at registration, then compare in
        # constant time so the check does not leak information via timing.
        # NOTE(review): a per-user random salt would be preferable; kept as-is
        # to stay compatible with passwords already stored by UserRegister.
        candidate = make_password(data['password'], salt='123')
        if hmac.compare_digest(user.password, candidate):
            authtoken = jwt.encode(data, 'secret', algorithm='HS256').decode('UTF-8')
            userAuth = UserAuthen(user_id=user.id, token=authtoken, is_authenticated=True)
            userAuth.save()
            return HttpResponse(renderers.JSONRenderer().render({'auth_token': authtoken}))
        return HttpResponse('Unauthorized', status=401)
class UserLogout(View):
    """Marks the UserAuthen row for the supplied token as logged out."""

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            userAuth = UserAuthen.objects.get(token=data['auth_token'])
        except exceptions.ObjectDoesNotExist:
            # Unknown/expired token used to raise and produce an HTTP 500.
            return HttpResponse('Unauthorized', status=401)
        userAuth.is_authenticated = False
        userAuth.save()
        return HttpResponse('Ok')
class Chat(View):
    """Stores and retrieves chat messages, authenticated by a JWT token.

    NOTE(review): ``jwt.decode(..., algorithm=...)`` is the PyJWT 1.x calling
    convention; newer PyJWT expects ``algorithms=['HS256']`` -- confirm the
    installed version before upgrading.
    """

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            return HttpResponse('Unauthorized', status=401)
        try:
            sender = UserAuthen.objects.get(token=data['auth_token'])
        except exceptions.ObjectDoesNotExist:
            # Valid signature but no stored session: treat as unauthorized.
            return HttpResponse('Unauthorized', status=401)
        message = Message(user_id=sender.user_id, text=data['text'])
        try:
            message.save()
        except db.IntegrityError:
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)

    def get(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            return HttpResponse('Unauthorized', status=401)
        try:
            owner = User.objects.get(name=data['login'])
        except exceptions.ObjectDoesNotExist:
            return HttpResponse('Not Found', status=404)
        messages = Message.objects.filter(user_id=owner.id)
        # A QuerySet has no `.text` attribute (the old code crashed here);
        # serialize the text of every message instead.
        return HttpResponse(renderers.JSONRenderer().render([m.text for m in messages]))
def index(request):
    # Render the front-end entry page; the API itself is served by the
    # class-based views in this module.
    return render(request, 'index.html')
| [
"mr.317676@gmail.com"
] | mr.317676@gmail.com |
4cf799ae31dfe4802a0d9299a2f9c9087c10afe6 | 0add969034a82912bc6e19abc427abe883ee65bb | /theta_en_time_polar.py | a9683111bde6bafb250a54492723f599975e5624 | [] | no_license | Michael-Gong/New_LPI_python_script | eefd162fdbbc3c614c66e2b157ea5296e3bc8492 | 9de109c6f19aa60bdeaf102e9a1ec0baff5669ad | refs/heads/master | 2020-03-28T16:06:09.631550 | 2020-02-01T08:21:17 | 2020-02-01T08:21:17 | 148,659,608 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,511 | py | #%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
#mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
# Font settings shared by all figures in this script.
font = {'family' : 'monospace',
        'color' : 'black',
        'weight' : 'normal',
        'size' : 28,
        }
font2 = {'family' : 'monospace',
         'color' : 'black',
         'weight' : 'normal',
         'size' : 15,
         }
font_size = 28
font_size_2 = 15
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
# Build variants of jet/viridis/rainbow whose lowest quarter fades to white,
# so empty histogram bins render as (near-)white instead of the darkest color.
upper = matplotlib.cm.jet(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_jet = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.viridis(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_viridis = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.rainbow(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_rainbow = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
def pxpy_to_energy(gamma, weight):
    """Weighted histogram of `gamma` over 200 uniform bins spanning [0, 20000).

    Each bin is half-open [edge_i, edge_{i+1}) and reported at its midpoint
    (50, 150, ..., 19950). Values at or above 20000 are discarded.
    """
    edges = np.linspace(0, 20000.0, 201)
    centers = np.linspace(50, 19950, 200)
    values = np.zeros_like(centers)
    for k, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
        values[k] = np.sum(weight[(lo <= gamma) & (gamma < hi)])
    return (centers, values)
def theta_to_grid(theta, weight):
    """Weighted histogram of `theta` over 240 one-degree bins in [-120, 120).

    Bin i covers the half-open interval [edge_i, edge_{i+1}) and is reported
    at its centre (-119.5 ... 119.5). Values at or above 120 are discarded.
    """
    edges = np.linspace(-120, 120, 241)
    centers = np.linspace(-119.5, 119.5, 240)
    values = np.zeros_like(centers)
    for k in range(240):
        in_bin = (edges[k] <= theta) & (theta < edges[k + 1])
        values[k] = np.sum(weight[in_bin])
    return (centers, values)
if __name__ == "__main__":
    # Render a polar energy-vs-angle histogram for every timestep of each
    # simulation run listed in from_path_list.
    part_number = 50000
    from_path = './p50000_no_T150/'
    # Number of timesteps = total rows in the time file / particles per step.
    nsteps = int(sum(1 for line in open(from_path+'t_tot_s.txt'))/part_number)
    ntheta = 270
    ngg = 120
    from_path_list = ['./p50000_no_T150/','./p50000_rr_T150/','./p50000_qe_T150/']
    #from_path_list = ['./Data_qe_T500_p50000_try/']
    for i in range(np.size(from_path_list)):
        from_path = from_path_list[i] #'./Data_qe_T050_p50000/'
        to_path = from_path
        # Flat per-particle time/momentum traces, reshaped to (particle, step).
        t0 = np.loadtxt(from_path+'t_tot_s.txt')/2/np.pi
        px0 = np.loadtxt(from_path+'px_tot_s.txt')
        py0 = np.loadtxt(from_path+'py_tot_s.txt')
        t0 = np.reshape(t0,(part_number,nsteps))
        px0 = np.reshape(px0,(part_number,nsteps))
        py0 = np.reshape(py0,(part_number,nsteps))
        # gamma * 0.51e-3 -- presumably particle energy in GeV (electron rest
        # energy 0.511 MeV); TODO confirm the unit convention.
        gg0 = (px0**2+py0**2+1)**0.5*0.51e-3
        # NOTE(review): the first ww0 assignment (unit weights) is immediately
        # overwritten by energy weighting below.
        ww0 = np.zeros_like(gg0)+1
        ww0 = np.zeros_like(gg0)+gg0
        theta0 = np.arctan2(py0,px0)
        theta_edges = np.linspace(-np.pi,np.pi, ntheta +1)
        gg_edges = np.linspace(0.1, 6, ngg +1)
        theta_edges_1 = np.linspace(-np.pi,np.pi,ntheta)
        gg_edges_1 = np.linspace(0.1, 6, ngg)
        # One polar figure per timestep.
        for n in range(np.size(t0[0,:])):
            H, _, _ = np.histogram2d(gg0[:,n], theta0[:,n], [gg_edges, theta_edges], weights=gg0[:,n])
            print('Max H:',np.max(H))
            Theta, R = np.meshgrid(theta_edges_1,gg_edges_1)
            H_temp = np.sum(H[:,:]*R,0)
            print('averaged |theta|=',np.sum(H_temp*abs(theta_edges_1))/np.sum(H_temp)/np.pi*180)
            fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
            ax.set_facecolor('whitesmoke')
            levels = np.logspace(1,5, 101)
            # Mask near-empty bins so they render transparent under LogNorm.
            H[H<0.01] = np.nan
            img=ax.pcolormesh(Theta, R, H, norm=colors.LogNorm(vmin=0.01, vmax=1e3), cmap='viridis')
            # cax = fig.add_axes([0.68,0.97,0.25,0.02])
            # cbar=fig.colorbar(img,cax=cax, ticks=[1e3,1e5],orientation='horizontal')
            # cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), fontsize=font_size_2)
            # cbar.set_label(r'dI/d$\theta$dE [A.U.]',fontdict=font2)
            # ax.tick_params(axis="y", pad=25)
            ax.tick_params(axis="x", pad=10)
            # ax.set_xticks([])
            # Only the last run of each group of three keeps angular labels.
            if (i%3 != 2):
                ax.set_xticklabels([])
            #ax.set_xlim(10,50)
            #ax.set_ylim(0.,1.)
            ax.set_xlabel(r'$\theta\ [^o]$',fontdict=font)
            # ax.set_rlim(1e-1,1e3)
            # ax.set_rmax(1e3)
            l_r = np.array([0,1,2,3])
            ax.set_rticks(l_r+1)
            ax.set_yticklabels([])
            # ax.set_yticklabels(['$10^%d$' % x for x in (l_r+1)])
            ax.set_rlim(0, 6)
            ax.set_rlabel_position(90)
            # ax.set_rscale('log')
            # ax.set_rscale('log')
            # ax.set_thetamin(-90)
            # ax.set_thetamax(90)
            # ax.set_yticklabels([0.1,1,10,100,1000])
            ax.set_xticklabels([0,90,180,270])
            #ax.set_theta_zero_location('N')
            # ax.set_ylabel(r'$\theta\ [^o]$',fontdict=font)
            ax.tick_params(axis='x',labelsize=font_size)
            ax.tick_params(axis='y',labelsize=font_size_2)
            #ax.set_title('proton_angular_time='+str(time1), va='bottom', y=1., fontsize=20)
            # plt.text(-100,650,' t = '++' fs',fontdict=font)
            ax.grid(True,linestyle='--',linewidth=1.5,color='grey')
            #plt.pcolormesh(x, y, ex.T, norm=mpl.colors.Normalize(vmin=0,vmax=100,clip=True), cmap=cm.cubehelix_r)
            # plt.axis([x.min(), x.max(), y.min(), y.max()])
            #### manifesting colorbar, changing label and axis properties ####
            # cbar=plt.colorbar(pad=0.01)#ticks=[np.min(ex), -eee/2, 0, eee/2, np.min()])
            # cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=font_size)
            # cbar.set_label('dN/dE [A.U.]',fontdict=font)
            # a0=200.0
            # alpha=np.linspace(-3.5,0.5,501)
            # plt.xlabel(r'$\theta$'+' [degree]',fontdict=font)
            # plt.ylabel('time [fs]',fontdict=font)
            # plt.xticks([-135,-90,-45,0,45,90,135],fontsize=font_size);
            #plt.yticks([0,500,1000,1500],fontsize=font_size);
            # plt.title(r'$dN/d\theta$'+' for no RR', fontsize=font_size)
            # plt.xlim(-120,120)
            # plt.ylim(0,1650)
            #plt.title('electron at y='+str(round(y[n,0]/2/np.pi,4)),fontdict=font)
            plt.subplots_adjust(top=0.90, bottom=0.11, left=0.1, right=0.93, hspace=0.10, wspace=0.05)
            fig = plt.gcf()
            fig.set_size_inches(6., 6.)
            #fig.set_size_inches(5, 4.5)
            fig.savefig(to_path+'theta_en_dist_'+to_path[7:-1]+'_'+str(n).zfill(4)+'.png',format='png',dpi=160)
            plt.close("all")
| [
"noreply@github.com"
] | Michael-Gong.noreply@github.com |
bb7be13aa1ae689ed05a4e1ef6b48ef41a63abf7 | 34edc8b21515817caa87aedeb07b87515c33ebd0 | /shipping/serializers.py | c02da959ef30fc9d803c7ff4b5f9b8d0607690d0 | [] | no_license | waelbeso/Ftrina | b20c277030132b195af621d9e739040d42943a9b | 449868f8c095bb920a2aef2e2dc4cb80de8ec82a | refs/heads/master | 2022-09-06T16:34:40.391965 | 2018-05-27T12:19:05 | 2018-05-27T12:19:05 | 134,336,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py |
from shipping.models import Model,Zone
from rest_framework import serializers
from shop.models import WareHouse,Shop
from rest_framework.validators import UniqueTogetherValidator
class ModelSerializer(serializers.ModelSerializer):
    """Serializer for shipping Models with per-shop unique-name validation."""

    def ShippingValidator(value):
        # Field-level validator for `name`. The client sends this JSONField as
        # a dict carrying method/name/shop/pk -- NOTE(review): unusual payload
        # shape; confirm against the front-end contract.
        # NOTE(review): `json` is imported but never used here.
        import json
        if "update" in value['method']:
            if value['name']:
                try:
                    Model.objects.get(name=value['name'],shop=value['shop'])
                except Model.DoesNotExist:
                    # No clash: the name is free for this shop.
                    return
                shop_shipping_model = Model.objects.get(name=value['name'],shop=value['shop'])
                # Updating a record may keep its own name without conflict.
                if str(shop_shipping_model.id) in value['pk']:
                    return
                raise serializers.ValidationError('You have Shipping Model with this name.')
            raise serializers.ValidationError('Name is required.')
        if "new" in value['method']:
            if value['name']:
                try:
                    Model.objects.get(name=value['name'],shop=value['shop'])
                except Model.DoesNotExist:
                    return
                raise serializers.ValidationError('You have Shipping Model with this name.')
            raise serializers.ValidationError('Name is required.')

    name = serializers.JSONField(required=True, validators=[ShippingValidator])
    shop = serializers.PrimaryKeyRelatedField(queryset=Shop.objects.filter(),read_only=False)
    ware_house = serializers.PrimaryKeyRelatedField(required=True,queryset=WareHouse.objects.filter(),read_only=False)

    class Meta:
        model = Model
        fields = ('id', 'name','shop','ware_house','zone')

    def update(self, instance, validated_data):
        ''' We did not update the shop record '''
        # NOTE(review): PrimaryKeyRelatedField already resolves 'ware_house'
        # to a WareHouse instance; passing it back into pk= relies on Django
        # coercing the instance to its pk -- confirm this is intended.
        ware_house = WareHouse.objects.get(pk=validated_data.pop('ware_house'))
        instance.name = validated_data.get('name', instance.name)
        instance.ware_house = ware_house
        instance.save()
        return instance
class ZoneSerializer(serializers.ModelSerializer):
    """Serializes shipping Zones: one price per (model, country, province)."""

    model = serializers.PrimaryKeyRelatedField(queryset=Model.objects.filter(),read_only=False)
    country = serializers.CharField(required=True)
    province = serializers.CharField(required=True)
    price = serializers.DecimalField(max_digits=19, decimal_places=2, coerce_to_string=None, max_value=None, min_value=None)
    price_currency = serializers.CharField(max_length=None, min_length=None, allow_blank=False)

    class Meta:
        model = Zone
        fields = ('id', 'model','country','province','price','price_currency')
        # Enforce a single price row per (model, country, province) triple.
        validators = [
            UniqueTogetherValidator(
                queryset=Zone.objects.all(),
                fields=('model', 'country','province')
            )
        ]
| [
"waelabbas@live.com"
] | waelabbas@live.com |
4122d8dfdf03cb8b82f3ada1eac86eba2d701a0f | 72e76a8eeb3afbbd2d77eb79047410e3944947c5 | /datasets.py | ac3d0e16e9df457d922d542baa0547f0f858d3d7 | [] | no_license | Tirthraj93/Topic-Modelling-and-Clustering | f6a042141ed54f65ce00cd7c51dc138e72ba2f5a | 4a7edaa3845cf18f6bbd57fee740a6bd40c9cbfe | refs/heads/master | 2020-04-17T05:31:55.423827 | 2016-08-30T20:48:15 | 2016-08-30T20:48:15 | 66,976,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from lda.utils import ldac2dtm
def load_ldac(file_path):
    """Parse an LDA-C formatted file into a document-term matrix."""
    # Use a context manager so the handle is closed deterministically
    # (the previous version leaked it until garbage collection).
    with open(file_path) as f:
        return ldac2dtm(f, offset=0)
def load_vocab(file_path):
    """Return the whitespace-separated tokens of ``file_path`` as a tuple."""
    # Explicit encoding so the result is independent of the platform locale.
    with open(file_path, encoding='utf-8') as f:
        return tuple(f.read().split())
def load_titles(file_path):
    """Return one whitespace-stripped title per line of ``file_path``."""
    # Iterate the file directly instead of materialising readlines(), and use
    # an explicit encoding for platform-independent reads.
    with open(file_path, encoding='utf-8') as f:
        return tuple(line.strip() for line in f)
| [
"noreply@github.com"
] | Tirthraj93.noreply@github.com |
b17d3ad44bb4ae3b8a6f3fb5f6c5bbe92883ca46 | 9292bd4bd9589e08fa8277069b20abc0e6f9fd7d | /Clustering/app.py | 585d9a1b10894213868e017694d0e98d33e6322b | [] | no_license | Leonidesguerra/final_project | 53f416b872677a98ff823c6bddf1fb86ac8f3fc6 | 68c9f6b5e13aad89c6528b6b15c16261f33098d1 | refs/heads/main | 2023-06-15T21:29:28.365502 | 2021-06-29T03:03:28 | 2021-06-29T03:03:28 | 377,657,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,139 | py | import numpy as np
from numpy.core.fromnumeric import reshape
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session, session
from sqlalchemy import create_engine, func
# from config import DATABASE_URI
from flask import Flask, jsonify, render_template, redirect
#################################################
# Database Setup
#################################################
app = Flask(__name__)
# engine = sqlalchemy.create_engine(DATABASE_URI)
rds_connection_string = "postgres:imadlefl@localhost:5432/Agriculture_JB"
engine = create_engine(f'postgresql://{rds_connection_string}')
@app.route("/")
def home():
# go to home page to scrape info
return render_template("index.html")
@app.route("/visualizations")
def visualizations():
# go to home page to scrape info
return render_template("Agricultura_HTML.html")
@app.route("/mexican_states")
def perimeter():
# go to home page to scrape info
return render_template("mexican_states.html")
@app.route("/toppais")
def toppais():
data = engine.execute(
"SELECT cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY cultivo LIMIT 20")
#df = pd.read_sql_query(query, engine)
# return df[['cultivo', 'sum']].to_dict()
all_data = []
for record in data:
data_dict = {}
data_dict['cultivo'] = record[0]
data_dict['sum'] = record[1]
all_data.append(data_dict)
return jsonify(all_data)
@app.route("/estadocrop")
def estadocrop():
data = engine.execute(
"SELECT estado, cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY estado, cultivo;")
all_data = []
for record in data:
data_dict = {}
data_dict['estado'] = record[0]
data_dict['cultivo'] = record[1]
data_dict['sum'] = record[2]
all_data.append(data_dict)
return jsonify(all_data)
@app.route("/mapa")
def mapa():
data = engine.execute(
"SELECT estado, municipio, cultivo, SUM(valorproduccion), AVG(latitud), AVG(longitud), MAX(altitud) FROM agr2017 GROUP BY estado, municipio , cultivo;")
all_data = []
for record in data:
data_dict = {}
data_dict['estado'] = record[0]
data_dict['municipio'] = record[1]
data_dict['cultivo'] = record[2]
data_dict['sum'] = record[3]
data_dict['lat'] = record[4]
data_dict['lng'] = record[5]
data_dict['alt'] = record[6]
all_data.append(data_dict)
return jsonify(all_data)
@app.route("/clustering_map")
def clus_map():
data = engine.execute(
"SELECT latitud, longitud, cultivo, estado, clusters ,rendimiento FROM clustering;")
all_data = []
for record in data:
data_dict = {}
data_dict['latitud'] = record[0]
data_dict['longitud'] = record[1]
data_dict['cultivo'] = record[2]
data_dict['estado'] = record[3]
data_dict['clusters'] = record[4]
data_dict['rendimiento'] = record[5]
all_data.append(data_dict)
return jsonify(all_data)
if __name__ == '__main__':
app.run(debug=True)
| [
"leonidesguerra@gmail.com"
] | leonidesguerra@gmail.com |
87542af4bb98ec1f4f2dd18363ced2a123b396b2 | f0eb4d12fdac429d5620c0823af0b0be54d9ae3a | /KerasTracker/QualitativeResultsFigure.py | 2c9149b99f691939d72647c667e16923e340162a | [
"Apache-2.0"
] | permissive | felixVil/LDASegment | 817cf7a5b8d101c7879b293d464c0428e37a776b | 25f59c9f43c76e64c0a1e4131fa3c12bab60b716 | refs/heads/master | 2023-03-28T03:23:20.419209 | 2021-03-30T23:10:35 | 2021-03-30T23:10:35 | 322,999,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | from UtilFunctions import *
import os
import numpy as np
def find_result_per_sequence_tracker_ind(sequence, tracker, ind):
result_sequence_path = os.path.join(results_path, tracker, 'baseline')
result_filename = '%s_001.txt' % sequence
result_filepath = os.path.join(result_sequence_path, sequence, result_filename)
file_id = open(result_filepath, 'r')
lines = file_id.readlines()
file_id.close()
polygon_line = lines[ind]
polygon_line.replace('\n','')
polygon_arr = np.array([float(element) for element in polygon_line.split(',')])
return polygon_arr
sequence_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/vot-toolkit-master-2019/vot-workspace/sequences'
results_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/results_on_tracker_qualitatively_evaluated'
overlay_images_path = 'overlay_images'
if not os.path.exists(overlay_images_path):
os.makedirs(overlay_images_path)
sequences_dict = {'zebrafish1': {'inds' : [14, 31, 57], 'width': 2},
'fish1': {'inds': [143, 278, 316], 'width': 2},
'gymnastics2': {'inds': [178, 194, 206], 'width': 9},
'book': {'inds': [43, 82, 104], 'width': 2},
'conduction1':{'inds': [42, 187], 'width': 2},
'dinosaur': {'inds': [220, 277], 'width' : 9}}
color_dict = {'SiamMask':(255, 255, 255, 128), 'UPDT': (255, 0, 255, 128), 'ATOM':(255, 0, 0, 128), 'LADCF': (0, 0, 255, 128), 'LDATrackerDenseNetDilate':(0, 255, 0, 128)}
for sequence in sequences_dict.keys():
line_width = sequences_dict[sequence]['width']
poi_inds = sequences_dict[sequence]['inds']
frames_folder = os.path.join(sequence_path, sequence, 'color')
for ind in poi_inds:
poly_arrays = []
frames_file = os.path.join(frames_folder, '%08d.jpg' % (ind + 1))
overlay_image_file = os.path.join(overlay_images_path, '%s_%08d.jpg' % (sequence, ind + 1))
for tracker in color_dict.keys():
poly_array = find_result_per_sequence_tracker_ind(sequence, tracker, ind)
if len(poly_array) < 4:
continue # tracker is during failure.
elif len(poly_array) == 4:
#polygon is a standard axis aligned rectangle.
poly_array = convert_rect_to_real_poly(poly_array)
poly_arrays.append(poly_array)
draw_beatiful_polygon(poly_array, frames_file, overlay_image_file, color_dict[tracker], line_width)
frames_file = overlay_image_file
img_overlay = read_image(overlay_image_file)
crop_rect = create_tight_rect_around_locations(poly_arrays, img_overlay.shape)
img_overlay_cropped = img_overlay[crop_rect[0]:crop_rect[1], crop_rect[2]:crop_rect[3]]
img_overlay_cropped_pil = Image.fromarray(img_overlay_cropped, 'RGB')
overlay_cropped_filename = 'cropped_%s_%08d.png' % (sequence, ind + 1)
overlay_cropped_filepath = os.path.join(overlay_images_path, overlay_cropped_filename)
img_overlay_cropped_pil.save(overlay_cropped_filepath, "PNG")
| [
"felixvil@post.bgu.ac.il"
] | felixvil@post.bgu.ac.il |
4c481d87668445176f6e0368afd5521ee3954e1e | 395828af169b8d808057d16a399db7ef0f3bd11c | /first_django/blog/migrations/0003_auto_20200204_2042.py | 29a281decf055703fc2480a376ed28e6059e50b4 | [] | no_license | yeonghan/yozora | 88103cea289dfd4cbad8a120d822db83bf52a1eb | 58174a2a7d5dab04f5736243e0789d628d250fc7 | refs/heads/master | 2020-05-23T01:41:30.325966 | 2020-02-24T15:51:22 | 2020-02-24T15:51:22 | 47,525,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | # Generated by Django 3.0.2 on 2020-02-04 11:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20200128_2032'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='Post',
new_name='post',
),
]
| [
"dudgks29@naver.com"
] | dudgks29@naver.com |
eb160d82373fb3bb62f2083ae5cbdbcf702d1379 | 738ae0290d91596086810298eb3ced56967d45d2 | /python-cmd/scrabble.py | 6f1cbea6c0101940856302143274ae5de62a01cf | [] | no_license | kkredit/hs-projects | 6e7a8732331a23eacd154b4c0c611adc8795a0a6 | 97edcedf8116db57791f6b8c4666329f694d13b5 | refs/heads/master | 2021-01-09T09:36:43.379173 | 2016-06-01T21:58:15 | 2016-06-01T21:58:15 | 60,213,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,442 | py | # scrabble.py
# A program to propose possible words in scrabble situations
# (mostly) Kevin Kredit
from string import *#split,lower
WORDLIST_FILENAME = "words.txt"
####################################NOT MINE##################################
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
#from string import *#split,lower
print ("Loading word list from file...")
inFile = open(WORDLIST_FILENAME, 'r')#, 0)
line = inFile.readline()
wordlist = line.split()#split(line)
print (" ", len(wordlist), "words loaded.")
return wordlist
dictionary = load_words()
###############################MINE############################################
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def search(word,start=0,finish=len(dictionary),x=1):
#print x,start,finish,finish-start,(finish+start)/2,\
# dictionary[(finish+start)/2]
if finish-start < 5:
if word in dictionary[start:finish]: return True
else: return False
tword = dictionary[int((start+finish)/2)]
if len(tword) < len(word):
start = (start+finish)/2
elif len(tword) > len(word):
finish = (start+finish)/2
else:##if length is correct
unfound,n = True,0
while unfound:
if alphabet.index(tword[n]) < alphabet.index(word[n]):
start,unfound = int((start+finish)/2),False
elif alphabet.index(tword[n]) > alphabet.index(word[n]):
finish,unfound = int((start+finish)/2),False
elif n+1==len(word):
return True
n += 1
return search(word,start,finish,x+1)
#######################################NOT MINE#########################
def anagram(word):
if word == '':
return ['']
else:
ans = []
for w in anagram(word[1:]):
for pos in range(len(w)+1):
ans.append(w[:pos]+word[0]+w[pos:])
return ans
#######################################MINE################################
def combinations(word,wnum,numletters):#to allow not-all-letter-using words?????????
if wnum == numletters: return [word]
else:
ans = []
for pos in range(len(word)):
ans += combinations(word[:pos]+word[pos+1:],wnum-1,numletters)
return ans
def main():
#from string import lower,split
letters = input('\nWhat are the scrambled letters: ').lower();
printed,numletters,more = [],len(letters),'yes'
while more[0].lower() == 'y' and numletters > 1:
found = 0
for word in combinations(letters,len(letters),numletters):
for w in anagram(word):
if (w not in printed) and search(w):##w in dictionary:#make my own search?
if not found:
print ('\nPossible',numletters,'letter words:\n')
print (w)
printed.append(w)
found += 1
print ('\nTotal:',found)
if numletters == 2: break
numletters -= 1
more = input(str('\nWould you like '+str(numletters)+
' letter combinations? (y/n) '))
if not found:
print ('\nThose letters do not form any words.')
if input('\nAgain? ')[0].lower() == 'y':main()
if __name__=='__main__':main()
| [
"k.kredit.us@ieee.org"
] | k.kredit.us@ieee.org |
3753ecbbf592ed7d15df03a2549a45b42ac22766 | 224a906e91c7c1cc8778466ef785060871ede67d | /name_to_job.py | 4d1e39731dce00448b3ebe524814d3cbaed30fb8 | [] | no_license | Pavanisoma/Salary-Prediction-from-Name-Team-Competition- | 353c7fda0df873e7e41d8283929c9ed49fdc97e9 | 1ddf618cbc4df41171a1b87698c0a7b45f0eb574 | refs/heads/master | 2021-08-08T21:10:57.862710 | 2020-06-14T19:28:51 | 2020-06-14T19:28:51 | 191,850,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,272 | py | import numpy as np
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import nltk
import os
import time
import tensorflow as tf
import csv
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import word_tokenize
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
def pre_processed_job_title(title):
title = title.lower()
lmtzr = WordNetLemmatizer()
lemmatized = [lmtzr.lemmatize(word) for word in word_tokenize(title)]
title = '<start> ' + ' '.join(lemmatized) + ' <end>'
return title
def pre_processed_name(first_name, last_name):
first_name = first_name.replace(',', '').replace('"', '').replace('\'', '').lower()
last_name = last_name.replace(',', '').replace('"', '').replace('\'', '').lower()
return '<start> ' + first_name + ' ' + last_name + ' <end>'
def load_dataset(file="processed_bayarea.csv"):
name_list = []
job_list = []
name_job_pairs = []
with open(file, 'r') as csvfile:
csvreader = csv.reader(csvfile)
next(csvreader)
for row in csvreader:
name = pre_processed_name(row[2], row[1])
job = pre_processed_job_title(row[0])
job_list.append(job)
name_list.append(name)
name_job_pairs.append([name, job])
return name_list, job_list, name_job_pairs
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
def max_length(tensor):
return max(len(t) for t in tensor)
def load_sequence_data():
# creating cleaned input, output pairs
name_list, job_list, pairs = load_dataset()
# index language using the class defined above
inp_lang = LanguageIndex([name for name, job in pairs])
targ_lang = LanguageIndex([job for name, job in pairs])
inp_lang.create_index()
targ_lang.create_index()
# Vectorize the input and target languages
# name
input_tensor = [[inp_lang.word2idx[s] for s in name.split(' ')] for name, job in pairs]
# job_tite
target_tensor = [[targ_lang.word2idx[s] for s in job.split(' ')] for name, job in pairs]
# Calculate max_length of input and output tensor
# Here, we'll set those to the longest sentence in the dataset
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
def gru(units):
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
hidden_with_time_axis = tf.expand_dims(hidden, 1)
score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)))
attention_weights = tf.nn.softmax(score, axis=1)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
output = tf.reshape(output, (-1, output.shape[2]))
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
def evaluate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
units = 1024
sentence = pre_processed_name(first_name, last_name)
inputs = []
for i in sentence.split(' '):
if i in inp_lang.word2idx:
inputs.append(inp_lang.word2idx[i])
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
plt.show()
def translate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = evaluate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# print('Input: {}'.format(sentence))
# print('Predicted translation: {}'.format(result))
# attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
# plot_attention(attention_plot, sentence.split(' '), result.split(' '))
return result
def main():
input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_sequence_data()
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
N_BATCH = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.train.AdamOptimizer()
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
total_loss += batch_loss
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / N_BATCH))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
print(tf.train.latest_checkpoint(checkpoint_dir))
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
print(translate('chang', 'shih yu', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ))
print(translate('bui', 'xuan loc', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ))
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | Pavanisoma.noreply@github.com |
22ff9336b110cd98c8003d9035ac0470e51ce429 | 1cd3305944de3d5b76ed91c9e0ac7e26b82f47ff | /2019/src/j4_s1.py | c8c5708d08518a425e322647be8957ec162d4269 | [
"Apache-2.0"
] | permissive | coachlivinglegend/CCC | 4b0b6d4bbe031de88275a2834a12ae74fa7bc54e | 6f98e81c7fef38bf70e68188db38863cc0cba2f4 | refs/heads/master | 2023-04-18T00:41:23.522774 | 2021-05-04T12:55:02 | 2021-05-04T12:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import collections
import itertools
import functools
import math
import re
import bisect
import random
rint = lambda: int(input())
rstr = lambda: input()
rints = lambda: list(map(int, input().split()))
rstrs = lambda: input().split()
wmat = lambda mat, sep: '\n'.join(sep.join(map(str, row)) for row in mat)
warr = lambda arr, sep: sep.join(map(str, arr))
wl = lambda sep, *arr: sep.join(map(str, arr))
ctoi = lambda x : ord(x) - ord('a')
itoc = lambda x : chr(x + ord('a'))
grid = [
[[[1,2],[3,4]], [[2,1],[4,3]]],
[[[3,4],[1,2]], [[4,3],[2,1]]]
]
def main():
s = rstr()
h = v = 0
for ch in s:
if ch == 'H':
h = 1 - h
else:
v = 1 - v
print(wmat(grid[h][v], ' '))
if __name__ == '__main__':
main()
| [
"kylexie186@gmail.com"
] | kylexie186@gmail.com |
3f1b20e6325128b26f23eed22db51edb5211804d | e1d942fc4d0099c4a5fe7cf10fdf7e710da2a147 | /11/JackCompiler.py | 354594cea58f8aa55a8c2273f48dda940a0413fb | [] | no_license | AradAlon/Nand2Tetris | 1c5856a6cf5734661b8e848a4e5fbea5381f4603 | 1ca9948b1495b0f16bfa5c89c4be50944fa2380e | refs/heads/master | 2022-12-15T14:32:10.149762 | 2020-09-19T13:27:15 | 2020-09-19T13:27:15 | 296,870,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,213 | py | import sys
import re
import glob
OUT_PATH = ''
T_KEYWORD = 'keyword'
T_SYM = 'symbol'
T_NUM = 'integerConstant'
T_STR = 'stringConstant'
T_ID = 'identifier'
class JackCompiler:
def __init__(self, jacks):
self.analyze(jacks)
def analyze(self, jacks):
for jack in jacks:
CompilationEngine(jack)
class JackTokenizer:
def __init__(self, jack):
reader = open(jack, 'r')
one_liner = self.one_liner(reader)
self.tokens = self.tokenize(one_liner)
self.index = -1
reader.close()
def one_liner(self, reader):
content = []
line = reader.readline()
while line:
comment_index = line.find('//') if line.find('//') > -1 else len(line)
line = line[:comment_index].strip()
if not line:
line = reader.readline()
continue
content.append(line)
line = reader.readline()
one_liner = ' '.join(content)
one_liner = re.sub(r'/\*(.*?)\*/', '', one_liner).strip()
return one_liner
def tokenize(self, one_liner):
keywords = ['class', 'method', 'function', 'constructor', 'int', 'boolean',
'char', 'void', 'var', 'static', 'field', 'let', 'do', 'if',
'else', 'while', 'return', 'true', 'false', 'null', 'this']
symbols = ['{','}','(',')','[',']','.',',',';','+','-','*','/','&','|','<','>','=','~']
convert_symbols = {
"<": '<',
">": '>',
'"': '"',
"&": '&',
}
tokens = []
keyword_re = r'\b' + r'\b|\b'.join(keywords) + r'\b'
sym_re = '['+re.escape(''.join(symbols))+']'
num_re = r'\d+'
str_re = r'"[^"\n]*"'
id_re = r'[\w\-]+'
word = re.compile(keyword_re+'|'+sym_re+'|'+num_re+'|'+str_re+'|'+id_re)
types = {
T_KEYWORD: keyword_re,
T_SYM: sym_re,
T_NUM: num_re,
T_STR: str_re,
T_ID: id_re,
}
split = word.findall(one_liner)
for word in split:
for typ, reg in types.items():
if re.match(reg, word) != None:
if typ == T_STR:
word = word.strip('"')
# if typ == T_SYM:
# word = convert_symbols.get(word, word)
tokens.append((word,typ))
break
return tokens
@property
def hasMoreTokens(self):
return self.index < len(self.tokens) - 1
def advance(self):
self.index += 1 if self.hasMoreTokens else self.index
@property
def currentToken(self):
return self.tokens[self.index] if self.index > -1 else None
def nextToken(self, LL):
return self.tokens[self.index + LL] if self.hasMoreTokens else None
class CompilationEngine:
label_count = 0
convert_symbols = {'+':'add', '-':'sub', '*':'call Math.multiply 2', '/':'call Math.divide 2',
'<':'lt', '>':'gt', '=':'eq', '&':'and', '|':'or'}
unary_convert_symbols = {'-':'neg', '~':'not'}
def __init__(self, jack):
self.jackTokens = JackTokenizer(jack)
self.vm = VMWriter(jack)
self.symbols = SymbolTable()
self.compileClass()
self.vm.close()
def process(self, expected_typ, *args):
self.jackTokens.advance()
val ,typ = self.jackTokens.currentToken
if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args):
text = '{}, ({} {})'.format(expected_typ, typ, val)
raise ValueError()
return typ, val
def peek(self, expected_typ, *args, LL=1):
val, typ = self.jackTokens.nextToken(LL)
if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args):
return False
return True
@property
def label(self):
self.label_count += 1
return 'label{}'.format(str(self.label_count))
def vm_variable(self, action, name):
kind, type, index = self.symbols.kind_type_index_of(name)
if action == 'push':
self.vm.write_push(kind, index)
if action == 'pop':
self.vm.write_pop(kind, index)
def compileClass(self):
self.process(T_KEYWORD, 'class')
_, self.current_class_name = self.process(T_ID)
self.process(T_SYM, '{')
self.compileClassVarDec()
self.compileSubroutineDec()
self.process(T_SYM, '}')
def compileClassVarDec(self):
while self.peek(T_KEYWORD, 'static', 'field'):
_, kind = self.process(T_KEYWORD, 'static', 'field')
_, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
_, name = self.process(T_ID)
self.symbols.append_class_table(name, type, kind)
while self.peek(T_SYM, ','):
self.process(T_SYM, ',')
_, name = self.process(T_ID)
self.symbols.append_class_table(name, type, kind)
self.process(T_SYM, ';')
def compileSubroutineDec(self):
while self.peek(T_KEYWORD, 'constructor', 'function', 'method'):
_, self.current_subroutine_type = self.process(T_KEYWORD, 'constructor', 'function', 'method')
_, type = self.process(T_KEYWORD, 'void', 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'void', 'int', 'char', 'boolean') else self.process(T_ID)
_, self.current_subroutine_name = self.process(T_ID)
self.symbols.start_subroutine()
if self.current_subroutine_type == 'method':
self.symbols.append_subroutine_table('this', self.current_class_name, 'argument')
self.compileParameterList()
self.compileSubroutineBody()
def compileParameterList(self):
self.process(T_SYM, '(')
if self.peek(T_KEYWORD, 'int', 'char', 'boolean') or self.peek(T_ID):
_, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
_, name = self.process(T_ID)
self.symbols.append_subroutine_table(name, type, 'argument')
while self.peek(T_SYM, ','):
self.process(T_SYM, ',')
_, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
_, name = self.process(T_ID)
self.symbols.append_subroutine_table(name, type, 'argument')
self.process(T_SYM, ')')
def compileSubroutineBody(self):
self.process(T_SYM, '{')
self.compileVarDec()
func_name = self.current_class_name+'.'+self.current_subroutine_name
num_of_var = self.symbols.var_count('var')
self.vm.write_function(func_name, num_of_var)
self.this_pointer()
self.compileStatements()
self.process(T_SYM, '}')
def this_pointer(self):
if self.current_subroutine_type == 'method':
self.vm.write_push('argument', 0)
self.vm.write_pop('pointer', 0)
elif self.current_subroutine_type == 'constructor':
self.vm.write_push('constant', self.symbols.var_count('field'))
self.vm.write_call('Memory.alloc', 1)
self.vm.write_pop('pointer', 0)
def compileVarDec(self):
while self.peek(T_KEYWORD, 'var'):
_, kind = self.process(T_KEYWORD, 'var')
_, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
_, name = self.process(T_ID)
self.symbols.append_subroutine_table(name, type, kind)
while self.peek(T_SYM, ','):
self.process(T_SYM, ',')
_, name = self.process(T_ID)
self.symbols.append_subroutine_table(name, type, kind)
self.process(T_SYM, ';')
def compileStatements(self):
while self.peek(T_KEYWORD, 'let', 'if', 'while', 'do', 'return'):
if self.peek(T_KEYWORD, 'let'):
self.compileLet()
elif self.peek(T_KEYWORD, 'if'):
self.compileIf()
elif self.peek(T_KEYWORD, 'while'):
self.compileWhile()
elif self.peek(T_KEYWORD, 'do'):
self.compileDo()
elif self.peek(T_KEYWORD, 'return'):
self.compileReturn()
def compileLet(self):
self.process(T_KEYWORD, 'let')
_, name = self.process(T_ID)
if self.peek(T_SYM, '['):
self.vm_variable('push', name)
self.process(T_SYM, '[')
self.compileExpression()
self.process(T_SYM, ']')
self.vm.write_arithmetic('add')
self.process(T_SYM, '=')
self.compileExpression()
self.process(T_SYM, ';')
self.vm.write_pop('temp', 1)
self.vm.write_pop('pointer', 1)
self.vm.write_push('temp', 1)
self.vm.write_pop('that', 0)
return
self.process(T_SYM, '=')
self.compileExpression()
self.process(T_SYM, ';')
self.vm_variable('pop', name)
def compileIf(self):
self.process(T_KEYWORD, 'if')
label = self.label
self.compileCondition(label)
if self.peek(T_KEYWORD, 'else'):
self.process(T_KEYWORD, 'else')
self.process(T_SYM, '{')
self.compileStatements()
self.process(T_SYM, '}')
self.vm.write_label(label)
def compileWhile(self):
self.process(T_KEYWORD, 'while')
label = self.label
self.vm.write_label(label)
self.compileCondition(label)
def compileCondition(self, label):
self.process(T_SYM, '(')
self.compileExpression()
self.process(T_SYM, ')')
self.vm.write_arithmetic('not')
else_label = self.label
self.vm.write_if(else_label)
self.process(T_SYM, '{')
self.compileStatements()
self.process(T_SYM, '}')
self.vm.write_goto(label)
self.vm.write_label(else_label)
def compileDo(self):
self.process(T_KEYWORD, 'do')
self.compileSubroutineCall()
self.vm.write_pop('temp', 0)
self.process(T_SYM, ';')
def compileReturn(self):
self.process(T_KEYWORD, 'return')
if not self.peek(T_SYM, ';'):
self.compileExpression()
else:
self.vm.write_push('constant', 0)
self.process(T_SYM, ';')
self.vm.write_return()
def compileExpression(self):
if not self.is_term():
return 0
self.compileTerm()
while self.peek(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '='):
_, op = self.process(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '=')
self.compileTerm()
self.vm.write_arithmetic(self.convert_symbols[op])
return 1
def compileTerm(self):
if self.peek(T_NUM):
_, val = self.process(T_NUM)
self.vm.write_push('constant', val)
elif self.peek(T_STR):
_, string = self.process(T_STR)
self.vm.write_push('constant', len(string))
self.vm.write_call('String.new', 1)
for char in string:
self.vm.write_push('constant', ord(char))
self.vm.write_call('String.appendChar', 2)
elif self.peek(T_KEYWORD, 'true', 'false', 'null', 'this'):
_, word = self.process(T_KEYWORD, 'true', 'false', 'null', 'this')
if word == 'this':
self.vm.write_push('pointer', 0)
elif word == 'true':
self.vm.write_push('constant', 1)
self.vm.write_arithmetic('neg')
else:
self.vm.write_push('constant', 0)
elif self.peek(T_SYM, '('):
self.process(T_SYM, '(')
self.compileExpression()
self.process(T_SYM, ')')
elif self.peek(T_SYM, '-', '~'):
_, op = self.process(T_SYM, '-', '~')
self.compileTerm()
self.vm.write_arithmetic(self.unary_convert_symbols[op])
elif self.peek(T_ID):
if self.peek(T_SYM, '[', LL=2):
_, name = self.process(T_ID)
self.vm_variable('push', name)
self.process(T_SYM, '[')
self.compileExpression()
self.process(T_SYM, ']')
self.vm.write_arithmetic('add')
self.vm.write_pop('pointer', 1)
self.vm.write_push('that', 0)
elif self.peek(T_SYM, '(', '.', LL=2):
self.compileSubroutineCall()
else:
_, name = self.process(T_ID)
self.vm_variable('push', name)
    def is_term(self):
        """Return truthy when the lookahead token can start a term:
        integer/string constant, keyword constant, identifier,
        parenthesized group, or unary '-'/'~'."""
        return (self.peek(T_NUM) or self.peek(T_STR) or
                self.peek(T_KEYWORD, 'true', 'false', 'null', 'this') or
                self.peek(T_ID) or self.peek(T_SYM, '(', '-', '~'))
    def compileSubroutineCall(self):
        """Compile 'name(args)', 'var.method(args)' or 'Class.func(args)'
        and emit the VM call with the correct argument count.

        A bare name is treated as a method of the current class called on
        `this`; a dotted name whose prefix resolves in the symbol table is
        a method call on that variable (the object is pushed as a hidden
        first argument and dispatch happens on its declared type).
        """
        num_of_args = 0
        _, obj_name = self.process(T_ID)
        if self.peek(T_SYM, '.'):
            self.process(T_SYM, '.')
            _, type, _ = self.symbols.kind_type_index_of(obj_name)
            if type:
                # method call: push the receiver as the hidden argument
                num_of_args += 1
                self.vm_variable('push', obj_name)
                obj_name = type
            _, func_name = self.process(T_ID)
            name = '{}.{}'.format(obj_name, func_name)
        else:
            # implicit method of the current class, called on `this`
            self.vm.write_push('pointer', 0)
            num_of_args += 1
            name = '{}.{}'.format(self.current_class_name, obj_name)
        self.process(T_SYM, '(')
        num_of_args += self.compileExpressionList()
        self.process(T_SYM, ')')
        self.vm.write_call(name, num_of_args)
    def compileExpressionList(self):
        """Compile a comma-separated (possibly empty) expression list and
        return how many expressions were compiled/pushed."""
        num_of_args = self.compileExpression()
        while self.peek(T_SYM, ','):
            self.process(T_SYM, ',')
            self.compileExpression()
            num_of_args += 1
        return num_of_args
class SymbolTable:
    """Two-level symbol table for the Jack compiler.

    Class scope tracks 'field' and 'static' variables; each subroutine
    scope (one dict per compiled subroutine, the last one being current)
    tracks 'argument' and 'var' variables.  Every entry is a
    {'name': ..., 'type': ...} dict and its position within its kind list
    is the variable's VM segment index.
    """

    def __init__(self):
        # kind -> ordered list of {'name', 'type'} records
        self.class_table = {
            "field": [],
            "static": [],
        }
        # one scope dict per subroutine; [-1] is the current scope
        self.subroutine_tables = []

    def append_class_table(self, name, type, kind):
        """Declare a class-level variable ('field' or 'static')."""
        self.class_table[kind].append({'name': name, 'type': type})

    def append_subroutine_table(self, name, type, kind):
        """Declare an 'argument' or 'var' in the current subroutine scope."""
        self.subroutine_tables[-1][kind].append({'name': name, 'type': type})

    def start_subroutine(self):
        """Open a fresh, empty scope for a new subroutine."""
        self.subroutine_tables.append({
            "argument": [],
            "var": [],
        })

    def var_count(self, kind):
        """Return the number of variables of `kind` in its scope."""
        if kind in ('field', 'static'):
            return len(self.class_table[kind])
        return len(self.subroutine_tables[-1][kind])

    def kind_type_index_of(self, name):
        """Resolve `name` to (kind, type, index); (None, None, None) if unknown.

        Class scope is searched before the current subroutine scope,
        preserving the original lookup order.  enumerate() yields the
        index directly instead of a second O(n) list.index() scan (which
        would also return the wrong slot for duplicate records).
        """
        for kind, elements in self.class_table.items():
            for index, element in enumerate(elements):
                if element['name'] == name:
                    return kind, element['type'], index
        for kind, elements in self.subroutine_tables[-1].items():
            for index, element in enumerate(elements):
                if element['name'] == name:
                    return kind, element['type'], index
        return None, None, None
class VMWriter:
    """Emits VM commands for one .jack source into the matching .vm file."""

    def __init__(self, jack):
        # 'Foo.jack' -> 'Foo.vm', held open for the writer's lifetime
        self.file = open(jack.replace('.jack', '.vm'), 'w')

    @staticmethod
    def _vm_segment(segment):
        """Map compiler segment names to VM segment names.

        'field' variables live in the 'this' segment and 'var' variables
        in 'local'; everything else passes through unchanged.  Shared by
        write_push/write_pop, which previously duplicated this mapping.
        """
        return {'field': 'this', 'var': 'local'}.get(segment, segment)

    def write(self, line):
        self.file.write(line + '\n')

    def write_push(self, segment, index):
        self.write('push {} {}'.format(self._vm_segment(segment), index))

    def write_pop(self, segment, index):
        self.write('pop {} {}'.format(self._vm_segment(segment), index))

    def write_arithmetic(self, command):
        self.write('{}'.format(command))

    def write_label(self, label):
        self.write('label {}'.format(label))

    def write_goto(self, label):
        self.write('goto {}'.format(label))

    def write_if(self, label):
        self.write('if-goto {}'.format(label))

    def write_call(self, name, num_of_args):
        self.write('call {} {}'.format(name, num_of_args))

    def write_function(self, name, num_of_locals):
        self.write('function {} {}'.format(name, num_of_locals))

    def write_return(self):
        self.write('return')

    def close(self):
        self.file.close()
if __name__ == "__main__":
    # NOTE(review): sys.argv[1] is read *before* the argument-count check
    # below, so running with no arguments raises IndexError instead of
    # printing the usage message.
    path_or_file = sys.argv[1]
    if not path_or_file.endswith('.jack'):
        # NOTE(review): `name` is computed but never used afterwards.
        name = path_or_file.split('\\')[-1]
        OUT_PATH = path_or_file
    num_of_arg = len(sys.argv) - 1
    if num_of_arg != 1:
        print("expected 1 argument - file or folder, got {} argument/s".format(num_of_arg))
        sys.exit()
    # a folder yields its *.jack files; otherwise treat the argument as a file
    jacks = glob.glob(path_or_file+'/*.jack') or [path_or_file]
    # NOTE(review): unreachable -- `or [path_or_file]` above guarantees
    # `jacks` is never empty, so this message can never be printed.
    if jacks == []:
        print("no jack files in folder")
        sys.exit()
trans = JackCompiler(jacks) | [
"noreply@github.com"
] | AradAlon.noreply@github.com |
a2ff8efb83a37d60e0d1299f437db3a37bd87b9a | 1d943d6daf9c25a9737663091d81bb08a6de6ef6 | /main.py | 4ae6d7ee999c4d9cb970c7803bfe124add29fd61 | [] | no_license | Steveineiter/A-Star_visualization | 1b29cbdc0dd3dafbc69d467a9cd2446a04d25336 | 50c24bdcbc3c85650dbe2459648bc20b73da08a9 | refs/heads/main | 2023-01-01T00:34:41.195400 | 2020-10-29T15:58:18 | 2020-10-29T15:58:18 | 308,378,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | try:
import pygame
import sys
import math
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
except:
pass
# Game field
WIDTH, HEIGHT = (900, 900)
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
PURPLE = (148, 62, 143)
# Utility
NUMBER_OF_ROWS = 30
NUMBER_OF_COLUMNS = 30
PIXEL_PER_BOX_WIDTH = WIDTH / NUMBER_OF_ROWS
PIXEL_PER_BOX_HEIGHT = HEIGHT / NUMBER_OF_COLUMNS
BOX_SIZE = 3
# TODO: decide whether this module-level grid is still needed
grid = []
# The visualization is done with pygame; tkinter was intended for the input dialogs.
def start_up():
    """Build the NUMBER_OF_ROWS x NUMBER_OF_COLUMNS grid of BoxInGrid
    cells and mark the fixed start/end cells (purple, non-editable)."""
    # Creating 2D Array
    global grid
    grid = [[0 for i in range(NUMBER_OF_ROWS)] for j in range(NUMBER_OF_COLUMNS)] # same as the next few lines
    # Creating Spots
    for i in range(NUMBER_OF_ROWS):
        for j in range(NUMBER_OF_COLUMNS):
            grid[i][j] = BoxInGrid(i, j)
    # Set start and end node
    start = grid[5][5]
    end = grid[NUMBER_OF_ROWS - 6][NUMBER_OF_COLUMNS - 6]
    start.color = end.color = PURPLE
    start.is_changeable = end.is_changeable = False
def add_box_neighbor():
    """Stub: intended to collect each cell's neighbors for the A* search;
    not implemented yet."""
    pass
class BoxInGrid:
    """One cell of the search grid: position, draw color and edit flags."""
    def __init__(self, x, y):
        self.x = x                   # column index in the grid
        self.y = y                   # row index in the grid
        self.color = WHITE
        self.is_blocked = False      # True once the user turns the cell into a wall
        self.is_changeable = True    # False for the fixed start/end cells
    def draw(self, window, box_with):
        # NOTE(review): the rect width/height are hard-coded to 10 rather
        # than SIDE, which looks inconsistent with the PIXEL_PER_BOX_*
        # scaling above -- confirm the intended cell size before changing.
        pygame.draw.rect(window, self.color, (self.x * PIXEL_PER_BOX_WIDTH, self.y * PIXEL_PER_BOX_HEIGHT, 10, 10), box_with)
def redraw_window():
    """Draw every grid cell onto the global WINDOW and flip the display."""
    # print(grid)
    for row in grid:
        for box in row:
            box.draw(WINDOW, BOX_SIZE)
    pygame.display.update()
def handle_mouse_press(mouse_position):
    """Turn the clicked cell into a wall (blue) unless it is already a
    wall or is one of the fixed (non-changeable) start/end cells."""
    x_axis, y_axis = mouse_position
    # pixel coordinates -> grid indices
    x_pos = x_axis // PIXEL_PER_BOX_WIDTH
    y_pos = y_axis // PIXEL_PER_BOX_HEIGHT
    access_point = grid[int(x_pos)][int(y_pos)]
    if not access_point.is_blocked and access_point.is_changeable:
        access_point.color = BLUE
        access_point.is_blocked = True
if __name__ == '__main__':
    run = True
    fps = 60
    clock = pygame.time.Clock()
    start_up()
    box_in_grid = BoxInGrid(100, 100)  # NOTE(review): created but never used
    while(run):
        clock.tick(fps)  # cap the loop at `fps` frames per second
        redraw_window()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        # left mouse button paints wall cells
        if pygame.mouse.get_pressed()[0]:
            mouse_position = pygame.mouse.get_pos()
            handle_mouse_press(mouse_position)
        # Enter ends editing (search itself is not implemented yet)
        if pygame.key.get_pressed()[pygame.K_RETURN]:
            print("yes your majesti")
            run = False
    add_box_neighbor()
| [
"noreply@github.com"
] | Steveineiter.noreply@github.com |
e3cc6b9117ff7d7c9fee0eba2bd19618379ed048 | 1ab7fff33be75efb4b725cd6c3ba5566c29bed93 | /tutorial/tutorial/urls.py | 1245d82edb7947719fca3d3ca4448a39e0087e7a | [] | no_license | Anjali-Del/Anj | 5b0ea6b5bc2b9c17653014d830e2526ac215ce1b | c361bc29b3da6700c51967590cb5f3abeb66881c | refs/heads/master | 2021-01-15T17:41:27.574646 | 2015-07-30T05:11:00 | 2015-07-30T05:11:00 | 38,672,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; with the 1.10 imports in this file's header this likely
# fails at import time.  The modern form is a plain list of url() entries.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^', include('snippets.urls')),
)
| [
"anjali@delhivery.com"
] | anjali@delhivery.com |
d481c060b21ebf733f9e03348fe8dbb008dcb1a0 | 0f8e3eb9c3405409418428148f97f93627a886a5 | /gui.py | 4e644ecf6068f86453d27bbbc2aea5287e181a51 | [] | no_license | shikharsrivastava/Bot-Environment | dfd4a41ffc614f46eea129d102ba4441de39eae2 | 0afbfb61baae49ebe3bb22b3c257a251913c04e6 | refs/heads/master | 2020-06-12T15:44:21.965327 | 2018-01-30T08:01:53 | 2018-01-30T08:01:53 | 75,796,633 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,020 | py | import pygame,sys
from pygame.locals import *
import random
import time
from math import log
import subprocess
pygame.init()
DISPLAY = pygame.display.set_mode((800,800))
pygame.display.set_caption('Checkers')
BLACK = (0,0,0)
WHITE = (255, 255, 255)
RED= (255,0,0)
GREEN = (0, 255,0)
BLUE = (0,0, 255)
AQUA=(0, 255, 255)
FUCHSIA=(255,0, 255)
GRAY=(128, 128, 128)
OLIVE=(128, 128,0)
PURPLE=(128,0, 128)
YELLOW=(255, 255,0)
TEAL=( 0, 128, 128)
row=8
col=8
WIDTH=120
INITIAL_X=120
INITIAL_Y=80
SIDE=80
DISPLAY.fill(WHITE)
col1=WHITE
col2=BLACK
currentColor = col1
colboard=[]
bbw = 0
bbb = 0
side = 'W'
def convertPos(pos):
    """Mirror a 0-63 board index vertically (row r -> row 7-r, same column).

    Converts between the drawing orientation and the bitboard orientation;
    it is its own inverse.  Floor division is used so the function behaves
    identically under Python 2 and Python 3 (plain '/' on ints became
    float division in Python 3, which would corrupt the index).
    """
    row = pos // 8
    col = pos % 8
    return (7 - row) * 8 + col
def buildSquare(i,j):
    """Draw one board square of side SIDE centered at pixel (i, j),
    filled with the module-global currentColor."""
    pygame.draw.rect(DISPLAY,currentColor,(i-SIDE/2,j-SIDE/2,SIDE,SIDE))
def init():
    """Draw the empty 8x8 board: outer border plus alternating squares.

    Also appends every square's color to the module-level colboard list.
    NOTE(review): colboard is never cleared, so it grows by 64 entries on
    each call (init is re-run for every board redraw).
    """
    global currentColor;
    pygame.draw.rect(DISPLAY,BLACK,(INITIAL_X-SIDE/2,INITIAL_Y-SIDE/2,SIDE*8,SIDE*8),5)
    for i in range(0,8):
        # each row starts with the opposite color of the previous row
        if i%2==0:
            currentColor=col1
        else:
            currentColor=col2
        for j in range(0,8):
            x=INITIAL_X+j*SIDE
            y=INITIAL_Y+i*SIDE
            buildSquare(x,y)
            colboard.append(currentColor)
            currentColor=col2 if currentColor==col1 else col1
def first():
    """Return (white, black) bitboards for the standard opening position.

    Each (start, stop) pair lists one starting row of men by drawing-square
    index, stepping by 2 for the alternating playable squares; convertPos()
    mirrors each index into bitboard orientation before setting its bit.
    The dead per-iteration row/col computations of the previous version
    were removed -- their results were never used.
    """
    black = 0
    for start, stop in ((1, 8), (8, 15), (17, 24)):
        for i in range(start, stop, 2):
            black |= (1 << convertPos(i))
    white = 0
    for start, stop in ((40, 47), (49, 56), (56, 63)):
        for i in range(start, stop, 2):
            white |= (1 << convertPos(i))
    return (white, black)
def genFen(white, black, side):
    """Serialize the two bitboards into a FEN-like string for the engine.

    Ranks are emitted top to bottom, separated by '/': 'D' marks a white
    man, 'd' a black man, and digit runs count consecutive empty squares.
    The side to move ('W'/'B') is appended at the end.
    """
    board = [[0 for _ in range(8)] for _ in range(8)]
    # place white (1) first, then black (2), matching the original order
    for value, bits in ((1, white), (2, black)):
        while bits > 0:
            # index of the lowest set bit; bit_length() is exact for every
            # square, unlike int(log(x, 2)) which can round down by one
            # near large powers of two due to float imprecision
            pos = (bits & -bits).bit_length() - 1
            pos = convertPos(pos)
            board[pos // 8][pos % 8] = value
            bits -= bits & -bits
    fen = ''
    for i in range(8):
        row = ''
        count = 0
        for j in range(8):
            if board[i][j] == 0:
                count += 1
            elif board[i][j] == 1:
                if count > 0:
                    row = row + str(count)
                row = row + 'D'
                count = 0
            elif board[i][j] == 2:
                if count > 0:
                    row = row + str(count)
                row = row + 'd'
                count = 0
        if count > 0:
            row = row + str(count)
        row = row + '/'
        fen = fen + row
    fen = fen + side
    return fen
def makeBoard(white, black):
    """Redraw the board and place piece circles from the two bitboards
    (green circles = white men, red circles = black men)."""
    init()
    while white > 0:
        # lowest set bit -> square index
        # NOTE(review): int(log(x, 2)) depends on float rounding;
        # (x & -x).bit_length() - 1 would be exact for all 64 squares.
        pos = int(log(white & - white, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, GREEN, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE/3)
        white -= white & (-white)
    while black > 0:
        pos = int(log(black & - black, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, RED, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE/3)
        black -= black & (-black)
def isValid(move, exe):
    """Ask the external engine whether `move` is legal in the current position.

    Runs `exe fen <fen> isvalid <move>`; on a legal move the engine prints
    a success flag followed by the updated white/black bitboards, which
    are stored back into the module-level position (bbw/bbb).
    """
    # Bug fix: without this declaration the assignment below created
    # *local* bbw/bbb, silently discarding the engine's updated position.
    global bbw, bbb
    fen = genFen(bbw, bbb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'isvalid', move])
    out = out.split()
    if out[0] == '0':
        return False
    else:
        bbw, bbb = int(out[1]), int(out[2])
        return True
def bestMove(bw, bb, side, exe):
    """Ask the external engine for its best move from the given position.

    Returns the engine's output as integers -- the new (white, black)
    bitboards, or -1 -1 when the side to move has no legal move (lost).
    """
    fen = genFen(bw, bb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'best'])
    out = out.split()
    return map(int, out)
def botfight():
    """Engine-vs-engine loop: alternate sides, asking ./a.out for the best
    move and redrawing after each, until one side has no move left."""
    bw, bb = first()
    makeBoard(bw, bb)
    pygame.display.update()
    side = 'W'
    while True:
        bw, bb = bestMove(bw, bb, side, './a.out')
        if bw == -1 and bb == -1:
            # engine signals no legal move: the current side loses
            print "Game over, {} loses".format(side)
            break
        else:
            makeBoard(bw, bb)
            pygame.display.update()
            time.sleep(0.5)
        side = 'B' if side == 'W' else 'W'
if __name__ == "__main__":
botfight()
"""
white, black = first()
bbw = white
bbb = black
makeBoard(bbw, bbb)
print(genFen(bbw, bbb, 'W'))
prev = None
killed = 0
move = 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit(0)
elif event.type==MOUSEBUTTONDOWN:
mousePos=list(pygame.mouse.get_pos())
mousePos[0]-=INITIAL_X-SIDE/2;
mousePos[1]-=INITIAL_Y-SIDE/2;
col=mousePos[0]/SIDE
row=mousePos[1]/SIDE
if 0<=row<=7 and 0<=col<=7:
print row, col
if prev == None:
pos = convertPos(row*8+col)
move |= pos
prev = pos
print "prev = ", prev
else:
cur = convertPos(row*8+col)
print cur, prev
if cur - prev == 7 or cur - prev == 9:
print "hello"
move |= (cur << 6)
elif cur - prev == 14:
killed += 1
s = 0
for i in range(6, 16):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
move |= (killed << 12)
elif cur - prev == 18:
move |= (1 << (killed + 17))
killed += 1
s = 0
for i in range(6, 16):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
move |= (killed << 12)
else:
s = 0
for i in range(6, 12):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
prev = cur
if (isValid(move, './a.out')):
makeBoard(bbw, bbb)
print((move & 0x3f), ((move >> 6) & 0x3f), ((move >> 12) & 0xf))
killed = ((move >> 12) & 0xf)
for i in range(killed):
print ((move >> (17+i)) & 1)
else:
prev = None
killed = 0
move = 0
pygame.display.update()
"""
| [
"noreply@github.com"
] | shikharsrivastava.noreply@github.com |
3881ce11f6a9512b8d49fa4fb9fdd8eedf5e4ae6 | 954f9a154066c65374b475f925f2e5a138a14162 | /bigdatamining/text_based/parser.py | 548979216515cbe8f5a21eb422fa2f33c8bfa6f7 | [] | no_license | reloadbrain/recommEngine | 8263673945b5af1f73d26c22625a3090e071e952 | 9d3a4227916cb8583e1faef572824a54a067e7d1 | refs/heads/master | 2020-03-11T01:49:48.925712 | 2015-12-03T13:31:48 | 2015-12-03T13:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | import nltk
import sys
import gzip
import json
######################################################
#
# Parser that take a text and reduce it to a tokenized/stemmed
# string.
#
# To debug call python parser.py
#
######################################################
def extract_terms(text):
#divide text in tokens and labeld them based on what type of word they are
tokens = nltk.word_tokenize(text)
tagged = nltk.pos_tag(tokens)
#filter the list and select only nouns
filtered = []
for tupla in tagged:
if 'NN' in tupla[1]:
filtered.append(tupla[0])
#stemm the list and join it in a string
stemmer = nltk.SnowballStemmer("english")
lst = []
for i in range(len(filtered)):
lst.append(stemmer.stem(filtered[i]))
return ' '.join(lst)
def extract_reviews(path):
    """Parse a gzipped file of JSON reviews into [{'asin', 'text'}, ...].

    Lines missing 'asin' or 'reviewText' are skipped.  Reviews sharing an
    ASIN are merged into a single entry whose text is the space-joined
    concatenation, preserving first-seen ASIN order.

    Changes from the previous version: the asin->index dict makes merging
    O(1) instead of re-scanning the result list for every duplicate; the
    locals no longer shadow the `set`/`list` builtins; the file handle is
    closed via `with`; prints are parenthesized for Python 2/3 portability.
    """
    reviews = []
    # asin -> index into `reviews`, for O(1) duplicate merging
    index_by_asin = {}
    with gzip.open(path, 'r') as dataset:
        for line in dataset:
            parsedline = json.loads(line)
            try:
                if 'asin' in parsedline and 'reviewText' in parsedline:  # skip when ASIN or text missing
                    asin = parsedline['asin']
                    text = parsedline['reviewText']
                    if asin in index_by_asin:
                        # this item already has a review: concatenate
                        entry = reviews[index_by_asin[asin]]
                        entry['text'] = entry['text'] + " " + text
                    else:
                        index_by_asin[asin] = len(reviews)
                        reviews.append({'asin': asin, 'text': text})
            except (RuntimeError, TypeError, NameError):
                print("EXCEPTION: error " + str(RuntimeError))
    return reviews
if __name__ == '__main__':
    # NOTE(review): raw_input and the bare print statement below are
    # Python 2 only (input() / print() in Python 3).
    test_string = raw_input("Please enter something: ")
    result = extract_terms (test_string)
    print result
"Martintoni@MacBook-Pro-di-Martintoni.local"
] | Martintoni@MacBook-Pro-di-Martintoni.local |
1123236231c7d7542bb38bab826fbc2184d101e5 | 01b77be351755b7f2b49d40744751cf22f3953cf | /tools/json_schema_compiler/compiler.py | 38235e07f9c9833705f99c341b718ad1db3fdb11 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | bwahn/Havana | 4159876f98850fbfe873ccaaa3dc38739537e9f3 | 5e8bc991ea7e251e98efb6e54e0b8573e5503aa6 | refs/heads/master | 2020-05-31T21:40:08.597468 | 2013-09-03T15:40:14 | 2013-09-03T15:40:14 | 12,556,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generator for C++ structs from api json files.
The purpose of this tool is to remove the need for hand-written code that
converts to and from base::Value types when receiving javascript api calls.
Originally written for generating code for extension apis. Reference schemas
are in chrome/common/extensions/api.
Usage example:
compiler.py --root /home/Work/src --namespace extensions windows.json
tabs.json
compiler.py --destdir gen --root /home/Work/src
--namespace extensions windows.json tabs.json
"""
import cc_generator
import cpp_type_generator
import h_generator
import idl_schema
import json_schema
import model
import schema_bundle_generator
import optparse
import os.path
import sys
def load_schema(schema):
  """Load an API definition list from a .json or .idl schema file,
  exiting with an error message for any other extension."""
  _, schema_extension = os.path.splitext(schema)
  if schema_extension == '.json':
    return json_schema.Load(schema)
  elif schema_extension == '.idl':
    return idl_schema.Load(schema)
  sys.exit("Did not recognize file extension %s for schema %s" %
      (schema_extension, schema))
def handle_single_schema(filename, dest_dir, root, root_namespace):
  """Generate the C++ model (.h/.cc) for one schema file.

  JSON dependencies declared by the schema are loaded into the model
  first so cross-schema type references resolve.  Output is written
  under `dest_dir` (mirroring the schema path relative to the root) or
  printed to stdout when no destination directory was given.

  NOTE(review): this function reads the module-level `opts` (set in
  __main__) for the root path instead of its own `root` parameter.
  """
  schema = os.path.normpath(filename)
  schema_filename, schema_extension = os.path.splitext(schema)
  api_defs = load_schema(schema)
  api_model = model.Model()
  for target_namespace in api_defs:
    referenced_schemas = target_namespace.get('dependencies', [])
    # Load type dependencies into the model.
    # TODO(miket): do we need this in IDL?
    for referenced_schema in referenced_schemas:
      referenced_schema_path = os.path.join(
          os.path.dirname(schema), referenced_schema + '.json')
      referenced_api_defs = json_schema.Load(referenced_schema_path)
      for namespace in referenced_api_defs:
        api_model.AddNamespace(namespace,
            os.path.relpath(referenced_schema_path, opts.root))
    # Gets the relative path from opts.root to the schema to correctly determine
    # the include path.
    relpath = os.path.relpath(schema, opts.root)
    namespace = api_model.AddNamespace(target_namespace, relpath)
    if not namespace:
      continue
    # The output filename must match the input filename for gyp to deal with it
    # properly.
    out_file = namespace.name
    type_generator = cpp_type_generator.CppTypeGenerator(
        root_namespace, namespace, namespace.unix_name)
    # register every other namespace so cross-namespace types resolve
    for referenced_namespace in api_model.namespaces.values():
      if referenced_namespace == namespace:
        continue
      type_generator.AddNamespace(
          referenced_namespace,
          referenced_namespace.unix_name)
    h_code = (h_generator.HGenerator(namespace, type_generator)
        .Generate().Render())
    cc_code = (cc_generator.CCGenerator(namespace, type_generator)
        .Generate().Render())
    if dest_dir:
      with open(
          os.path.join(dest_dir, namespace.source_file_dir, out_file + '.cc'),
          'w') as cc_file:
        cc_file.write(cc_code)
      with open(
          os.path.join(dest_dir, namespace.source_file_dir, out_file + '.h'),
          'w') as h_file:
        h_file.write(h_code)
    else:
      # no destination: dump both generated files to stdout
      print '%s.h' % out_file
      print
      print h_code
      print
      print '%s.cc' % out_file
      print
      print cc_code
def handle_bundle_schema(filenames, dest_dir, root, root_namespace):
  """Generate the bundle outputs (generated_api.h, generated_schemas.h/.cc)
  covering every schema file in `filenames` as a single model."""
  # Merge the source files into a single list of schemas.
  api_defs = []
  for filename in filenames:
    schema = os.path.normpath(filename)
    schema_filename, schema_extension = os.path.splitext(schema)
    api_defs.extend(load_schema(schema))
  api_model = model.Model()
  # all namespaces share the relative path of the first schema file
  relpath = os.path.relpath(os.path.normpath(filenames[0]), root)
  for target_namespace in api_defs:
    api_model.AddNamespace(target_namespace, relpath)
  type_generator = cpp_type_generator.CppTypeGenerator(root_namespace)
  for referenced_namespace in api_model.namespaces.values():
    type_generator.AddNamespace(
        referenced_namespace,
        referenced_namespace.unix_name)
  generator = schema_bundle_generator.SchemaBundleGenerator(
      api_model, api_defs, type_generator)
  api_h_code = generator.GenerateAPIHeader().Render()
  schemas_h_code = generator.GenerateSchemasHeader().Render()
  schemas_cc_code = generator.GenerateSchemasCC().Render()
  if dest_dir:
    # bundle output always lands in the extensions api directory
    basedir = os.path.join(dest_dir, 'chrome/common/extensions/api')
    with open(os.path.join(basedir, 'generated_api.h'), 'w') as h_file:
      h_file.write(api_h_code)
    with open(os.path.join(basedir, 'generated_schemas.h'), 'w') as h_file:
      h_file.write(schemas_h_code)
    with open(os.path.join(basedir, 'generated_schemas.cc'), 'w') as cc_file:
      cc_file.write(schemas_cc_code)
  else:
    # no destination: dump all three generated files to stdout
    print 'generated_api.h'
    print
    print api_h_code
    print
    print 'generated_schemas.h'
    print
    print schemas_h_code
    print
    print 'generated_schemas.cc'
    print
    print schemas_cc_code
print schemas_cc_code
if __name__ == '__main__':
  parser = optparse.OptionParser(
      description='Generates a C++ model of an API from JSON schema',
      usage='usage: %prog [option]... schema')
  parser.add_option('-r', '--root', default='.',
      help='logical include root directory. Path to schema files from specified'
      'dir will be the include path.')
  parser.add_option('-d', '--destdir',
      help='root directory to output generated files.')
  parser.add_option('-n', '--namespace', default='generated_api_schemas',
      help='C++ namespace for generated files. e.g extensions::api.')
  parser.add_option('-b', '--bundle', action="store_true", help=
'''if supplied, causes compiler to generate bundle files for the given set of
source files.''')
  # NOTE(review): `opts` is also read as a module-level global inside
  # handle_single_schema, so it must be bound here before those calls.
  (opts, args) = parser.parse_args()
  if not args:
    sys.exit(0) # This is OK as a no-op
  dest_dir = opts.destdir
  root_namespace = opts.namespace
  if opts.bundle:
    handle_bundle_schema(args, dest_dir, opts.root, root_namespace)
  else:
    handle_single_schema(args[0], dest_dir, opts.root, root_namespace)
| [
"BW@BW-PC.(none)"
] | BW@BW-PC.(none) |
cb1af45e6576cfaa0436075b85515ae1b2b235e3 | bd5e4b1317e741e2c241a7285f632e2beefb8cf4 | /bdd_example/settings.py | 8e9ebdb9d57d1d0f441518deb6fff4b0a41d7f97 | [] | no_license | asleao/bdd-django-tutorial-2 | c7e5c19af55c31097d7f705568f7b18b8405acd3 | fe79cf4dded6328290ce34a8b221b8276bc733cb | refs/heads/master | 2020-12-05T17:43:59.062935 | 2016-08-23T14:44:35 | 2016-08-23T14:44:35 | 66,375,593 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | """
Django settings for bdd_example project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from the environment before any non-local deployment.
SECRET_KEY = '_^o_m+ne4=ht(efkt$dpd40-%px!qs++w#g(x8$0%2aa_qj)2@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'behave_django',
'login',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bdd_example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bdd_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"andre.sp.leao@gmail.com"
] | andre.sp.leao@gmail.com |
4ce257895e21ccb0c844c1e6aa51c30a9ac4fe4d | 202f3112b74e0c46f906c95a3914e24a734aa5ea | /polls/models.py | d6bc4118f5fb516f0ed4046cca01b3b02017e094 | [] | no_license | markadeev/djangoapp | 36e19cfac9cff07bffec5f54a903b4ee2d64dded | ac94bfff12a47994f0d2af3a924ef76430d1bf80 | refs/heads/master | 2021-01-06T14:17:23.121078 | 2020-02-20T12:01:52 | 2020-02-20T12:01:52 | 241,357,222 | 0 | 0 | null | 2020-02-20T12:01:54 | 2020-02-18T12:37:04 | Python | UTF-8 | Python | false | false | 643 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.question_text
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
| [
"markadeev@ukr.net"
] | markadeev@ukr.net |
000950e05d418733d1aee53faa55ce0a11927353 | 87ef03b1ff43333361771976397908abeb56e496 | /venv/Lib/site-packages/gunicorn/http/body.py | afde36854d1b6ce7e58bdb115b34e09dbed4eee6 | [
"MIT"
] | permissive | pran01/AlgoVision | cba938db1f56c3b52e4868bcdda5283492b2902e | 40e85f3c55266f43ee103dfa0852a63af306a8d4 | refs/heads/master | 2023-04-05T21:01:39.513718 | 2021-04-30T18:56:33 | 2021-04-30T18:56:33 | 281,875,751 | 33 | 9 | MIT | 2021-03-20T04:56:44 | 2020-07-23T06:58:41 | Python | UTF-8 | Python | false | false | 7,297 | py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import io
import sys
from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
InvalidChunkSize)
class ChunkedReader(object):
    """Decodes a Transfer-Encoding: chunked request body from an unreader."""
    def __init__(self, req, unreader):
        self.req = req
        self.parser = self.parse_chunked(unreader)  # generator yielding decoded body bytes
        self.buf = io.BytesIO()                     # overshoot buffer between read() calls
    def read(self, size):
        """Return up to `size` decoded body bytes (b"" at end of body)."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""
        if self.parser:
            while self.buf.tell() < size:
                try:
                    self.buf.write(next(self.parser))
                except StopIteration:
                    # body fully decoded; stop pulling from the generator
                    self.parser = None
                    break
        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        # keep the overshoot for the next read()
        self.buf.write(rest)
        return ret
    def parse_trailers(self, unreader, data):
        """Consume the optional trailer headers after the final 0-size chunk.

        Reads until the blank line that terminates the trailer section;
        parsed trailers are stored on the request object, and any bytes
        read past the body are pushed back into the unreader.
        """
        buf = io.BytesIO()
        buf.write(data)
        idx = buf.getvalue().find(b"\r\n\r\n")
        done = buf.getvalue()[:2] == b"\r\n"  # immediate CRLF: no trailers at all
        while idx < 0 and not done:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n\r\n")
            done = buf.getvalue()[:2] == b"\r\n"
        if done:
            unreader.unread(buf.getvalue()[2:])
            return b""
        self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
        unreader.unread(buf.getvalue()[idx + 4:])
    def parse_chunked(self, unreader):
        """Generator yielding decoded chunk payloads until the 0-size chunk."""
        (size, rest) = self.parse_chunk_size(unreader)
        while size > 0:
            # stream the chunk payload, possibly across several reads
            while size > len(rest):
                size -= len(rest)
                yield rest
                rest = unreader.read()
                if not rest:
                    raise NoMoreData()
            yield rest[:size]
            # Remove \r\n after chunk
            rest = rest[size:]
            while len(rest) < 2:
                rest += unreader.read()
            if rest[:2] != b'\r\n':
                raise ChunkMissingTerminator(rest[:2])
            (size, rest) = self.parse_chunk_size(unreader, data=rest[2:])
    def parse_chunk_size(self, unreader, data=None):
        """Read one hex chunk-size line; return (size, bytes_after_line).

        Chunk extensions (anything after ';') are ignored.  A size of 0
        marks the last chunk: trailers are parsed and (0, None) returned.
        """
        buf = io.BytesIO()
        if data is not None:
            buf.write(data)
        idx = buf.getvalue().find(b"\r\n")
        while idx < 0:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n")
        data = buf.getvalue()
        line, rest_chunk = data[:idx], data[idx + 2:]
        chunk_size = line.split(b";", 1)[0].strip()
        try:
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise InvalidChunkSize(chunk_size)
        if chunk_size == 0:
            try:
                self.parse_trailers(unreader, rest_chunk)
            except NoMoreData:
                pass
            return (0, None)
        return (chunk_size, rest_chunk)
    def get_data(self, unreader, buf):
        """Append one unreader read to `buf`; raise NoMoreData on EOF."""
        data = unreader.read()
        if not data:
            raise NoMoreData()
        buf.write(data)
class LengthReader(object):
    """Reads a Content-Length delimited request body from an unreader."""

    def __init__(self, unreader, length):
        self.unreader = unreader
        self.length = length  # bytes of the body still unread

    def read(self, size):
        """Return up to `size` body bytes; b"" once the body is consumed."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        # never hand out more than the remaining body length
        size = min(self.length, size)
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        chunks = io.BytesIO()
        while True:
            data = self.unreader.read()
            if not data:
                break
            chunks.write(data)
            if chunks.tell() >= size:
                break

        collected = chunks.getvalue()
        result = collected[:size]
        # anything read past `size` goes back for the next call
        self.unreader.unread(collected[size:])
        self.length -= size
        return result
class EOFReader(object):
    """Reads a body with no declared length: everything until stream EOF."""
    def __init__(self, unreader):
        self.unreader = unreader
        self.buf = io.BytesIO()
        self.finished = False  # set once the unreader has reported EOF
    def read(self, size):
        """Return up to `size` bytes; b"" once the stream is exhausted."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""
        if self.finished:
            # serve any bytes still buffered past EOF detection
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret
        data = self.unreader.read()
        while data:
            self.buf.write(data)
            if self.buf.tell() > size:
                break
            data = self.unreader.read()
        if not data:
            self.finished = True
        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret
class Body(object):
    """File-like wrapper over a body reader (chunked/length/EOF) that adds
    buffering plus readline/readlines/iteration semantics."""
    def __init__(self, reader):
        self.reader = reader
        self.buf = io.BytesIO()
    def __iter__(self):
        return self
    def __next__(self):
        ret = self.readline()
        if not ret:
            raise StopIteration()
        return ret
    next = __next__  # Python 2 iterator-protocol alias
    def getsize(self, size):
        """Normalize a user-supplied size: None/negative mean 'unlimited'."""
        if size is None:
            return sys.maxsize
        elif not isinstance(size, int):
            raise TypeError("size must be an integral type")
        elif size < 0:
            return sys.maxsize
        return size
    def read(self, size=None):
        """Read up to `size` bytes (all remaining bytes when size is None)."""
        size = self.getsize(size)
        if size == 0:
            return b""
        if size < self.buf.tell():
            # enough already buffered; serve from the buffer alone
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret
        while size > self.buf.tell():
            data = self.reader.read(1024)
            if not data:
                break
            self.buf.write(data)
        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret
    def readline(self, size=None):
        """Read one newline-terminated line, capped at `size` bytes."""
        size = self.getsize(size)
        if size == 0:
            return b""
        data = self.buf.getvalue()
        self.buf = io.BytesIO()
        ret = []
        while 1:
            idx = data.find(b"\n", 0, size)
            # idx becomes: end of line if found, `size` if the cap was hit,
            # or 0 (falsy) when more data is needed
            idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
            if idx:
                ret.append(data[:idx])
                self.buf.write(data[idx:])
                break
            ret.append(data)
            size -= len(data)
            data = self.reader.read(min(1024, size))
            if not data:
                break
        return b"".join(ret)
    def readlines(self, size=None):
        """Read all remaining lines.

        NOTE(review): the `size` argument is accepted but ignored -- the
        entire remaining body is read regardless.
        """
        ret = []
        data = self.read()
        while data:
            pos = data.find(b"\n")
            if pos < 0:
                ret.append(data)
                data = b""
            else:
                line, data = data[:pos + 1], data[pos + 1:]
                ret.append(line)
        return ret
| [
"pran.sinha1.0@gmail.com"
] | pran.sinha1.0@gmail.com |
6db6eac332058fd6e1c5a656fd107b838cd08767 | 2e2494148f19a2f51383a7eb8853c746a60b6db9 | /MemoryBlock.py | d5ff3a06afac5bbd0652a8c34e08b6793ac6c744 | [] | no_license | GrimaldoMike/Compiladores | a79614d77ac9baed3837d76ccfa70f664b62b3ee | 2d01512b537f523d608d79e91ec163ee7e2ab529 | refs/heads/master | 2021-01-10T17:40:55.376425 | 2016-05-06T20:23:58 | 2016-05-06T20:23:58 | 53,536,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | #NO FUE IMPLEMENTADO PARA EL PROYECTO
class MemoryBlock:
    """Bookkeeping for a typed memory layout split into five regions.

    Each region (bools, ints, floats, chars, strings) is tracked as a
    ``[start_address, allocated_count]`` pair; allocating simply bumps the
    counter and returns the first address of the reserved range.
    """

    def __init__(self, start_dir, ints_start_dir, floats_start_dir, chars_start_dir, strings_start_dir, limit):
        '''All blocks have to be in ascending order and non overlapping on init. We leave that to the developer that uses this class'''
        self.bools = [start_dir, 0]
        self.ints = [ints_start_dir, 0]
        self.floats = [floats_start_dir, 0]
        self.chars = [chars_start_dir, 0]
        self.strings = [strings_start_dir, 0]
        self.limit = limit

    def __str__(self):
        return "MemoryBlock ({start}-{end}): {boolno} bools, {intno} ints, {floatno} floats, {charno} chars, {stringno} strings".format( start=self.bools[0], end=self.limit, boolno=self.bools[1], intno=self.ints[1], floatno=self.floats[1], charno=self.chars[1], stringno=self.strings[1])

    def _reserve(self, region, bound, overflow_message, num):
        """Reserve *num* slots in *region* if they fit strictly below *bound*.

        Returns the first reserved address, or prints *overflow_message*
        (and implicitly returns None) when the request would spill over.
        """
        if (region[0] + region[1] + num) < bound:
            region[1] += num
            return region[0] + region[1] - num
        print(overflow_message)

    def add_bool(self, num=1):
        '''Adds a var to the memory block'''
        return self._reserve(self.bools, self.ints[0],
                             'Stackoverflow: Se intenta exceder el limite de memoria para el boolean.', num)

    def add_int(self, num=1):
        '''Adds a var to the memory block'''
        return self._reserve(self.ints, self.floats[0],
                             'Stackoverflow: Se intenta exceder el limite de memoria para el int.', num)

    def add_float(self, num=1):
        '''Adds a var to the memory block'''
        return self._reserve(self.floats, self.chars[0],
                             'Stackoverflow: Se intenta exceder el limite de memoria para el foat.', num)

    def add_char(self, num=1):
        '''Adds a var to the memory block'''
        return self._reserve(self.chars, self.strings[0],
                             'Stackoverflow: Se intenta exceder el limite de memoria para el char.', num)

    def add_string(self, num=1):
        '''Adds a var to the memory block'''
        return self._reserve(self.strings, self.limit,
                             'Stackoverflow: Se intenta exceder el limite de memoria para el string.', num)
| [
"grimaldo.mike@hotmail.com"
] | grimaldo.mike@hotmail.com |
d8863ebbb7cfbc6f46a2659c40eff9f0092bdcf6 | eb8660d8a7c7557af0fd681a4cce305e1fc73ef9 | /grpc/stt_client.py | 3179370f360474849612723415585af7faaf5ca0 | [
"Apache-2.0"
] | permissive | morfeusys/vosk-server | d4639eaaae7b2e171bd99618513100100d94e773 | 955517bfcc8a7ef3f93ed5ace50052234aa3bf74 | refs/heads/master | 2021-01-14T19:18:55.803416 | 2020-02-24T12:57:55 | 2020-02-24T12:57:55 | 242,727,733 | 1 | 0 | Apache-2.0 | 2020-02-24T12:14:17 | 2020-02-24T12:14:16 | null | UTF-8 | Python | false | false | 1,633 | py | #!/usr/bin/python3
import argparse
import grpc
import stt_service_pb2
import stt_service_pb2_grpc
# Size (bytes) of each audio slice sent per streaming request.
CHUNK_SIZE = 4000

def gen(audio_file_name):
    """Yield the streaming-recognition request sequence.

    The first request carries the recognition configuration; every
    following request carries one CHUNK_SIZE slice of the audio file.
    """
    spec = stt_service_pb2.RecognitionSpec(
        partial_results=True,
        audio_encoding='LINEAR16_PCM',
        sample_rate_hertz=8000
    )
    streaming_config = stt_service_pb2.RecognitionConfig(specification=spec)
    yield stt_service_pb2.StreamingRecognitionRequest(config=streaming_config)
    with open(audio_file_name, 'rb') as audio:
        while True:
            chunk = audio.read(CHUNK_SIZE)
            if chunk == b'':
                break
            yield stt_service_pb2.StreamingRecognitionRequest(audio_content=chunk)
def run(audio_file_name):
    """Stream an audio file to the local STT gRPC service and print responses."""
    channel = grpc.insecure_channel('localhost:5001')
    stub = stt_service_pb2_grpc.SttServiceStub(channel)
    responses = stub.StreamingRecognize(gen(audio_file_name))
    try:
        for response in responses:
            try:
                print('Start chunk: ')
                chunk = response.chunks[0]
                for alternative in chunk.alternatives:
                    print('alternative: ', alternative.text)
                    print('words: ', alternative.words)
                print('Is final: ', chunk.final)
                print('')
            except LookupError:
                # Response carried no chunks at all.
                print('No available chunks')
    except grpc._channel._Rendezvous as err:
        # NOTE(review): relies on grpc's private _Rendezvous/_state API.
        print('Error code %s, message: %s' % (err._state.code, err._state.details))
if __name__ == '__main__':
    # CLI entry point: --path points at the audio file to transcribe.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--path', required=True, help='audio file path')
    cli_args = arg_parser.parse_args()
    run(cli_args.path)
| [
"nshmyrev@gmail.com"
] | nshmyrev@gmail.com |
2870cf1b425dae0d303dc3b910f2b3820bac2b3e | c1abf5c7dd599b25d84c2026f97eaccd03dc4e46 | /movedown.py | 1da89fb7214f214508b7e7d52b1b88c29c20d425 | [
"MIT"
] | permissive | oknalv/linky | 09768abe96d95f2dcb67ff91c22663a4a69356cb | 78fba19946e2212b10f3d1a5b27c7d9329556290 | refs/heads/master | 2016-09-13T01:33:29.192646 | 2016-04-29T15:41:13 | 2016-04-29T15:41:13 | 57,393,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | import webapp2
from base import BaseHandler
from link import Link, Container
from google.appengine.api import users
import time
class MoveDownHandler(BaseHandler):
    """Handler that moves one link a single position down in the user's container."""

    def __init__(self, request = None, response = None):
        self.initialize( request, response )

    def get(self):
        """Swap the link named by ?id= with its successor, then redirect home.

        Redirects with a flash message when no id is supplied or no user
        is logged in; silently redirects when the id is unknown.
        """
        if not self.request.get("id"):
            self.set_flash("danger", "forbidden-access")
            self.redirect("/")
        else:
            user = users.get_current_user()
            if user:
                # Fetch (or lazily create) the user's link container.
                containers = Container.query(Container.user == user)
                cont = None
                if not containers.iter().has_next():
                    cont = Container(user = user)
                    cont.put()
                else:
                    cont = containers.iter().next()
                # Locate the link to move by its name.
                actual = None
                for ind, link in enumerate(cont.links):
                    if link.name == self.request.get("id"):
                        actual = ind
                        break
                # BUG FIX: the original bound (actual < len(cont.links))
                # allowed actual == len - 1, so cont.links[actual + 1]
                # raised IndexError when moving the last link down.
                if actual is not None and actual < len(cont.links) - 1:
                    cont.links[actual], cont.links[actual + 1] = cont.links[actual + 1], cont.links[actual]
                    cont.put()
                # Give the eventually-consistent datastore time to settle
                # before the home page re-queries the container.
                time.sleep(1)
                self.redirect("/")
            else:
                self.set_flash("danger", "not-logged-in")
                self.redirect("/")
# Session configuration: secret key used by webapp2_extras.sessions
# to sign session cookies.
config = {}
config['webapp2_extras.sessions'] = {
    'secret_key': 'merely remarkable came line',
}

# WSGI application routing /movedown to the handler above.
app = webapp2.WSGIApplication([
    ('/movedown', MoveDownHandler)
], debug = True, config = config)
"thevlanko@gmail.com"
] | thevlanko@gmail.com |
bd143b97ac92cf6eef6bbe7e91edb34eafbf4540 | 60e9be8297b98075afb304ebae929f9cac30cf42 | /leetCode/Array/Easy/K-diff Pairs in an Array.py | 95442868e44479b644d955b4d724e4873131c538 | [] | no_license | sifact/Leet-Code-Problems | e4bbd0ab2d1349de32521650c9eeaa5ad3b8085f | eb62e8407dd0931841fbbb351aca5c415c226a07 | refs/heads/main | 2023-01-30T05:07:57.904604 | 2020-11-30T16:54:19 | 2020-11-30T16:54:19 | 317,285,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | from collections import Counter
def findPairs(nums, k):
    """Return the number of unique k-diff pairs (i, j) with |i - j| == k.

    For k > 0 a value contributes one pair iff value + k also occurs;
    for k == 0 a value contributes iff it occurs more than once.
    """
    # Count the elements with Counter
    # if k > 0 for each element i, check if i + k exist
    # if k == 0 for each element i, check if count[i] > 1
    hash_map = Counter(nums)
    count = 0
    for key in hash_map:
        # BUG FIX: the original tested `k > 1`, silently missing every
        # valid pair when k == 1 (the sibling findPairs2 and the comment
        # above both use k > 0).
        if k > 0 and key + k in hash_map or k == 0 and hash_map[key] > 1:
            count += 1
    return count
# Same count as findPairs, written as an explicit loop instead of a
# one-line generator expression.
def findPairs2(nums, k):
    """Return the number of unique k-diff pairs in nums."""
    counts = Counter(nums)
    total = 0
    for value in counts:
        if (k > 0 and value + k in counts) or (k == 0 and counts[value] > 1):
            total += 1
    return total
# Script driver: first stdin line is the space-separated array,
# second line is k.
a = list(map(int, input().split()))
num = int(input())
print(findPairs2(a, num))
| [
"noreply@github.com"
] | sifact.noreply@github.com |
32f142de985f427b2e3ecba10fa765f0c368c943 | 9460f8e795d65ff8667a9c1b0da7a141d2a9c849 | /blog/views.py | ebff93e410cfe142a882386fce08dfa8d44d8c0c | [] | no_license | wadewow/myblog | 3a06614872637d502a5b24c802429cfdb7b8e0a8 | af498894b3379bc876a93142122a29d31119735c | refs/heads/master | 2021-01-01T16:54:40.704270 | 2017-07-21T13:40:51 | 2017-07-21T13:40:51 | 97,951,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # coding:utf-8
from django.shortcuts import render
# from django.http import HttpResponse
import models
# Create your views here.
def home(request):
    """Render the blog home page with every Article in the database."""
    all_articles = models.Article.objects.all()
    return render(request, 'blog/blog_home.html', {'articles': all_articles})
def content(request, article_id):
    """Render the detail page of a single article, looked up by primary key."""
    post = models.Article.objects.get(pk=article_id)
    return render(request, 'blog/blog_content.html', {'article_content': post})
def edit(request,article_id):
    # Render the edit form; article_id == '0' means "create a new article".
    # NOTE(review): Python 2 debug print left in; the literal is runtime
    # output ("id equals:") and is kept untouched.
    print 'id等于:', article_id
    if str(article_id) == '0':
        return render(request,'blog/blog_edit.html')
    # Pre-populate the form with the existing article.
    article = models.Article.objects.get(pk = article_id)
    return render(request,'blog/blog_edit.html',{'article':article})
def form_action(request):
    """Create a new article or update an existing one from the posted form.

    An ``article_id`` of '0' (the default) means "create" and renders the
    home page; any other id updates that article and renders its detail page.
    """
    # The POST keys match the ``name`` attributes of the form inputs.
    title = request.POST.get('title')
    content = request.POST.get('content')
    article_id = request.POST.get('article_id', '0')
    if article_id == '0':
        models.Article.objects.create(title=title, content=content)
        return render(request, 'blog/blog_home.html',
                      {'articles': models.Article.objects.all()})
    article = models.Article.objects.get(pk=article_id)
    article.title = title
    article.content = content
    article.save()
    return render(request, 'blog/blog_content.html', {'article_content': article})
| [
"949768106@qq.com"
] | 949768106@qq.com |
373f9f9cd537df8df9fb85fee9220607f78f2be6 | de5adea6b67660bfc45150ee56b6cf4957c8c4e7 | /main_app/migrations/0001_initial.py | f522eb7c2263895a61cc3153af186e867e0d5fdf | [] | no_license | arthuroe/treasure_gram | 70049a25009318d947488dea28505f65816d9d84 | 5ce93ed21284fee17640b15546011848de3115ac | refs/heads/develop | 2020-03-18T02:16:19.413381 | 2018-05-23T17:18:58 | 2018-05-23T17:24:16 | 134,182,468 | 0 | 0 | null | 2018-05-28T18:52:48 | 2018-05-20T20:02:49 | Python | UTF-8 | Python | false | false | 824 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (see header): creates the
    # Treasure table with its five character/decimal fields.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Treasure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('value', models.DecimalField(decimal_places=2, max_digits=10)),
                ('materials', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
                ('img_url', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"arthur.orache@gmail.com"
] | arthur.orache@gmail.com |
85c85d8ad12001b13683eb1cd155223c1da9f3cf | d69b96f8a2d4a0025b2513d49ad1726d53a9adcc | /sow/console.py | 033266b0b1c668bcddc24cc81ce0afa56dd6f3d4 | [] | no_license | mekhami/Sow | 3ed4fa82f2016899924692c979d5e2ed0ca20166 | aa55d69fa1d18ac9a35a24f67b126e39ca69b721 | refs/heads/master | 2021-01-10T19:16:59.841592 | 2015-06-26T15:50:00 | 2015-06-26T15:50:00 | 30,369,588 | 1 | 1 | null | 2015-12-31T20:00:36 | 2015-02-05T17:55:29 | Python | UTF-8 | Python | false | false | 932 | py | #/usr/bin/env python
###################################
## A Harvest Command Line App ##
###################################
'''Harvest.
Usage:
sow [options]
sow add [(<alias> <hours> <note>)] [-d|--date <date>]
sow show (today|yesterday|week | --date <date>)
sow reauth
sow delete [-a|--all] [(-d|--date <date>)]
Options:
-h --help Show this screen.
--version Show the version.
'''
from docopt import docopt
from commands import add, show, delete
from utils import get_timesheet, get_config, reauth
def _main(args, config, timesheet):
    """Dispatch the parsed docopt arguments to every matching command.

    The flags are checked independently (not elif), mirroring the original
    chain of non-exclusive ``if`` statements.
    """
    dispatch = (
        ('add', lambda: add(args, config, timesheet)),
        ('show', lambda: show(args, timesheet)),
        ('reauth', lambda: reauth(config)),
        ('delete', lambda: delete(args, timesheet)),
    )
    for flag, handler in dispatch:
        if args[flag]:
            handler()
def main():
    """CLI entry point: parse argv with docopt, load state, and dispatch."""
    cli_args = docopt(__doc__)
    cfg = get_config()
    sheet = get_timesheet()
    _main(cli_args, cfg, sheet)
| [
"Lawrence.vanderpool@gmail.com"
] | Lawrence.vanderpool@gmail.com |
ab6ea8ec66229564a0cc2f4945f5415503dcfec8 | 1bb20fd77f973f23878c04b1784569ebe76ca645 | /model/distrib_state.py | 3138fe08510eea286b89c9cba72bb01bc01931c2 | [] | no_license | keyofdeath/Tp-conceprion-objet | af25a838231547678e24aea7bd59533946a554e7 | 45e03ef694684c364f38f2592c5d6675fde04bdd | refs/heads/master | 2020-04-23T10:26:19.035227 | 2019-02-17T09:32:46 | 2019-02-17T09:32:46 | 171,104,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,344 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging.handlers
import os
# Module logger: writes to log/DistribState.log (rotated at midnight,
# 60 backups kept) and mirrors every record to the console.
PYTHON_LOGGER = logging.getLogger(__name__)
if not os.path.exists("log"):
    os.mkdir("log")
HDLR = logging.handlers.TimedRotatingFileHandler("log/DistribState.log",
                                                 when="midnight", backupCount=60)
STREAM_HDLR = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s")
HDLR.setFormatter(FORMATTER)
STREAM_HDLR.setFormatter(FORMATTER)
PYTHON_LOGGER.addHandler(HDLR)
PYTHON_LOGGER.addHandler(STREAM_HDLR)
PYTHON_LOGGER.setLevel(logging.DEBUG)

# Absolute path to the folder location of this python file
FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
class DistrbState:
    """Base state of the cash-machine (distributeur) state machine.

    Every operation raises by default; concrete states are expected to
    override only the methods that are legal while they are active.
    """

    def __init__(self, distrib):
        """Keep a reference to the owning distributor (Distrib instance)."""
        self.distrib = distrib

    def inserer_carte(self, card_number):
        """Insert the card identified by *card_number*; illegal in this state."""
        raise Exception("Inserer carte: Can call this function in this state")

    def saisire_code(self, code):
        """Check the entered PIN *code*; illegal in this state."""
        raise Exception("Saisire Code: Can call this function in this state")

    def menu(self, action):
        """Handle a menu *action* (constants on the Distrib class); illegal here."""
        raise Exception("Menu: Can call this function in this state")

    def attente_compt_choisit(self, acount_number):
        """Return the data of account *acount_number*; illegal in this state."""
        raise Exception("Attente compt Choisit: Can call this function in this state")

    def compt_afficher(self):
        """Return to the menu after consulting accounts; illegal in this state."""
        raise Exception("Compt afficher: Can call this function in this state")

    def attente_information_transfer(self, acount_number, credit_to_transfer):
        """Record the transfer target account and amount; illegal in this state."""
        raise Exception("Attente information virement: Can call this function in this state")

    def confimer_le_virement(self, confirm_transfer):
        """Confirm (or cancel) the pending transfer; illegal in this state."""
        raise Exception("Confirmer le virement: Can call this function in this state")
| [
"swan.blanc.pro@gmail.com"
] | swan.blanc.pro@gmail.com |
3b1a469d9c82b2869b62462652c2a0c924e3bb31 | 470e0a9dc07edfe13ca68f2a1b6d60d0e395e095 | /3-2.py | b67172d7abbc097ec46a4caa894c73eba80c02c4 | [] | no_license | mj08021/ThisIsCodingTestforGetaJob | 77ce8edab2bd855db9b96597982f58251d0bd31e | ad98b368956937065c6c396b2806351a4eaf12a2 | refs/heads/main | 2023-04-28T10:51:02.012344 | 2021-05-16T05:51:58 | 2021-05-16T05:51:58 | 316,853,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # N, M, K를 공백으로 구분하여 입력받기
n, m, k = map(int, input().split())
# Read the N numbers, separated by spaces
data = list(map(int, input().split()))
data.sort() # sort the input numbers (ascending)
first = data[n - 1] # largest number
second = data[n - 2] # second largest number
# Count how many times the largest number gets added: k times per group
# of (k + 1) additions, plus the remainder.
# NOTE(review): int(m / (k + 1)) relies on float division; m // (k + 1)
# would be exact for very large m — confirm before changing.
count = int(m / (k + 1)) * k
count += m % (k + 1)
result = 0
result += (count) * first # add the largest number
result += (m - count) * second # add the second largest number
print(result) # print the final answer
# ex) input
# 5 8 3
# 2 4 5 4 6
"replituser@example.com"
] | replituser@example.com |
639a8318adc71b502d3f0053794000dbc4d50a3c | 6f8b9e95b2833de2a4f2c8413fe45133e540a5cf | /Sequences/tuples_examples.py | 93edf2eb0dd93f374233d64740dad825f86671a4 | [] | no_license | riteshelias/UMC | a658665d8653ef1ba72d65030b38da7462783ae7 | e30d42192290905b0a878b66f7634500868b174d | refs/heads/master | 2023-01-20T19:32:51.885534 | 2020-11-27T04:10:22 | 2020-11-27T04:10:22 | 316,398,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | dishes = [
("Kaju Katli", "Desi", "Dessert",
(
(1, "Cashew Nuts"),
(2, "Mawa"),
(3, "Sugar")
)
),
("Machow Soup", "Chinese", "Soup",
(
(1, "Noodles"),
(2, "Chicken"),
(3, "Chopped Veggies"),
(4, "Soya Sauce")
)
),
("Hara bhara Kebab", "Desi", "Starters",
(
(1, "Spinach"),
(2, "Corn"),
(3, "Cheese"),
(4, "Potatoes")
)
),
("Tandoori Chicken", "Mughlai", "Starters",
(
(1, "Chicken"),
(2, "Spices"),
(3, "Butter")
)
),
("Navratan Pulav", "Awadhi", "Main Course",
(
(1, "Mix Veggies"),
(2, "Basmati Rice"),
(3, "Dry Fruits")
)
),
("Rogan Josh", "Kashmiri", "Main Course",
(
(1, "Mutton"),
(2, "Spices"),
(3, "Oil"),
(4, "Onions")
)
),
("Rosogolla", "Bengali", "Dessert",
(
(1, "Milk"),
(2, "Sugar"),
(3, "Water"),
(4, "Rose essence")
)
),
]
# Demo: tuple unpacking and nested indexing over the dishes catalogue.
print(len(dishes))
print()
# for dish in dishes:
#     name, ingredients, category = dish
# Unpack each 4-tuple directly in the for statement.
for name, cuisine, category, ingredients in dishes:
    # print("Name: {}, Ingredients: {}, Category: {}".format(dish[0], dish[1], dish[2]))
    print("Name: {}, Cuisine: {}, Category: {}, Ingredients: {}".format(name, cuisine, category, ingredients))
# Drill into one dish step by step...
dish = dishes[1]
print(dish)
print()
ingredient = dish[3]
print(ingredient)
item = ingredient[1]
print(item)
print()
spitem = item[1]
print(spitem)
# ...and the same element reached with one chained index expression.
spitem1 = dishes[1][3][1][1]
print(spitem1)
print(dishes[1][3][1][1])
# for item in ingredient:
#     print(item)
| [
"ritesh.elias@gmail.com"
] | ritesh.elias@gmail.com |
023f6b987e1a2d0d2183da7b4e4d3ffb07f79497 | ac0dc4a4c9960bbbdca2db0eaf7c839f552b0546 | /nomdivertit.py | e0d0b5f3828c2a5978d6db4a5ee2855b81e97965 | [] | no_license | HectorGarciaPY/primer1.py | b7d237b82d6e3ca2cd09ea771a6e152c34fb55ff | 802aac5f442b4e1956cdd4f63a7767b94c30a775 | refs/heads/master | 2023-05-14T16:39:53.271639 | 2021-06-02T10:17:09 | 2021-06-02T10:17:09 | 297,622,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | contador=0
while True:
    print("Escribe un nombre divertido:")
    x=input()
    # A name is "fun" when its first two characters mirror its last two.
    # NOTE(review): raises IndexError for names shorter than 2 characters.
    if x[0]==x[(len(x)-1)] and x[1] == x[(len(x)-2)]:
        print("Es un nom divertit")
    else:
        print("Ets un avorrit, el nom no mola")
    contador=contador+1
    # Stop after two attempts.
    if contador==2:
        print("No tens gens d'originalitat. No pots tenir gos, no pots sortir al carrer.\n"" Adéu!")
        break
| [
""
] | |
5e0f3c0a44b787914d3dce78b805204bdbc0bee6 | 45ff5b1fc0414693087050cc738010a39833a1c6 | /backend/app/models/user_model.py | 40b513eef4cb444ed6161121188fa11d2eab1dd3 | [] | no_license | hanson190505/full-stack-fastapi-vue | 8606971d86dddc341bd98fa8310c70e4aaf54560 | 37121a3ddc50bcabea69433ac1d8318f7c9d870e | refs/heads/main | 2023-03-17T20:22:13.373644 | 2021-03-01T16:23:49 | 2021-03-01T16:23:49 | 327,338,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | from app.db.base_class import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, JSON, Integer, ForeignKey
class DepartmentModel(Base):
    # Human-readable department name (indexed for lookups).
    name = Column(String(64), index=True)
    # Optional self-referential FK to the parent department.
    parent_department = Column(Integer, ForeignKey('departmentmodel.id'), nullable=True)
    # Child departments, eagerly joined up to three levels deep.
    sub_department = relationship('DepartmentModel', lazy='joined', join_depth=3)
    # users = relationship('UserModel', back_populates='department')
class UserModel(Base):
    # Display/login name (indexed).
    name = Column(String(128), index=True)
    # Password hash only — never the plaintext password.
    hashed_password = Column(String(1024))
    # Optional, indexed contact fields.
    mail = Column(String(64), nullable=True, index=True)
    phone = Column(String(32), nullable=True, index=True)
    # Free-form extra attributes stored as JSON.
    detail = Column(JSON, nullable=True)
    # department = relationship('DepartmentModel', back_populates='users')
class RouteModel(Base):
    # Route name and URL path.
    name = Column(String(64))
    path = Column(String(64))
    # Optional self-referential FK to the parent route.
    pid = Column(Integer, ForeignKey('routemodel.id'), nullable=True)
    # Optional display title and free-form JSON extras.
    title = Column(String(64), nullable=True)
    detail = Column(JSON, nullable=True)
    # Child routes of this route.
    children = relationship('RouteModel')
| [
"413506012@qq.com"
] | 413506012@qq.com |
781f1bed425ed743952b93b27a6dea0e2e1a1bad | a78e2aa069c38bb197a023df179d0c7e3f4c8469 | /Button.py | 8dd397c672b7d2b5da43f5e93177746ba2b64e14 | [] | no_license | KouhouMohamed/pythonProject | 74f6fa1051e109538d77904cc8a54da2c0a8d8ac | 662e10887bc4272e28a56bc43e953cf1492da5e8 | refs/heads/master | 2023-01-06T04:18:20.244340 | 2020-11-07T09:47:09 | 2020-11-07T09:47:09 | 310,811,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from tkinter import *
from tkinter import ttk #tkk is a class where buttons
def main():
    """Build a small Tk window: a text entry plus a button that echoes and clears it."""
    window = Tk()  # root window
    button = ttk.Button(window, text="GetText")
    entry = ttk.Entry(window, width=30)
    entry.pack()
    button.pack()  # add the button to the root window

    def on_click():
        # Print the entry's current content, then clear it start-to-end.
        print(entry.get())
        entry.delete(0, END)

    button.config(command=on_click)
    logo = PhotoImage(file='help.png')
    small_logo = logo.subsample(10, 10)  # shrink the image
    small_logo.zoom(15, 20)
    button.config(image=small_logo, compound=LEFT)
    window.mainloop()  # enter the Tk event loop


if __name__ == '__main__':
    main()
| [
"m.kouhou-etu@enset-media.ac.ma"
] | m.kouhou-etu@enset-media.ac.ma |
79505b2c69220a3b4844e0e3ff6288faa3bd033b | 006f73f4cc37dda59904a85d346186897f00834a | /sorteo/urls.py | 3807308aa78b5b230eff8c9c0cdb1306d361e6ce | [] | no_license | nathanbernal/sorteo_django | 4ecd2cd85f5d2dcf7d0b825c5d6327ff080c3380 | b8c4ea6b674b3cdff3cd5aed002222955c592a6e | refs/heads/main | 2023-04-26T02:39:50.711438 | 2021-05-19T03:01:33 | 2021-05-19T03:01:33 | 368,727,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from django.urls import include, path
from rest_framework import routers
from api import views
# DRF router: auto-generates list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
router.register(r'usuario', views.UsuarioViewSet)
router.register(r'activacion', views.ActivacionViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): 'api-auth/{email}' is a literal path segment, not a URL
# parameter — Django path converters use angle brackets; confirm intent.
urlpatterns = [
    path('', include(router.urls)),
    path('api-auth/{email}', include('rest_framework.urls', namespace='rest_framework')),
]
| [
"nathanbernal@gmail.com"
] | nathanbernal@gmail.com |
8303bfe6a087932e19cea98165604264c4a08b9a | f1244dd9a531a40f61c57acf7a7f11f9b2c9cb1f | /3-2/CSE 3210 (Artificial Intelligence)/Lab 6/3. Triangle by star.py | 2f99de4ed6a484bc04e642627c9f8dfd1a0cd4cf | [] | no_license | SabirKhanAkash/RUET-Lab-Works | 7e8be66e2d435108bed57b0335feb54d76ba23ef | 3f094a5ca364d92ef42831e9f2dfb75c3baad506 | refs/heads/master | 2022-08-30T10:38:04.209187 | 2022-08-26T18:02:26 | 2022-08-26T18:02:26 | 240,317,974 | 2 | 3 | null | 2022-08-22T05:15:33 | 2020-02-13T17:17:38 | Jupyter Notebook | UTF-8 | Python | false | false | 226 | py | def main():
    # Number of rows of the triangle.
    n = int(input("Enter the value of n: "))
    # Leading-space count for the first row: 2n - 2, shrinking by one per row.
    k = 2*n - 2
    for i in range(0, n):
        for j in range(0, k):
            print(end=" ")
        k = k - 1
        # Row i carries i + 1 stars.
        for j in range(0, i+1):
            print("* ", end="")
        # Carriage return, followed by print's newline, ends the row.
        print("\r")
main()
| [
"39434260+SabirKhanAkash@users.noreply.github.com"
] | 39434260+SabirKhanAkash@users.noreply.github.com |
e4d288a30baec61e2e198b96b3163e0cf87504db | b62b673d9ade27f3e924f822d5b075e38ae28aa1 | /tag-generator.py | 0b3a2f6604bfbf6b8e487b1159ac54c2965410c0 | [
"MIT"
] | permissive | Bhupesh-V/Bhupesh-V.github.io | 7cad5f3dac12ecab9613780713a18fd9fb466ac2 | 8efd2afe3a5e76df45caf796222a0e498e569ed6 | refs/heads/master | 2023-05-29T01:27:50.233594 | 2023-04-30T13:14:16 | 2023-04-30T13:14:16 | 182,211,988 | 3 | 2 | MIT | 2019-08-27T21:10:42 | 2019-04-19T06:13:58 | CSS | UTF-8 | Python | false | false | 1,349 | py | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: lqian8@jhu.edu
Source: https://github.com/qian256/qian256.github.io/blob/master/tag_generator.py
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'

# Collect every tag listed in the YAML front matter of the posts.
filenames = glob.glob(post_dir + '*md')
total_tags = []
for filename in filenames:
    # `with` guarantees the file is closed even if parsing raises
    # (the original leaked the handle on an exception).
    with open(filename, 'r', encoding='utf8') as f:
        # Only scan the front-matter block delimited by two '---' lines.
        crawl = False
        for line in f:
            if crawl:
                current_tags = line.strip().split()
                # BUG FIX: guard against blank lines inside the front
                # matter, which made current_tags[0] raise IndexError.
                if current_tags and current_tags[0] == 'tags:':
                    total_tags.extend(current_tags[1:])
                    crawl = False
                    break
            if line.strip() == '---':
                if not crawl:
                    crawl = True
                else:
                    crawl = False
                    break
total_tags = set(total_tags)

# Remove stale tag pages, then regenerate one page per tag.
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
    os.remove(tag)
if not os.path.exists(tag_dir):
    os.makedirs(tag_dir)
for tag in total_tags:
    tag_filename = tag_dir + tag + '.md'
    with open(tag_filename, 'a') as f:
        write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
        f.write(write_str)
print("Tags generated, count", len(total_tags))
"varshneybhupesh@gmail.com"
] | varshneybhupesh@gmail.com |
c82dcbc9cc057d4d5d64d87082af0f1e59d0a74b | 1eddf34d87d1c8fa06a71dd934bfdc4de8fd6752 | /binary_files_generation/stdp_table_generator.py | c928969be2abe27e46d842b9ea2238768d31ef9e | [] | no_license | galluppf/spackage_conv | b6367f0cd93ef02891512733e83a70f984f9b2a3 | 902c6d3be1a4fb7692056814eafd4d94a75a59d6 | refs/heads/master | 2021-01-13T02:32:09.383591 | 2013-09-30T12:04:41 | 2013-09-30T12:04:41 | 12,670,028 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,185 | py | #!/usr/bin/python
"""
Utility to generate an STDP table for SpiNNaker
__author__="francesco"
__date__ ="$22-Mar-2011 18:01:14$"
"""
BINARY_DIRECTORY = '../binaries/'
import ConfigParser, sys
from numpy import arange, zeros
from math import exp, log
from struct import pack
from pacman import *
# packs an array with a given mask for every element. maybe there's a python function doing this? like [ out += pack(mask,i) for i in array ]
# NOTE(review): dead code — this definition is shadowed by the identical
# packArray defined again a few lines below in this module.
def packArray(array, mask):
    out = ""
    for i in array:
        out += pack(mask,i) # h = 4bit words
    return out
# Verbose-output switch read from the shared pacman configuration.
DEBUG = pacman_configuration.getboolean('stdp_table_generator', 'debug')
# Fixed-point scaling factor: weights/values are stored as value * 256.
p1 = 256
# packs an array with a given mask for every element
def packArray(array, mask):
    """Return the concatenation of struct.pack(mask, i) for every i in array.

    b"".join replaces the original quadratic `out += pack(...)` loop and
    also works on Python 3, where struct.pack returns bytes (on Python 2
    bytes is str, so behaviour is unchanged there).
    """
    return b"".join(pack(mask, i) for i in array)
def setHeaders(w_min, w_max, ltp_time_window, ltd_time_window, resolution, words):
    """Build the binary file header.

    Layout: w_min and w_max scaled by the module factor p1 as little-endian
    int16, followed by the ltp/ltd window sizes, the resolution, its log2,
    and the word count as signed bytes.
    """
    header = pack("<h", w_min*p1)
    header += pack("<h", w_max*p1)
    for field in (ltp_time_window, ltd_time_window, resolution,
                  int(log(resolution, 2)), words):
        header += pack("<b", field)
    return header
def calc_STDP_table(ltp_time_window, ltd_time_window, resolution, A_plus, A_minus, tau_plus, tau_minus, words, zero_value=0):
    """Compute the STDP weight-change table: 128 LTD entries, the dt == 0
    value, then 128 LTP entries, all scaled by the module factor p1 and
    zeroed outside the requested ltd/ltp time windows."""
    # print ltd_time_window+ltp_time_window, resolution*words
    assert ltd_time_window+ltp_time_window < resolution*words*32*2, "Time window exceeds maxmimum size of %d msec. Decrease ltd/ltp time window or resolution" % (resolution*words*32*2+1)
    # Time offsets: the LTD side counts down to 0; the LTP side counts up
    # from `resolution` — each side spans 32*4 = 128 steps.
    ltd = arange(resolution*32*4, 0, -resolution)
    ltp = arange(resolution, resolution*4*32+resolution, resolution)
    if DEBUG: print "[ stdp_table_generator ] :" ,ltd, ltp
    out = []
    # Each entry averages the exponential decay evaluated at l and l+1 msec.
    for l in ltd:
        out.append( (A_minus*exp(float(-l)/tau_minus) + A_minus*exp(float(-(l+1))/tau_minus))/2 )
    if zero_value != 0: print "[ stdp_table_generator ] : setting value in dt = 0 to %f" % zero_value
    out.append(zero_value)
    for l in ltp:
        out.append( (A_plus*exp(float(-l)/tau_plus) + A_plus*exp(float(-(l+1))/tau_plus))/2 )
    # Scaling
    out = [ int(i*p1) for i in out ]
    # words*32 is the size of the ltp and ltd window. The whole table is words*32 + 1 (value in 0) + words*32 = words*32*2+1 bytes long
    # left_bound = int(resolution*32*4-ltd_time_window/resolution)
    # right_bound = int(words*32+1+ltp_time_window/resolution)
    left_bound = 128-ltd_time_window
    right_bound = 129 + ltp_time_window
    # Truncating the time window with the one specified by ltd/ltp_time_window
    # print left_bound, right_bound
    out[:left_bound] = zeros(left_bound, 'int')
    out[right_bound:] = zeros(128-ltp_time_window, 'int')
    if DEBUG: print out
    return out
def compile_stdp_table(cfg, out_filename):
    """
    Compiles an STDP table file from the dictionary cfg and writes it to
    out_filename. cfg must provide:
    cfg['w_min'], cfg['w_max'] (used for the header),
    cfg['ltp_time_window'],
    cfg['ltd_time_window'],
    cfg['resolution'],
    cfg['A_plus'],
    cfg['A_minus'],
    cfg['tau_plus'],
    cfg['tau_minus'],
    cfg['words'],
    cfg['zero_value']
    """
    print "[ stdp_table_generator ] : Writing file", out_filename
    # NOTE(review): opened in text mode 'w+' although packed binary data is
    # written — fine on Python 2 (str == bytes); confirm before porting.
    f = open(out_filename, mode='w+')
    print "[ stdp_table_generator ] : Writing headers"
    f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words']))
    # Table values follow the header, packed as signed bytes.
    s = calc_STDP_table(cfg['ltp_time_window'],
                cfg['ltd_time_window'],
                cfg['resolution'],
                cfg['A_plus'],
                cfg['A_minus'],
                cfg['tau_plus'],
                cfg['tau_minus'],
                cfg['words'],
                cfg['zero_value'])
    f.write(packArray(s,'<b'))
    f.close()
    print "[ stdp_table_generator ] : Done!"
def compile_stdp_tts_table(cfg, out_filename):
    """
    Compiles an STDP table for the TimeToSpike rule from the dictionary cfg
    and writes it to out_filename. Same layout as compile_stdp_table, with
    cfg['L_parameter'] appended as a trailing int16. cfg must provide:
    cfg['w_min'], cfg['w_max'] (used for the header),
    cfg['ltp_time_window'],
    cfg['ltd_time_window'],
    cfg['resolution'],
    cfg['A_plus'],
    cfg['A_minus'],
    cfg['tau_plus'],
    cfg['tau_minus'],
    cfg['words'],
    cfg['zero_value'],
    cfg['L_parameter']
    """
    print "[ stdp_table_generator ] : Writing file", out_filename
    f = open(out_filename, mode='w+')
    print "Writing headers"
    f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words']))
    s = calc_STDP_table(cfg['ltp_time_window'],
                cfg['ltd_time_window'],
                cfg['resolution'],
                cfg['A_plus'],
                cfg['A_minus'],
                cfg['tau_plus'],
                cfg['tau_minus'],
                cfg['words'],
                cfg['zero_value'])
    f.write(packArray(s,'<b'))
    # TimeToSpike-specific trailing parameter.
    f.write(pack("<h", cfg['L_parameter']))
    f.close()
    print "Done!"
def compile_stdp_table_from_db(db):
    """Generate one STDP table file per plasticity entry found in the db."""
    print "\n[ stdp_table_generator ] : calculating STDP tables"
    plasticity_parameters = db.get_plasticity_parameters()
    if len(plasticity_parameters) < 1:
        print "[ stdp_table_generator ] : Nothing to do...\n"
        return
    for p in plasticity_parameters:
        if DEBUG: print p
        # One output file per (x, y, p) processor coordinate.
        out_file_name = BINARY_DIRECTORY + "stdp_table_" + str(p['x']) + "_" + str(p['y']) + "_" + str(p['p']) + ".dat"
        # FIXME read defaults from pacman cfg
        # NOTE(review): eval on a DB-sourced string — unsafe if the database
        # content is not fully trusted; consider ast.literal_eval.
        parameters = eval (p['parameters'])
        if DEBUG: print parameters
        # Fill in any missing entries with the configured defaults.
        if 'ltd_time_window' not in parameters.keys(): parameters['ltd_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltd_time_window')
        if 'ltp_time_window' not in parameters.keys(): parameters['ltp_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltp_time_window')
        if 'words' not in parameters.keys(): parameters['words'] = pacman_configuration.getint('stdp_table_generator', 'words')
        if 'zero_value' not in parameters.keys(): parameters['zero_value'] = eval(pacman_configuration.get('stdp_table_generator', 'zero_value'))
        if DEBUG:
            print "[ stdp_table_generator ] : parameters: ", parameters
            print "[ stdp_table_generator ] : p: ", p
        # Dispatch on the plasticity rule; TimeToSpike uses the tts variant.
        if p['method'] == 'FullWindow':
            print "[ stdp_table_generator ] : computing STDP table for FullWindow rule"
            compile_stdp_table(parameters, out_file_name)
        if p['method'] == 'SpikePairRule':
            print "[ stdp_table_generator ] : computing STDP table for SpikePair rule"
            compile_stdp_table(parameters, out_file_name)
        if p['method'] == 'TimeToSpike':
            print "[ stdp_table_generator ] : computing STDP table for TimeToSpike rule"
            compile_stdp_tts_table(parameters, out_file_name)
if __name__ == "__main__":
    # CLI usage: stdp_table_generator.py <network.db>
    db = load_db(sys.argv[1]) # IMPORTS THE DB (it will also load the model library by default)
    compile_stdp_table_from_db(db)
| [
"francesco@inspiron.local"
] | francesco@inspiron.local |
f2a7ee60c707d01abd0cb97f85cf647ce9ebf4e3 | a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb | /article/migrations/0006_auto_20210311_1721.py | 116d4f2900f9f0f393ad9eb58894d557a6c11b5c | [] | no_license | Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev | 5a7f210e51f1998e5d52cdeb42538f2786af3f9f | fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1 | refs/heads/master | 2023-05-03T17:01:59.066596 | 2021-05-26T13:28:41 | 2021-05-26T13:28:41 | 368,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Generated by Django 3.1.6 on 2021-03-11 17:21
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Article.tags to tags_old.

    Presumably step one of swapping in a new ``tags`` field — confirm
    against the follow-up migrations in this app.
    """

    # Must be applied after the previous 'article' migration.
    dependencies = [
        ('article', '0005_auto_20210311_1319'),
    ]

    operations = [
        # Keep the existing data available under a new column name.
        migrations.RenameField(
            model_name='article',
            old_name='tags',
            new_name='tags_old',
        ),
    ]
| [
"kurbanalieverlan@gmail.com"
] | kurbanalieverlan@gmail.com |
6fe7640c64822df4cca889a856f9099d33231595 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02860/s554783475.py | ba781c1a512917a311a200fc59b2e495d4dab5c5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | n = int(input())
s = input()
# A string of even length n is a "doubled" string iff its first half
# equals its second half; odd-length strings can never qualify.
# (n is read from stdin on the preceding line.)
if (n % 2 == 1):
    print("No")
else:
    # c flags the first mismatch between the two halves.
    c = 0
    for i in range(int(n / 2)):
        if (s[i] != s[i + int(n / 2)]):
            c = 1
    if (c == 0):
        print("Yes")
    else:
        print("No")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1a7193f2ab76638143eedaff1d1f49fa6a2291d0 | b0504df295f3738827184f1aed86b48c0303e7ca | /data/pdbbind/example_conjoint_pdb/ecfp-pdb-refined2019-pocket.py | 7e4cc1038cb051f05001a762b7f2bed7a465b735 | [] | no_license | jank3/AlogP-DL | 3c47098cff36551518eb23629ceca809b4e366ea | e4029cc76dce5b196c4eebe4a66a560b7fe14e0c | refs/heads/master | 2023-03-18T17:47:40.055718 | 2021-02-28T07:49:46 | 2021-02-28T07:49:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py |
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys

# Compute 1024-bit Morgan (ECFP4, radius 2) fingerprints for the ligand
# and the cleaned pocket of PDB entry 11gs, and append one CSV row:
# id, 1024 ligand bits, 1024 pocket bits.
ligand = Chem.MolFromPDBFile('11gs_ligand.pdb')
pocketr1 = Chem.MolFromPDBFile('11gs_pocket_clean.pdb')
featureL=AllChem.GetMorganFingerprintAsBitVect(ligand,2,nBits = 1024)
featureL1=AllChem.GetMorganFingerprintAsBitVect(pocketr1, 2,nBits = 1024)
features=[]
features=['11gs']
# ToBitString() yields '0'/'1' characters; extend() adds one per bit.
features.extend(featureL.ToBitString())
features.extend(featureL1.ToBitString())
with open('ecfp-pocket-refined2019.txt', 'a') as f:
    f.write(','.join([str(x) for x in features]))
    f.write('\n')
exit()
| [
"xlxsdu@163.com"
] | xlxsdu@163.com |
1ff773919aec1f3c3dc117cc8f3db600db5c9e89 | ad5b4790cf04b65f93729c56961d2feb3c6194cb | /tools/cpplint/setup.py | 030ea14ef2092c83e99ebe0ecf65808f32aac0ba | [
"BSD-3-Clause",
"MIT"
] | permissive | BoogeeDoo/mt19937 | 5b795e1f7221ef5a331824e745dc89610ead1f7e | 56f0f3f80cee8ec76d08c84a413b9dfc8928b8f7 | refs/heads/master | 2023-07-19T21:41:39.414715 | 2022-06-01T14:45:23 | 2022-06-01T14:45:23 | 117,514,446 | 4 | 1 | MIT | 2022-11-20T13:19:26 | 2018-01-15T07:53:45 | C++ | UTF-8 | Python | false | false | 2,955 | py | #! /usr/bin/env python
from setuptools import setup, Command
from subprocess import check_call
from distutils.spawn import find_executable
import cpplint as cpplint
class Cmd(Command):
    '''
    Superclass for other commands to run via setup.py, declared in setup.cfg.
    These commands will auto-install setup_requires in a temporary folder.
    '''
    user_options = [
        ('executable', 'e', 'The executable to use for the command')
    ]

    def initialize_options(self):
        # ``executable`` is expected as a class attribute on subclasses
        # (e.g. Lint.executable); resolve it to an absolute path here.
        self.executable = find_executable(self.executable)

    def finalize_options(self):
        # Nothing to validate; required by the Command interface.
        pass

    def execute(self, *k):
        # Run the resolved executable with the given arguments; raises
        # CalledProcessError on a non-zero exit status.
        check_call((self.executable,) + k)
class Lint(Cmd):
    '''run with python setup.py lint'''
    description = 'Run linting of the code'
    user_options = Cmd.user_options + [
        ('jobs', 'j', 'Use multiple processes to speed up the linting')
    ]
    # Resolved to an absolute path by Cmd.initialize_options.
    executable = 'pylint'

    def run(self):
        # Lint the single module this project ships.
        self.execute('cpplint.py')
# some pip versions bark on comments (e.g. on travis)
def read_without_comments(filename):
    """Return the non-empty, non-comment lines of *filename* as a list.

    Lines that are empty or whose first character is ``#`` are dropped
    (some pip versions bark on comments, e.g. on travis); trailing
    newlines are removed by ``splitlines``.
    """
    with open(filename) as f:
        # Truthiness replaces the original "not len(line) == 0" test.
        return [line for line in f.read().splitlines()
                if line and not line.startswith('#')]
# Test-only dependencies are tracked in a plain requirements file.
test_required = read_without_comments('test-requirements')

setup(name='cpplint',
      version=cpplint.__VERSION__,
      py_modules=['cpplint'],
      # generate platform specific start script
      entry_points={
          'console_scripts': [
              'cpplint = cpplint:main'
          ]
      },
      install_requires=[],
      url='https://github.com/cpplint/cpplint',
      download_url='https://github.com/cpplint/cpplint',
      keywords=['lint', 'python', 'c++'],
      maintainer='cpplint Developers',
      maintainer_email='see_github@nospam.com',
      classifiers=['Programming Language :: Python',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'Programming Language :: Python :: 3.8',
                   'Programming Language :: C++',
                   'Development Status :: 5 - Production/Stable',
                   'Environment :: Console',
                   'Topic :: Software Development :: Quality Assurance',
                   'License :: Freely Distributable'],
      description='Automated checker to ensure C++ files follow Google\'s style guide',
      long_description=open('README.rst').read(),
      license='BSD-3-Clause',
      setup_requires=[
          "pytest-runner"
      ],
      tests_require=test_required,
      # extras_require allow pip install .[dev]
      extras_require={
          'test': test_required,
          'dev': read_without_comments('dev-requirements') + test_required
      },
      # Custom commands declared above, e.g. "python setup.py lint".
      cmdclass={
          'lint': Lint
      })
| [
"i@2333.moe"
] | i@2333.moe |
b2e9aef98ce8e65f58c90611607ae2f1481b8d51 | 3bbf917e4525d84f4c42752cda3b072d83fbd77d | /Labs/Persisting Data/MoviesItemOps06.py | 88a449484d1f16322df20543e2fe93b17deb4983 | [] | no_license | renan-suetsugu/WorkshopPythonOnAWS | 2fe891efe779802bdf497ce57c9a042886fbe3a2 | 0dee38d6cb24f5a33c5ac48409c6112fb57bab0a | refs/heads/main | 2023-08-03T07:45:07.785458 | 2021-09-08T19:21:37 | 2021-09-08T19:21:37 | 400,149,268 | 0 | 0 | null | 2021-09-08T19:17:40 | 2021-08-26T11:47:11 | Python | UTF-8 | Python | false | false | 993 | py | import boto3
from botocore.exceptions import ClientError
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders boto3's decimal.Decimal values as numbers.

    DynamoDB returns every number as decimal.Decimal, which the stock
    JSONEncoder cannot serialize.
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # A non-zero remainder means a fractional part is present.
            # Compare with != 0 (not > 0) so negative fractional values
            # such as Decimal('-1.5') are not silently truncated to int.
            if o % 1 != 0:
                return float(o)
            else:
                return int(o)
        return super(DecimalEncoder, self).default(o)
# Delete one item from the Movies table by its composite primary key.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('Movies')

title = "The Big New Movie"
year = 2015

print("Tentando uma exclução condicional...")
try:
    # Key must contain both the partition key (year) and sort key (title).
    response = table.delete_item(
        Key={
            'year': year,
            'title': title
        },
    )
except ClientError as e:
    # Report a failed conditional check; re-raise any other client error.
    if e.response['Error']['Code'] == "ConditionalCheckFailedException":
        print(e.response['Error']['Message'])
    else:
        raise
else:
    print("Item deletado com sucesso:")
    print(json.dumps(response, indent=4, cls=DecimalEncoder))
| [
"noreply@github.com"
] | renan-suetsugu.noreply@github.com |
66f7f0ea804830c3090d2f78537d4b535a84b454 | 0bcfdf3ba3a0083a5254388bd8bd1d24bdb70e2a | /app/models.py | c44d7700e6af934c5cdb1ecf709c97b00c275fe9 | [] | no_license | Quinnan-Gill/microblog | 1d216d6a6f49162080a13209e51d13c2c7169af7 | c0b34a529b3434b1f29c139a90082c1b41b511ff | refs/heads/master | 2022-12-10T15:29:17.055685 | 2019-09-12T03:01:58 | 2019-09-12T03:01:58 | 165,971,072 | 0 | 0 | null | 2022-12-08T01:33:25 | 2019-01-16T04:03:10 | Python | UTF-8 | Python | false | false | 10,901 | py | import json
import redis
import rq
import base64
import os
from time import time
from datetime import datetime, timedelta
from hashlib import md5
from time import time
from flask import current_app, url_for
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from app import db, login
from app.search import add_to_index, remove_from_index, query_index
class SearchableMixin(object):
    """Keep a SQLAlchemy model in sync with an external full-text index.

    Models opt in by inheriting this mixin; the before/after-commit
    hooks (registered below via db.event.listen) mirror every add,
    update and delete into the search index.
    """

    @classmethod
    def search(cls, expression, page, per_page):
        # Query the external index, then re-fetch the matching rows from
        # the database while preserving the index's relevance ordering.
        ids, total = query_index(cls.__tablename__, expression, page, per_page)
        if total == 0:
            # No hits: return an empty (but valid) query object.
            return cls.query.filter_by(id=0), 0
        when = []
        for i in range(len(ids)):
            when.append((ids[i], i))
        # The CASE expression orders rows by their position in ``ids``.
        return cls.query.filter(cls.id.in_(ids)).order_by(
            db.case(when, value=cls.id)), total

    @classmethod
    def before_commit(cls, session):
        # Snapshot pending changes; they are no longer available on the
        # session once the commit has completed.
        session._changes = {
            'add': list(session.new),
            'update': list(session.dirty),
            'delete': list(session.deleted)
        }

    @classmethod
    def after_commit(cls, session):
        # Apply the snapshot taken in before_commit to the search index,
        # now that the transaction is known to have succeeded.
        for obj in session._changes['add']:
            if isinstance(obj, SearchableMixin):
                add_to_index(obj.__tablename__, obj)
        for obj in session._changes['update']:
            if isinstance(obj, SearchableMixin):
                add_to_index(obj.__tablename__, obj)
        for obj in session._changes['delete']:
            if isinstance(obj, SearchableMixin):
                remove_from_index(obj.__tablename__, obj)

    @classmethod
    def reindex(cls):
        # Rebuild the index entry for every existing row of the model.
        for obj in cls.query:
            add_to_index(cls.__tablename__, obj)
class PaginatedAPIMixin(object):
    """Provide a standard paginated JSON envelope for API collections."""

    @staticmethod
    def to_collection_dict(query, page, per_page, endpoint, **kwargs):
        """Serialize *query* into an items/_meta/_links dictionary.

        ``endpoint`` plus ``kwargs`` are passed to url_for() to build
        the self/next/prev navigation links; next/prev are None at the
        collection boundaries.
        """
        resources = query.paginate(page, per_page, False)
        data = {
            'items': [item.to_dict() for item in resources.items],
            '_meta': {
                'page': page,
                'per_page': per_page,
                'total_page': resources.pages,
                'total_items': resources.total
            },
            '_links': {
                'self': url_for(endpoint, page=page, per_page=per_page,
                                **kwargs),
                'next': url_for(endpoint, page=page + 1, per_page=per_page,
                                **kwargs) if resources.has_next else None,
                'prev': url_for(endpoint, page=page - 1, per_page=per_page,
                                **kwargs) if resources.has_prev else None
            }
        }
        return data
# Mirror committed model changes into the search index (see SearchableMixin).
db.event.listen(db.session, 'before_commit', SearchableMixin.before_commit)
db.event.listen(db.session, 'after_commit', SearchableMixin.after_commit)

# Self-referential association table backing the follower graph.
followers = db.Table(
    'followers',
    db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(PaginatedAPIMixin, UserMixin, db.Model):
    """An application user: credentials, profile, social graph, messages,
    notifications, background tasks and API tokens."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))  # never store plaintext
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)
    # Self-referential many-to-many: the users this user follows.
    followed = db.relationship(
        'User', secondary=followers,
        primaryjoin=(followers.c.follower_id == id),
        secondaryjoin=(followers.c.followed_id == id),
        backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
    messages_sent = db.relationship('Message', foreign_keys='Message.sender_id',
                                    backref='author', lazy='dynamic')
    messages_received = db.relationship('Message',
                                        foreign_keys='Message.recipient_id',
                                        backref='recipient', lazy='dynamic')
    last_message_read_time = db.Column(db.DateTime)
    notifications = db.relationship('Notification', backref='user',
                                    lazy='dynamic')
    tasks = db.relationship('Task', backref='user', lazy='dynamic')
    # API authentication token and its expiry.
    token = db.Column(db.String(32), index=True, unique=True)
    token_expiration = db.Column(db.DateTime)

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Hash *password* and store only the hash."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def avatar(self, size):
        """Return a Gravatar URL for this user, *size* pixels square."""
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)

    def follow(self, user):
        """Start following *user* (no-op if already following)."""
        if not self.is_following(user):
            self.followed.append(user)

    def unfollow(self, user):
        """Stop following *user* (no-op if not following)."""
        if self.is_following(user):
            self.followed.remove(user)

    def is_following(self, user):
        """Return True if this user follows *user*."""
        return self.followed.filter(
            followers.c.followed_id == user.id).count() > 0

    def followed_posts(self):
        """Posts by followed users plus the user's own, newest first."""
        followed = Post.query.join(
            followers, (followers.c.followed_id == Post.user_id)).filter(
                followers.c.follower_id == self.id)
        own = Post.query.filter_by(user_id=self.id)
        return followed.union(own).order_by(Post.timestamp.desc())

    def get_reset_password_token(self, expires_in=600):
        """Return a signed, short-lived JWT for password reset.

        Bug fix: the original read ``app.config`` but this module only
        imports ``current_app``, raising NameError at runtime.
        """
        return jwt.encode(
            {'reset_password': self.id, 'exp': time() + expires_in},
            current_app.config['SECRET_KEY'],
            algorithm='HS256').decode('utf-8')

    def new_messages(self):
        """Count messages received since the inbox was last viewed."""
        last_read_time = self.last_message_read_time or datetime(1900, 1, 1)
        return Message.query.filter_by(recipient=self).filter(
            Message.timestamp > last_read_time).count()

    def add_notification(self, name, data):
        """Replace any notification called *name* with payload *data*."""
        self.notifications.filter_by(name=name).delete()
        n = Notification(name=name, payload_json=json.dumps(data), user=self)
        db.session.add(n)
        return n

    def launch_task(self, name, description, *args, **kwargs):
        """Enqueue background job ``app.tasks.<name>`` and record a Task."""
        rq_job = current_app.task_queue.enqueue('app.tasks.' + name, self.id,
                                                *args, **kwargs)
        task = Task(id=rq_job.get_id(), name=name, description=description,
                    user=self)
        db.session.add(task)
        return task

    def get_tasks_in_progress(self):
        """All of this user's unfinished background tasks.

        Note: a duplicate definition of this method (taking a ``name``
        argument, identical to get_task_in_progress) shadowed this one
        and has been removed.
        """
        return Task.query.filter_by(user=self, complete=False).all()

    def get_task_in_progress(self, name):
        """The unfinished task called *name*, or None."""
        return Task.query.filter_by(name=name, user=self,
                                    complete=False).first()

    def to_dict(self, include_email=False):
        """Serialize this user for the JSON API.

        The email is private and only included when *include_email* is
        True (i.e. when the user requests their own resource).
        """
        data = {
            'id': self.id,
            'username': self.username,
            'last_seen': self.last_seen.isoformat() + 'Z',
            'about_me': self.about_me,
            'post_count': self.posts.count(),
            'follower_count': self.followers.count(),
            'followed_count': self.followed.count(),
            '_links': {
                'self': url_for('api.get_user', id=self.id),
                'followers': url_for('api.get_followers', id=self.id),
                'followed': url_for('api.get_followed', id=self.id),
                'avatar': self.avatar(128)
            }
        }
        if include_email:
            data['email'] = self.email
        return data

    def from_dict(self, data, new_user=False):
        """Update writable fields from an API request payload."""
        for field in ['username', 'email', 'about_me']:
            if field in data:
                setattr(self, field, data[field])
        if new_user and 'password' in data:
            self.set_password(data['password'])

    def get_token(self, expires_in=3600):
        """Return the API token, minting a fresh one when the current
        token is missing or within a minute of expiring."""
        now = datetime.utcnow()
        if self.token and self.token_expiration > now + timedelta(seconds=60):
            return self.token
        self.token = base64.b64encode(os.urandom(24)).decode('utf-8')
        self.token_expiration = now + timedelta(seconds=expires_in)
        db.session.add(self)
        return self.token

    def revoke_token(self):
        """Invalidate the current API token immediately."""
        self.token_expiration = datetime.utcnow() - timedelta(seconds=1)

    @staticmethod
    def verify_reset_password_token(token):
        """Return the user encoded in reset *token*, or None if invalid.

        Bug fix: reads ``current_app.config`` (the original referenced
        an undefined ``app``); the bare ``except:`` is narrowed.
        """
        try:
            id = jwt.decode(token, current_app.config['SECRET_KEY'],
                            algorithms=['HS256'])['reset_password']
        except Exception:
            # Expired, tampered or malformed token.
            return
        return User.query.get(id)

    @staticmethod
    def check_token(token):
        """Return the user owning a valid, unexpired API *token*, else None."""
        user = User.query.filter_by(token=token).first()
        if user is None or user.token_expiration < datetime.utcnow():
            return None
        return user
@login.user_loader
def load_user(id):
    """Flask-Login hook: load a user by the id stored in the session."""
    return User.query.get(int(id))
class Post(SearchableMixin, db.Model):
    """A user's blog post; the body is mirrored into the search index."""
    __searchable__ = ['body']  # columns indexed by SearchableMixin
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    language = db.Column(db.String(5))  # language code of the body text

    def __repr__(self):
        return '<Post {}>'.format(self.body)
class Message(db.Model):
    """A private message between two users (see User.messages_*)."""
    id = db.Column(db.Integer, primary_key=True)
    sender_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    recipient_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    body = db.Column(db.String(150))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)

    def __repr__(self):
        return '<Message {}>'.format(self.body)
class Notification(db.Model):
    """A named, user-scoped notification with a JSON-encoded payload."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), index=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    timestamp = db.Column(db.Float, index=True, default=time)  # unix time
    payload_json = db.Column(db.Text)

    def get_data(self):
        # Decode the stored JSON payload into Python objects.
        return json.loads(str(self.payload_json))
class Task(db.Model):
    """Tracks a background RQ job; ``id`` holds the RQ job id string."""
    id = db.Column(db.String(36), primary_key=True)
    name = db.Column(db.String(128), index=True)
    description = db.Column(db.String(128))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    complete = db.Column(db.Boolean, default=False)

    def get_rq_job(self):
        """Return the live RQ job, or None if redis is unreachable or
        the job no longer exists."""
        try:
            rq_job = rq.job.Job.fetch(self.id, connection=current_app.redis)
        except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
            return None
        return rq_job
def get_process(self):
job = self.get_rq_job()
return job.meta.get('progress', 0) if job is not None else 100 | [
"quinnan.gill@gmail.com"
] | quinnan.gill@gmail.com |
6ae2804678615a3a1654175705d975799f861089 | 0b67530ca1ed53251c343b38332ea7f61c18c1c5 | /cmd123.py | 84d2c5a452f8cf1d8fddb2175c5cc96c0b61a40f | [] | no_license | NikhilChaudhari11/nik1 | c4b27d305956e1560d333f962e0ee8db7760fa1a | fc129980c6f484e0c6f797fa3d20c4c0e095ef74 | refs/heads/master | 2020-12-11T09:07:47.073064 | 2018-01-12T17:21:49 | 2018-01-12T17:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import re
import time
import os
def cmd1(line):
    """Run each non-empty, non-comment line of *line* as a shell command.

    The text is split on runs of newlines; blank / whitespace-only lines
    and lines whose first non-space character is ``#`` are skipped.  Each
    remaining line is stripped and executed via ``os.popen`` with a short
    pause before and after, like a simple batch runner.
    """

    def _batch(raw_lines):
        # Keep only runnable command lines, stripped of surrounding space.
        # Bug fix: the original tested ``type(i) != 'None'`` (a type is
        # never equal to the string 'None', so always true) and raised
        # IndexError on whitespace-only lines via ``i.split()[0][0]``.
        commands = []
        for raw in raw_lines:
            stripped = raw.strip()
            if stripped and not stripped.startswith('#'):
                commands.append(stripped)
        return commands

    for command in _batch(re.split("\n+", line)):
        time.sleep(0.15)
        os.popen(command)
        time.sleep(0.15)
"nikhilc11@gmail.com"
] | nikhilc11@gmail.com |
866fcd777ed57198ecc587fa85d3a71e6974ea99 | 9d1491368c5e87760131ba27d252ee2d10620433 | /gammapy/spectrum/powerlaw.py | 39edaeca1329962422682f6d153c6cf79d653ff1 | [
"BSD-3-Clause"
] | permissive | cnachi/gammapy | f9295306a8e81d0b7f4d2111b3fa3679a78da3f7 | 3d3fc38c111d2f490d984082750f8003580fe06c | refs/heads/master | 2021-01-20T23:37:59.409914 | 2016-06-09T08:36:33 | 2016-06-09T08:36:33 | 60,764,807 | 0 | 0 | null | 2016-06-09T09:55:54 | 2016-06-09T09:55:54 | null | UTF-8 | Python | false | false | 6,540 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Power law spectrum helper functions.
Convert differential and integral fluxes with error propagation.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
__all__ = [
'power_law_evaluate',
'power_law_pivot_energy',
'df_over_f',
'power_law_flux',
'power_law_integral_flux',
'g_from_f',
'g_from_points',
'I_from_points',
'f_from_points',
'f_with_err',
'I_with_err',
'compatibility',
]
E_INF = 1e10 # practically infinitely high flux
g_DEFAULT = 2
def power_law_evaluate(energy, norm, gamma, energy_ref):
    r"""Differential flux of a power law at ``energy``.

    Implements :math:`f(E) = N (E / E_0)^{-\Gamma}` with normalisation
    ``norm``, reference energy ``energy_ref`` and spectral index
    ``gamma``.

    Parameters
    ----------
    energy : array_like
        Energy at which to compute the differential flux
    gamma : array_like
        Power law spectral index
    """
    scaled_energy = energy / energy_ref
    return norm * scaled_energy ** (-gamma)
def power_law_pivot_energy(energy_ref, f0, d_gamma, cov):
    """Pivot (a.k.a. decorrelation) energy, where df / f is smallest.

    Reference: http://arxiv.org/pdf/0910.4881
    """
    exponent = cov / (f0 * d_gamma ** 2)
    return energy_ref * np.exp(exponent)
def df_over_f(e, e0, f0, df0, dg, cov):
    """Relative flux error at any energy ``e`` (used to draw butterflies).

    Reference: http://arxiv.org/pdf/0910.4881 Equation (1)
    """
    log_ratio = np.log(e / e0)
    variance = ((df0 / f0) ** 2
                - 2 * cov / f0 * log_ratio
                + (dg * log_ratio) ** 2)
    return np.sqrt(variance)
def _conversion_factor(g, e, e1, e2):
    """Conversion factor between differential and integral flux."""
    # Only falling power laws are used in gamma-ray astronomy, so force
    # a positive index even if the caller passes the "wrong" sign.
    g = np.abs(g)
    power = -g + 1
    prefactor = e / power
    return prefactor * ((e2 / e) ** power - (e1 / e) ** power)
def power_law_flux(I=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
    """Compute differential flux for a given integral flux.

    Parameters
    ----------
    I : array_like
        Integral flux in the ``e1``, ``e2`` band
    g : array_like
        Power law spectral index
    e : array_like
        Energy at which to compute the differential flux
    e1 : array_like
        Energy band minimum
    e2 : array_like
        Energy band maximum

    Returns
    -------
    flux : `numpy.array`
        Differential flux at ``e``.
    """
    factor = _conversion_factor(g, e, e1, e2)
    return I / factor
def power_law_integral_flux(f=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
    """Compute integral flux for a given differential flux.

    Parameters
    ----------
    f : array_like
        Differential flux at ``e``
    g : array_like
        Power law spectral index
    e : array_like
        Energy at which the differential flux is given
    e1 : array_like
        Energy band minimum
    e2 : array_like
        Energy band maximum

    Returns
    -------
    flux : `numpy.array`
        Integral flux in the ``e1``, ``e2`` band
    """
    factor = _conversion_factor(g, e, e1, e2)
    return f * factor
def g_from_f(e, f, de=1):
    """Spectral index at energy ``e`` of the function ``f(e)``.

    Estimated from a finite two-point step of width ``de``.
    """
    return g_from_points(e, e + de, f(e), f(e + de))
def g_from_points(e1, e2, f1, f2):
    """Spectral index of the power law through two differential flux points."""
    log_flux_ratio = np.log(f2 / f1)
    log_energy_ratio = np.log(e2 / e1)
    return -log_flux_ratio / log_energy_ratio
def I_from_points(e1, e2, f1, f2):
    """Integral flux in the [e1, e2] bin of the power law through both points."""
    g = g_from_points(e1, e2, f1, f2)
    power = -g + 1
    return f1 * e1 / power * ((e2 / e1) ** power - 1)
def f_from_points(e1, e2, f1, f2, e):
    """Power-law interpolation between two flux points.

    Linear in log-log space, evaluated at energy ``e``.
    """
    e1 = np.asarray(e1, float)
    e2 = np.asarray(e2, float)
    f1 = np.asarray(f1, float)
    f2 = np.asarray(f2, float)
    e = np.asarray(e, float)
    # Slope of the straight line through both points in log-log space.
    slope = np.log(f2 / f1) / np.log(e2 / e1)
    return np.exp(np.log(f1) + np.log(e / e1) * slope)
def f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0,
               e=1, e1=1, e2=E_INF):
    """Differential flux with error propagation.

    Propagates the uncertainties of the integral flux (``I_val`` +/-
    ``I_err``) and the index (``g_val`` +/- ``g_err``) through
    :func:`power_law_flux` via the third-party ``uncertainties``
    package, so callers don't have to know about that module.
    Returns ``(f_val, f_err)``.
    """
    from uncertainties import unumpy
    I = unumpy.uarray(I_val, I_err)
    g = unumpy.uarray(g_val, g_err)
    _f = power_law_flux(I, g, e, e1, e2)
    # Split the uncertain result back into nominal values and std devs.
    f_val = unumpy.nominal_values(_f)
    f_err = unumpy.std_devs(_f)
    return f_val, f_err
def I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0,
               e=1, e1=1, e2=E_INF):
    """Integral flux with error propagation.

    Mirror of :func:`f_with_err`: propagates the uncertainties of the
    differential flux and the index through
    :func:`power_law_integral_flux` via the ``uncertainties`` package.
    Returns ``(I_val, I_err)``.
    """
    from uncertainties import unumpy
    f = unumpy.uarray(f_val, f_err)
    g = unumpy.uarray(g_val, g_err)
    _I = power_law_integral_flux(f, g, e, e1, e2)
    I_val = unumpy.nominal_values(_I)
    I_err = unumpy.std_devs(_I)
    return I_val, I_err
def compatibility(par_low, par_high):
    """Quantify spectral compatibility of power-law measurements in two bands.

    Reference: 2008ApJ...679.1299F Equation (2)

    Each of ``par_low`` / ``par_high`` is a list ``[e, f, f_err, g,
    g_err]`` with pivot energy ``e``, flux density ``f`` and spectral
    index ``g``.  Returns the matching index together with its deviation
    (in standard deviations) from the low-band index, from the high-band
    index, and both combined in quadrature.
    """
    e_low, f_low, f_err_low, g_low, g_err_low = par_low
    e_high, f_high, f_err_high, g_high, g_err_high = par_high

    # Index of the power law connecting both points, i.e. of the
    # straight line through them in the log_e / log_f plane.
    log_delta_f = np.log10(f_high) - np.log10(f_low)
    log_delta_e = np.log10(e_high) - np.log10(e_low)
    g_match = -log_delta_f / log_delta_e

    # Number of standard deviations between the match index and the
    # measured index in each band (Funk et al. 2008, eqn. 2).
    sigma_low = (g_match - g_low) / g_err_low
    sigma_high = (g_match - g_high) / g_err_high
    sigma_comb = np.hypot(sigma_low, sigma_high)

    return g_match, sigma_low, sigma_high, sigma_comb
| [
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
c2a7eee1f1f4756acddc4b286a978d0c08f441ef | 968970ca6a39c6cdc02cf8a79280630afa5ebc4f | /src/main/python/countTravelTime.py | d6476dd199a1c139cd1a6e86d4e433554f0a391c | [] | no_license | jdcc2/mbdtraffic | 91a5bc4347062971057eb9ec27bc40601240117e | ad2167cf5af63a0089f69d70f35340f68040b6ab | refs/heads/master | 2021-01-20T01:04:19.137668 | 2017-01-25T13:37:06 | 2017-01-25T13:37:06 | 79,126,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | #!/usr/bin/env python
import sys
import os
import json
import pyspark
# add actual job
def doJob(rdd):
print('traffic job')
#Map column names to column indices
columns = ['measurementSiteReference','measurementSiteVersion','index','periodStart','periodEnd','numberOfIncompleteInputs','numberOfInputValuesused','minutesUsed','computationalMethod','standardDeviation','supplierCalculatedDataQuality','sCDQ_Low','sCDQ_SD','number_of_sCDQ','dataError','travelTimeType','avgVehicleFlow','avgVehicleSpeed','avgTravelTime','computationMethod','measurementEquipmentTypeUsed','measurementSiteName1','measurementSiteName2','measurementSiteNumberOfLanes', 'measurementSiteIdentification','measurementSide','accuracy','period','specificLane','specificVehicleCharacteristics','startLocatieForDisplayLat','startLocatieForDisplayLong','LocationCountryCode','LocationTableNumber','LocationTableVersion','alertCDirectionCoded','specificLocation','offsetDistance','LOC_TYPE','LOC_DES','ROADNUMBER','ROADNAME,FIRST_NAME,SECND_NAME','messageType','publicationTime','deducedNoTrafficMinutes','carriageway']
columnToIndex = {}
for index, column in enumerate(columns):
columnToIndex[column] = index
#print(columnToIndex)
#Filter rows with data errors
clean = rdd.map(lambda line: line.split(',')).filter(lambda row: len(row) > 18 and row[columnToIndex['dataError']] != '1')
#total = clean.count()
usable = clean.filter(lambda row: row[columnToIndex['avgTravelTime']] != '')
print("Row count with avgTravelTime: ", usable.count())
return usable
def main():
# parse arguments
in_dir, out_dir = sys.argv[1:]
conf = pyspark.SparkConf().setAppName("%s %s %s" % (os.path.basename(__file__), in_dir, out_dir))
sc = pyspark.SparkContext(conf=conf)
# invoke job and put into output directory
doJob(sc.textFile(in_dir)).saveAsTextFile(out_dir)
if __name__ == '__main__':
main()
| [
"jd@leetbook"
] | jd@leetbook |
a96020623f1f41176402c5c4583499aab4707dc0 | 7620448f67684c814121a6b772a824b792e43b5f | /utilities/annotate_from_genomic_features.py | f8ed6df46a00f868b17132d811bd70ae0311144d | [
"Apache-2.0"
] | permissive | Sisov/AlignQC | 2c2dd952d0d864a8d84daa86260b8ac5e8d1d9eb | f0677876408371ced09ba15b586489b9139828f4 | refs/heads/master | 2021-01-11T07:52:07.209342 | 2016-09-02T19:19:11 | 2016-09-02T19:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,263 | py | #!/usr/bin/python
import sys, argparse, gzip, re, os, inspect
#bring in the folder to the path for our utilities
pythonfolder_loc = "../pylib"
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe() ))[0],pythonfolder_loc)))
if cmd_subfolder not in sys.path:
sys.path.insert(0,cmd_subfolder)
from Bio.Format.GPD import GPDStream
from Bio.Range import merge_ranges, GenomicRange, subtract_ranges, BedArrayStream, sort_ranges
from Bio.Stream import MultiLocusStream
def main(args):
    """Annotate each aligned read as exon / intron / intergenic.

    Builds the three feature range sets from the reference annotation
    and the chromosome lengths, then assigns every read in
    ``args.reads_gpd`` to the feature class covering the majority of
    its bases, writing one "name, type, exon count, length" row each.
    """
    inf = None
    chrlens = {}
    chrbed = []
    # Chromosome lengths define one whole-chromosome range per entry.
    if re.search('\.gz$',args.chromosome_lengths):
        inf = gzip.open(args.chromosome_lengths)
    else:
        inf = open(args.chromosome_lengths)
    for line in inf:
        f = line.rstrip().split("\t")
        chrlens[f[0]] = f[1]
        chrbed.append(GenomicRange(f[0],1,f[1]))
    inf.close()
    inf = None
    exonbed = []
    txbed = []
    sys.stderr.write("Reading Exons\n")
    # Collect exon and transcript ranges from the annotation GPD.
    if re.search('\.gz$',args.annotation_gpd):
        inf = gzip.open(args.annotation_gpd)
    else:
        inf = open(args.annotation_gpd)
    gs = GPDStream(inf)
    for gpd in gs:
        exonbed += [x.get_range() for x in gpd.exons]
        txbed.append(gpd.get_range())
    inf.close()
    sys.stderr.write("Merging "+str(len(txbed))+" transcripts\n")
    txbed = merge_ranges(txbed)
    sys.stderr.write(str(len(txbed))+" transcripts after merging\n")
    sys.stderr.write("Finding intergenic\n")
    # Intergenic = genome minus transcripts; introns = transcripts minus exons.
    intergenicbed = subtract_ranges(chrbed,txbed)
    sys.stderr.write("Found "+str(len(intergenicbed))+" intergenic regions\n")
    intergenicbp = sum([x.length() for x in intergenicbed])
    sys.stderr.write("Intergenic size: "+str(intergenicbp)+"\n")
    sys.stderr.write("Merging "+str(len(exonbed))+" exons\n")
    exonbed = merge_ranges(exonbed)
    sys.stderr.write(str(len(exonbed))+" exons after merging\n")
    sys.stderr.write("Finding introns\n")
    intronbed = subtract_ranges(txbed,exonbed)
    sys.stderr.write("Found "+str(len(intronbed))+" introns\n")
    chrbp = sum([x.length() for x in chrbed])
    sys.stderr.write("Genome size: "+str(chrbp)+"\n")
    txbp = sum([x.length() for x in txbed])
    sys.stderr.write("Tx size: "+str(txbp)+"\n")
    exonbp = sum([x.length() for x in exonbed])
    sys.stderr.write("Exon size: "+str(exonbp)+"\n")
    intronbp = sum([x.length() for x in intronbed])
    sys.stderr.write("Intron size: "+str(intronbp)+"\n")
    #sys.stderr.write(str(txbp+intergenicbp)+"\n")
    if args.output_beds:
        # Optionally dump every feature set as a BED file for inspection.
        if not os.path.exists(args.output_beds): os.makedirs(args.output_beds)
        with open(args.output_beds+'/chrs.bed','w') as of1:
            for rng in chrbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
        with open(args.output_beds+'/exon.bed','w') as of1:
            for rng in exonbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
        with open(args.output_beds+'/intron.bed','w') as of1:
            for rng in intronbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
        with open(args.output_beds+'/intergenic.bed','w') as of1:
            for rng in intergenicbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
        with open(args.output_beds+'/tx.bed','w') as of1:
            for rng in txbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
    inf = None
    if re.search('\.gz$',args.reads_gpd):
        inf = gzip.open(args.reads_gpd)
    else:
        inf = open(args.reads_gpd)
    reads = {}
    gs = GPDStream(inf)
    for gpd in gs:
        reads[gpd.get_gene_name()] = {}
    sys.stderr.write("Checking "+str(len(reads.keys()))+" Aligned Reads\n")
    # now we know all features we can annotate reads
    sys.stderr.write("Read through our reads and bed entries\n")
    sys.stderr.write("Annotate exons\n")
    exons = annotate_gpds(args,exonbed)
    exonnames = set(exons.keys())
    sys.stderr.write("Annotate intron\n")
    intron = annotate_gpds(args,intronbed)
    intronnames = set(intron.keys())
    sys.stderr.write("Annotate intergenic\n")
    intergenic = annotate_gpds(args,intergenicbed)
    intergenicnames = set(intergenic.keys())
    allnames = exonnames|intronnames|intergenicnames
    sys.stderr.write(str(len(allnames))+" reads attributed to a feature\n")
    vals = set(reads.keys())-allnames
    if len(vals) > 0:
        sys.stderr.write("WARNING unable to ascribe annotation to "+str(len(vals))+" reads\n")
    donenames = set()
    of = sys.stdout
    if args.output:
        if re.search('\.gz$',args.output):
            of = gzip.open(args.output,'w')
        else:
            of = open(args.output,'w')
    for name in allnames:
        # Fraction of the read's length overlapping each feature class.
        exonfrac = 0
        intronfrac = 0
        intergenicfrac = 0
        readlen = 0
        exoncount = 0
        if name in exons:
            exonfrac = float(exons[name][1])/float(exons[name][0])
            readlen = exons[name][0]
            exoncount = exons[name][2]
        if name in intron:
            intronfrac = float(intron[name][1])/float(intron[name][0])
            readlen = intron[name][0]
            exoncount = intron[name][2]
        if name in intergenic:
            intergenicfrac = float(intergenic[name][1])/float(intergenic[name][0])
            readlen = intergenic[name][0]
            exoncount = intergenic[name][2]
        vals = {'exon':exonfrac,'intron':intronfrac,'intergenic':intergenicfrac}
        type = None
        # Majority vote; if no class reaches 50%, the largest fraction
        # wins (exon prioritized over intron over intergenic on ties).
        if exonfrac >= 0.5:
            type = 'exon'
        elif intronfrac >= 0.5:
            type = 'intron'
        elif intergenicfrac >= 0.5:
            type = 'intergenic'
        else:
            type = sorted(vals.keys(),key=lambda x: vals[x])[-1]
            if vals[type] == 0:
                sys.stderr.write("WARNING trouble setting type\n")
        if not type: continue
        of.write(name+"\t"+type+"\t"+str(exoncount)+"\t"+str(readlen)+"\n")
    of.close()
def annotate_gpds(args,inputbed):
    """Overlap the reads GPD against one feature range set.

    Streams the reads together with the sorted ranges locus by locus
    and returns ``{read_name: [read_length, overlapped_bases,
    exon_count]}`` for every read with a non-zero overlap.
    """
    bas = BedArrayStream(sort_ranges(inputbed))
    inf = None
    if re.search('\.gz$',args.reads_gpd):
        inf = gzip.open(args.reads_gpd)
    else:
        # Bug fix: this previously opened ``args.args.reads_gpd``, which
        # raised AttributeError for any uncompressed reads file.
        inf = open(args.reads_gpd)
    gs = GPDStream(inf)
    mls = MultiLocusStream([gs,bas])
    results = {}
    for es in mls:
        [gpds,inbeds] = es.get_payload()
        # Skip loci lacking either reads or feature ranges.
        if len(gpds) == 0 or len(inbeds) == 0:
            continue
        v = annotate_inner(gpds,inbeds)
        for res in v:
            results[res[0]]=res[1:]
    inf.close()
    return results
def annotate_inner(gpds,inbeds):
    """Compute per-read overlap statistics against ``inbeds``.

    For every GPD entry with any overlap, emit
    ``[name, read_length, overlapped_bases, exon_count]``.
    """
    results = []
    for gpd in gpds:
        read_length = gpd.get_length()
        overlap_bases = 0
        for exon_range in (exon.get_range() for exon in gpd.exons):
            for bed_range in inbeds:
                overlap_bases += bed_range.overlap_size(exon_range)
        if overlap_bases > 0:
            results.append([gpd.get_gene_name(), read_length,
                            overlap_bases, gpd.get_exon_count()])
    return results
def do_inputs():
  """Build the command-line parser and return the parsed arguments."""
  ap = argparse.ArgumentParser(description="Assign genomic features to reads based on where they majority of the read lies. In the event of a tie prioritize exon over intron and intron over intergenic.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  # Required positional inputs (order matters).
  ap.add_argument('reads_gpd',help="reads gpd")
  ap.add_argument('annotation_gpd',help="reference annotations gpd")
  ap.add_argument('chromosome_lengths',help="reference lengths table")
  # Optional outputs.
  ap.add_argument('--output_beds',help="save features")
  ap.add_argument('-o','--output',help="output results")
  return ap.parse_args()
def external_cmd(cmd):
  """Run this module as if invoked from the shell with command line *cmd*."""
  saved_argv = sys.argv
  # Temporarily substitute argv so do_inputs() parses *cmd* instead.
  sys.argv = cmd.split()
  parsed = do_inputs()
  main(parsed)
  sys.argv = saved_argv
# Command-line entry point: parse arguments and run the annotation.
if __name__=="__main__":
  args = do_inputs()
  main(args)
| [
"jason.weirather@gmail.com"
] | jason.weirather@gmail.com |
ddcaf6e28b533963df17ac8f9f13f4ce3c77631f | 1581f1d66d6835b2c271295e3251c2dde239fec8 | /payment_gateway/pg_utils.py | 6036c701e7036016bef878326b20e168433fab8a | [] | no_license | abinash-kumar/pythod | 527659e3bdd161f9abcaaa9182dfe58044b3ff66 | 1469dc0cd9d6d72b2fe2e69f99542e470bea807b | refs/heads/master | 2023-01-30T02:54:10.729606 | 2020-02-24T07:18:51 | 2020-02-24T07:18:51 | 242,670,715 | 0 | 0 | null | 2023-01-25T13:57:52 | 2020-02-24T07:16:02 | Python | UTF-8 | Python | false | false | 2,318 | py | from motor_product import prod_utils as mpu
from health_product import prod_utils as hpu
# Maps internal health-insurer slugs to the shorter slugs expected by the
# payment gateway (see get_insurer_slug below).
HEALTH_INSURER_SLUG = {
    'the-oriental-insurance-company-ltd': 'oriental'
}
def resolve_utils(transaction):
    """Return the product-specific utils module for *transaction*.

    Motor transactions resolve to motor_product.prod_utils, health
    transactions to health_product.prod_utils; anything else yields None.
    """
    product = transaction.product_type
    if product == 'motor':
        return mpu
    if product == 'health':
        return hpu
    return None
def process_payment_response(request, response, transaction):
    """Dispatch gateway response handling to the product-specific module.

    Returns whatever the product module's handler returns, or None when the
    transaction's product type is neither 'motor' nor 'health'.
    """
    product = transaction.product_type
    if product == 'motor':
        vehicle_slug = mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type]
        return mpu.process_payment_response(
            request,
            vehicle_slug,
            get_insurer_slug(transaction),
            response,
            transaction.transaction_id
        )
    if product == 'health':
        insurer_id = transaction.slab.health_product.insurer.id
        return hpu.process_payment_response(insurer_id, response, transaction)
    return None
def get_insurer_slug(transaction):
    """Return the gateway insurer slug for *transaction*, or None if unknown.

    Motor transactions use the insurer slug directly; health transactions are
    translated through the HEALTH_INSURER_SLUG mapping.
    """
    product = transaction.product_type
    if product == 'motor':
        return transaction.insurer.slug
    if product == 'health':
        return HEALTH_INSURER_SLUG[transaction.slab.health_product.insurer.slug]
    return None
def get_error_url(transaction):
    """Return the product-specific payment-failure redirect URL, or None."""
    product = transaction.product_type
    if product == 'motor':
        slug = mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type]
        return '/motor/' + slug + '/product/failure/'
    if product == 'health':
        return '/health-plan/payment/transaction/%s/failure/' % transaction.transaction_id
    return None
def todict(obj, classkey=None):
    """Recursively convert *obj* into plain dicts/lists of primitives.

    Dicts are converted value-by-value; objects with an ``_ast()`` method are
    converted via that method; other iterables become lists; objects with a
    ``__dict__`` become dicts of their non-callable, non-underscore attributes
    (tagged with the class name under *classkey* when given). Anything else is
    returned unchanged.
    """
    if isinstance(obj, dict):
        return {k: todict(v, classkey) for (k, v) in obj.items()}
    if hasattr(obj, "_ast"):
        return todict(obj._ast())
    # Bug fix: on Python 3 strings are iterable, so the __iter__ branch below
    # recursed forever on any string; keep text atomic.
    if isinstance(obj, (str, bytes)):
        return obj
    if hasattr(obj, "__iter__"):
        return [todict(v, classkey) for v in obj]
    if hasattr(obj, "__dict__"):
        # Bug fix: obj.__dict__.iteritems() is Python-2-only; .items() works
        # identically on both Python 2 and 3.
        data = dict(
            (key, todict(value, classkey))
            for key, value in obj.__dict__.items()
            if not callable(value) and not key.startswith('_')
        )
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    return obj
| [
"abinashlv@AbinashSymboMac.local"
] | abinashlv@AbinashSymboMac.local |
af415894f66167bbebd63ee550eeff6774fea102 | c9837ea5229fce8a13dc28b8efe583e6b1f80f06 | /tests/acceptance/test_async.py | 13e29325278b60432ce81e30cf21f808f32fc48d | [
"MIT"
] | permissive | appetito/procrastinate | 51de7e4e7e216514c4c417e0d496fdf968332092 | 5e47d99ede5fafc5717765ebde3e2782b131672a | refs/heads/master | 2022-09-18T19:49:24.958649 | 2020-06-03T08:03:19 | 2020-06-03T08:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | import pytest
import procrastinate
pytestmark = pytest.mark.asyncio
@pytest.fixture
def pg_app(pg_connector):
    # A fresh procrastinate App bound to the PostgreSQL connector fixture.
    return procrastinate.App(connector=pg_connector)
async def test_defer(pg_app):
    # Populated by the tasks when the worker executes the deferred jobs.
    sum_results = []
    product_results = []

    @pg_app.task(queue="default", name="sum_task")
    def sum_task(a, b):
        sum_results.append(a + b)

    @pg_app.task(queue="default", name="product_task")
    async def product_task(a, b):
        product_results.append(a * b)

    # Defer jobs three different ways: directly on the task, via configure(),
    # and by task name through the app. Order here determines result order.
    await sum_task.defer_async(a=1, b=2)
    await sum_task.configure().defer_async(a=3, b=4)
    await pg_app.configure_task(name="sum_task").defer_async(a=5, b=6)
    await product_task.defer_async(a=3, b=4)

    # Process the queued jobs (wait=False: return once the queue is drained
    # instead of idling for more work).
    await pg_app.run_worker_async(queues=["default"], wait=False)

    assert sum_results == [3, 7, 11]
    assert product_results == [12]
| [
"joachim.jablon@people-doc.com"
] | joachim.jablon@people-doc.com |
0e188befbac224d8224dc6e6649007c2d0ccc5b5 | 8b1dcac39acfcee0f573dc71d608671dea2062a2 | /tools/hikyuu/interactive/draw/__init__.py | fcdb11396c845625805c5eebb3c406cd9deb7ab1 | [
"MIT"
] | permissive | eightwind/hikyuu | 4c876170b1e298105e7eaf9675b310ad378dd9a4 | 4dab98a93e2a9847f77d615d6900067fbf90b73d | refs/heads/master | 2021-08-26T05:32:39.813080 | 2017-11-21T18:59:16 | 2017-11-21T18:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | __version__ = "Only for pip dist" | [
"fasiondog@163.com"
] | fasiondog@163.com |
class Bike:
    """A simple bike with a price, a top speed, and an odometer (miles)."""

    def __init__(self, price, max_speed, miles=0):
        self.price = price
        # Bug fix: the constructor discarded its max_speed argument and
        # stored an empty list instead.
        self.max_speed = max_speed
        self.miles = miles

    def displayInfo(self, price, max_speed, miles):
        """Print the given values; returns self so calls can be chained."""
        print(price, max_speed, miles)
        return self

    def ride(self, miles=0):
        """Ride the bike, adding 10 miles to the odometer."""
        print("Riding")
        # Bug fix: the old code incremented the local parameter, so the
        # bike's state never changed; persist the change on the instance.
        # The miles parameter is kept (ignored, as before) for compatibility.
        self.miles += 10
        return self

    def reverse(self, miles=0):
        """Back up, subtracting 5 miles; the odometer never goes below 0."""
        print("Reversing")
        self.miles -= 5
        if self.miles < 0:
            self.miles = 0
        return self
| [
"swills0055@gmail.com"
] | swills0055@gmail.com |
8224ec2ea7bc83f7d68a0df94cbee6f1ccdee3ae | 6fa6288bd21694bb144798d63b77a8e2924603e5 | /DataStructures/arrays/codility/cheap_letter_deletion.py | 3a1e1b0a156b1406efd8ebc982701ecdfd622bd2 | [] | no_license | akshatakulkarni98/ProblemSolving | 649ecd47cec0a29ccff60edb60f3456bf982c4a1 | 6765dbbde41cfc5ee799193bbbdfb1565eb6a5f5 | refs/heads/master | 2023-01-03T19:03:49.249794 | 2020-10-27T06:28:02 | 2020-10-27T06:28:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | """
You are given a string S. Deletion of the K-th letter of S costs C[K]. After deleting a letter, the costs of deleting other letters do not change. For example, for S =
"ab" and C = [1, 3], after deleting 'a', deletion of 'b' will still cost 3.
You want to delete some letters from S to obtain a string without two identical letters next to each other. What is the minimum total cost of deletions to achieve
such a string?
Write a function:
def solution(S, C)
that, given string S and array C of integers, both of length N, returns the minimum cost of all necessary deletions.
Examples:
1. Given S = "abccbd" and C = [0, 1, 2, 3, 4, 5], the function should return 2. You can delete the first occurrence of 'c' to achieve "abcbd".
2. Given S = "aabbcc" and C = [1, 2, 1, 2, 1, 2], the function should return 3. By deleting all letters with a cost of 1, you can achieve string "abc".
3. Given S = "aaaa" and C = [3, 4, 5, 6], the function should return 12. You need to delete all but one letter 'a', and the lowest cost of deletions is 3+4+5=12.
4. Given S = "ababa" and C = [10, 5, 10, 5, 10], the function should return 0. There is no need to delete any letter.
Write an efficient algorithm for the following assumptions:
string S and array C have length equal to N;
N is an integer within the range [1..100,000];
string S consists only of lowercase letters ('a'−'z');
each element of array C is an integer within the range [0..1,000]
"""
def solution(S, C):
    """Minimum total deletion cost so no two equal letters are adjacent.

    Greedy: within each run of identical letters, keep only the most
    expensive one and pay the deletion cost of all the others.
    """
    if not S or not C:
        return -1
    total_cost = 0
    kept = 0  # index of the letter currently kept from the ongoing run
    for idx in range(1, len(S)):
        if S[idx] != S[kept]:
            # A new run of letters starts here.
            kept = idx
        else:
            # Two equal letters in a row: delete the cheaper of the two and
            # keep the more expensive one as the run's survivor.
            total_cost += min(C[kept], C[idx])
            if C[kept] < C[idx]:
                kept = idx
    return total_cost
| [
"noreply@github.com"
] | akshatakulkarni98.noreply@github.com |
1572ed7e2b86b6dc9bc339d9cf970e352a1bdfa1 | f5a2059897f30a77244c0e8426f54ad5bf0db0e3 | /resources/store.py | 82c62a12820e1ed584f44fccfd130f9033054a21 | [] | no_license | colemanGH319/stores-rest-api | c25a01199af1cfcfb3a899f7d4752c382c8870a8 | 44004f0f64a6bcae4fdc80245cb524b4d7958024 | refs/heads/master | 2020-04-01T15:04:55.172168 | 2018-10-17T19:14:47 | 2018-10-17T19:14:47 | 153,320,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from flask_restful import Resource
from models.store import StoreModel
class Store(Resource):
    """REST resource for a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or a 404 if it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {'message': 'Store not found.'}, 404

    def post(self, name):
        """Create a store; 400 if the name is taken, 500 on a save failure."""
        if StoreModel.find_by_name(name):
            return {'message': "A store with the name '{}' already exists.".format(name)}, 400

        store = StoreModel(name)
        try:
            store.save_to_db()
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only database/application errors belong here.
        except Exception:
            return {'message': 'An error occurred while creating the store.'}, 500

        return store.json()

    def delete(self, name):
        """Delete the store if present; report a 404 when it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
            return {'message': 'Store deleted'}
        # Bug fix: previously claimed 'Store deleted' even when no store
        # with that name existed.
        return {'message': 'Store not found.'}, 404
class StoreList(Resource):
    """REST resource returning every store."""

    def get(self):
        all_stores = [store.json() for store in StoreModel.query.all()]
        return {'stores': all_stores}
"coleman.matt319@gmail.com"
] | coleman.matt319@gmail.com |
770f1a9f35b1bc5cf04b2acf5eb206b60f5e0aa8 | f2851c0d6125fc93f6dcd9c731180484bcf3299e | /Simple_baidu_baike/baike_spider/test.py | 62c20de996f7529ee4beed2a7108f60b418a465b | [] | no_license | FrankYang3110/Simple_baidu_baike_spider | da0b7a3478486d3fd36d46f04c01536d1751ecb8 | a54161f20f002615d0454da4f6c63359b083fdee | refs/heads/master | 2020-05-03T02:09:59.214644 | 2019-04-12T03:05:26 | 2019-04-12T03:05:26 | 178,360,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# a = set()
# print(a is None)
# def get():
# return
# a = get()
# print(a is None)
from fake_useragent import UserAgent
import re
import requests
from urllib.parse import urljoin
from lxml import etree
# Spoof a random browser User-Agent so the request is not rejected.
headers = {'User-Agent': UserAgent().random}
url = 'https://baike.baidu.com/item/Python/407313?fr=aladdin'
r = requests.get(url, headers=headers)
# Use the encoding requests sniffs from the body, not the header default.
r.encoding = r.apparent_encoding
base_url = r.url  # final URL after redirects; useful for resolving relative links
html = r.text
tree = etree.HTML(html)
# Collect every link that points at another encyclopedia entry ("/item/...").
hrefs = tree.xpath('//a[contains(@href,"/item")]/@href')
# pattern = re.compile(r'href="(/item.*?)"')
# urls = re.findall(pattern, html)
# for url in urls:
# new_url = urljoin(base_url, url)
# print(new_url)
# href="/item/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1%E8%AF%AD%E8%A8%80/7073760" | [
"43460484+FrankYang3110@users.noreply.github.com"
] | 43460484+FrankYang3110@users.noreply.github.com |
f34674523abde90b1c1b6d237f4f30d76afc7788 | ff9c5a10eea701b6b6be1ec7076b5dfab91b6a38 | /ex08.py | 6a748ab59f8096a77c05fccec2e922c387077bf3 | [] | no_license | CFEsau/learnpython | 21707fc15bcb09098e83b116bd77203158a65353 | 0616bf36c1a0e3b81cb6d0f5edc70c142904ca65 | refs/heads/master | 2021-01-18T15:51:13.948611 | 2017-08-15T15:09:24 | 2017-08-15T15:09:24 | 100,386,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | #ex08: Printing, printing
# NOTE: Python 2 syntax (print statement).
# %r applies repr() to each value, so strings keep their quotes in the output.
formatter = "%r %r %r %r"

print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
# A format string is itself a string, so it can be used as its own arguments.
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    # The above line contains an apostrophe which affects the output
    "So I said goodnight."
)
| [
"c.esau@shef.ac.uk"
] | c.esau@shef.ac.uk |
df952844481362845f3f8fd712d4e353b5c9b969 | cbe4c2c2d163d2e5c611a77258ec1eb2e92b6479 | /api/migrations/0006_auto__add_field_configset_delta_name__del_unique_configset_hwtype_id_c.py | ca28f0e726f18e3e7cf855cddfb86694ed061bf4 | [] | no_license | radhakrishnaa/DCP | 20bcd6ce8143b5011310c42be858d139fb0cfa7a | c7970393811ef6686aafa4a49b96115b05ac86b6 | refs/heads/main | 2023-08-14T03:47:14.841160 | 2021-09-13T14:40:50 | 2021-09-13T14:40:50 | 406,011,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,862 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `api` app.

    Replaces the unique constraint on `config_set` over
    (hwtype_id, category_id, region_id, carrier_id) with one that also
    includes the new nullable `delta_name` column added here.
    """

    def forwards(self, orm):
        # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id']
        db.delete_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id'])

        # Adding field 'ConfigSet.delta_name'
        db.add_column('config_set', 'delta_name',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)

        # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']
        db.create_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'])

    def backwards(self, orm):
        # Exact inverse of forwards(): drop the new constraint, drop the
        # column, then restore the original four-column constraint.

        # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']
        db.delete_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'])

        # Deleting field 'ConfigSet.delta_name'
        db.delete_column('config_set', 'delta_name')

        # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id']
        db.create_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id'])

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'api.carrier': {
            'Meta': {'ordering': "['code']", 'object_name': 'Carrier', 'db_table': "'carrier'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'old_code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
        },
        u'api.cloudenv': {
            'Meta': {'ordering': "['order', 'short_name']", 'object_name': 'CloudEnv', 'db_table': "'cloud_env'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'env_type': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'network_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
        },
        u'api.configset': {
            'Meta': {'ordering': "['category_id', 'hwtype_id', 'carrier_id', 'region_id']", 'unique_together': "(('category_id', 'hwtype_id', 'carrier_id', 'region_id', 'delta_name'),)", 'object_name': 'ConfigSet', 'db_table': "'config_set'"},
            'carrier_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Carrier']", 'null': 'True', 'db_column': "'carrier_id'", 'blank': 'True'}),
            'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'delta_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}),
            'hwtype_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Hwtype']", 'null': 'True', 'db_column': "'hwtype_id'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'region_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Region']", 'null': 'True', 'db_column': "'region_id'", 'blank': 'True'})
        },
        u'api.configsetting': {
            'Meta': {'object_name': 'ConfigSetting', 'db_table': "'config_setting'"},
            'config_version_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'db_column': "'config_version_id'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'setting_value_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingValue']", 'db_column': "'setting_value_id'"})
        },
        u'api.configversion': {
            'Meta': {'object_name': 'ConfigVersion', 'db_table': "'config_version'"},
            'approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'approver_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_approver'", 'null': 'True', 'db_column': "'approver_id'", 'to': u"orm['api.User']"}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'committed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'committer_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_committer'", 'null': 'True', 'db_column': "'committer_id'", 'to': u"orm['api.User']"}),
            'config_set_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'db_column': "'config_set_id'"}),
            'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_editor_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_last_editor'", 'null': 'True', 'db_column': "'last_editor_id'", 'to': u"orm['api.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'publisher_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_publisher'", 'null': 'True', 'db_column': "'publisher_id'", 'to': u"orm['api.User']"}),
            'setting_value': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['api.SettingValue']", 'null': 'True', 'through': u"orm['api.ConfigSetting']", 'blank': 'True'}),
            'version_number': ('django.db.models.fields.IntegerField', [], {})
        },
        u'api.envtransform': {
            'Meta': {'ordering': "['order', 'env_pat']", 'object_name': 'EnvTransform', 'db_table': "'env_transform'"},
            'carrier_region_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'env_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'hwtype_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'setting_name_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'value_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'value_sub': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'})
        },
        u'api.hwtype': {
            'Meta': {'ordering': "['code']", 'object_name': 'Hwtype', 'db_table': "'hwtype'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'internal_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'marketing_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'model_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
        },
        u'api.region': {
            'Meta': {'ordering': "['code']", 'object_name': 'Region', 'db_table': "'region'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        u'api.settingcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'SettingCategory', 'db_table': "'setting_category'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
        },
        u'api.settingdef': {
            'Meta': {'ordering': "['group', 'order', 'display_name']", 'object_name': 'SettingDef', 'db_table': "'setting_def'"},
            'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}),
            'datatype': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rules': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'short_help': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
        },
        u'api.settingvalue': {
            'Meta': {'object_name': 'SettingValue', 'db_table': "'setting_value'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'setting_def_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingDef']", 'db_column': "'setting_def_id'"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'})
        },
        u'api.user': {
            'Meta': {'ordering': "['username']", 'object_name': 'User', 'db_table': "'user'"},
            'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'approver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        }
    }

    complete_apps = ['api']
"rtalluri@motorola.com"
] | rtalluri@motorola.com |
265049dd5c7273612076608f805ee6f00e3f2430 | 82e0fb055637e3181c7b1c25b2c199213c130f1b | /Python/Funciones de Alto orden/Ejemplo4.py | eae624cd1e8e739a75e60ef4e2f55f361ea537d6 | [] | no_license | DangerousCode/DAM-2-Definitivo | ffd7d99a385e9d9a821887676ecd81d3e2e1ddfc | 6fcaad2342a68a6005e062bdd8603b900dcdf147 | refs/heads/master | 2021-01-10T17:58:44.570045 | 2015-12-16T15:19:25 | 2015-12-16T15:19:25 | 47,215,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | __author__ = 'AlumnoT'
# NOTE: Python 2 syntax (print statement; filter/map/reduce return lists).
'''Funcion dada una lista de numeros y un numero cota superior,
queremos devolver aquellos elementos menores a dicha
cota'''
# Given a list of numbers and an upper bound, return the elements below it.
lista=list(range(-5,5))
'''1)Modificar la sintaxis anterior para que solo nos muestre los numeros negativos'''
# 1) Keep only the negative numbers.
print filter(lambda x:x<0,lista)
'''2)Crear funcion a la que le vamos a pasar una lista de los valores 0,1,2,3,4 y esa funcion
tiene que devolvernos una lista formada por el cuadrado del primer valor con el cubo del primer valor
(con todos los valores)'''
# 2) For each value, produce [value squared, value cubed].
print map(lambda x:[x*x,x*x*x],[0,1,2,3,4])
'''3)Generar dos listas una con valores numericos del 0 al 5 y otra con tres cadenas cuando ejecutemos la funcion
queremos que nnos muestre la media de la lista que contiene los numeros y que las tres cadenas de la segunda lista
aparezcan como una sola frase'''
# 3) Mean of the numeric list, and the three strings joined into one phrase.
lista=list(range(0,6))
listacad=["hola","que","tal"]
print (reduce(lambda x,z:x+z,lista))/len(lista)
print reduce(lambda a,b:a+" "+b,listacad)
'''4)Se nos va a facilitar una lista y una tupla con numeros debemos realizar una funcion que sume cada numero de la lista
con el correspondiente numero de su misma posicion en la tupla todo ello usando map,reduce,filter, lambda'''
# 4) Element-wise sum of a list and a tuple via a two-argument map.
lis=[1,2,3]
tup=(3,2,1)
print map(lambda x,y:x+y,lis,tup)
"asantosq1@gmail.com"
] | asantosq1@gmail.com |
1d6007a5ebcba5fca71c8d3808860c34ac1f9ede | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/addons/base/ir/ir_filters.py | 7e792068539ec5262791dfa23e1034b0a6500c7e | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,584 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import ast
from dotop import api, fields, models, _
from dotop.exceptions import UserError
class IrFilters(models.Model):
    """Saved search filters (`ir.filters`).

    A filter stores a domain/context/sort for a model, optionally private to
    one user and/or scoped to one action (menu). At most one filter may be
    the default per (model, action) scope, enforced partly in Python
    (_check_global_default) and partly by a unique index (_auto_init).
    """
    _name = 'ir.filters'
    _description = 'Filters'
    _order = 'model_id, name, id desc'

    name = fields.Char(string='Filter Name', translate=True, required=True)
    # Empty user_id means the filter is shared/public.
    user_id = fields.Many2one('res.users', string='User', ondelete='cascade', default=lambda self: self._uid,
                              help="The user this filter is private to. When left empty the filter is public "
                                   "and available to all users.")
    # domain/context/sort are stored as Python-literal strings.
    domain = fields.Text(default='[]', required=True)
    context = fields.Text(default='{}', required=True)
    sort = fields.Text(default='[]', required=True)
    model_id = fields.Selection(selection='_list_all_models', string='Model', required=True)
    is_default = fields.Boolean(string='Default filter')
    # Empty action_id means the filter is global (visible from all menus).
    action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade',
                                help="The menu action this filter applies to. "
                                     "When left empty the filter applies to all menus "
                                     "for this model.")
    active = fields.Boolean(default=True)

    @api.model
    def _list_all_models(self):
        """Return [(model, name)] selection values for every installed model."""
        self._cr.execute("SELECT model, name FROM ir_model ORDER BY name")
        return self._cr.fetchall()

    @api.multi
    def copy(self, default=None):
        """Duplicate the filter, suffixing the name to keep it unique."""
        self.ensure_one()
        default = dict(default or {}, name=_('%s (copy)') % self.name)
        return super(IrFilters, self).copy(default)

    @api.multi
    def _get_eval_domain(self):
        """Return the stored domain string evaluated to a Python list."""
        self.ensure_one()
        return ast.literal_eval(self.domain)

    @api.model
    def _get_action_domain(self, action_id=None):
        """Return a domain component for matching filters that are visible in the
        same context (menu/view) as the given action."""
        if action_id:
            # filters specific to this menu + global ones
            return [('action_id', 'in', [action_id, False])]
        # only global ones
        return [('action_id', '=', False)]

    @api.model
    def get_filters(self, model, action_id=None):
        """Obtain the list of filters available for the user on the given model.

        :param action_id: optional ID of action to restrict filters to this action
            plus global filters. If missing only global filters are returned.
            The action does not have to correspond to the model, it may only be
            a contextual action.
        :return: list of :meth:`~osv.read`-like dicts containing the
            ``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
            ``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
        """
        # available filters: private filters (user_id=uid) and public filters (uid=NULL),
        # and filters for the action (action_id=action_id) or global (action_id=NULL)
        action_domain = self._get_action_domain(action_id)
        filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])])
        user_context = self.env.user.context_get()
        return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort'])

    @api.model
    def _check_global_default(self, vals, matching_filters):
        """ _check_global_default(dict, list(dict), dict) -> None

        Checks if there is a global default for the model_id requested.

        If there is, and the default is different than the record being written
        (-> we're not updating the current global default), raise an error
        to avoid users unknowingly overwriting existing global defaults (they
        have to explicitly remove the current default before setting a new one)

        This method should only be called if ``vals`` is trying to set
        ``is_default``

        :raises dotop.exceptions.UserError: if there is an existing default and
            we're not updating it
        """
        domain = self._get_action_domain(vals.get('action_id'))
        defaults = self.search(domain + [
            ('model_id', '=', vals['model_id']),
            ('user_id', '=', False),
            ('is_default', '=', True),
        ])

        if not defaults:
            return
        if matching_filters and (matching_filters[0]['id'] == defaults.id):
            return

        raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')})

    @api.model
    @api.returns('self', lambda value: value.id)
    def create_or_replace(self, vals):
        """Create a filter, or overwrite the definition of an existing one
        matching the same (name, model, user) triple, handling defaults."""
        action_id = vals.get('action_id')
        current_filters = self.get_filters(vals['model_id'], action_id)
        matching_filters = [f for f in current_filters
                            if f['name'].lower() == vals['name'].lower()
                            # next line looks for matching user_ids (specific or global), i.e.
                            # f.user_id is False and vals.user_id is False or missing,
                            # or f.user_id.id == vals.user_id
                            if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')]

        if vals.get('is_default'):
            if vals.get('user_id'):
                # Setting new default: any other default that belongs to the user
                # should be turned off
                domain = self._get_action_domain(action_id)
                defaults = self.search(domain + [
                    ('model_id', '=', vals['model_id']),
                    ('user_id', '=', vals['user_id']),
                    ('is_default', '=', True),
                ])
                if defaults:
                    defaults.write({'is_default': False})
            else:
                self._check_global_default(vals, matching_filters)

        # When a filter exists for the same (name, model, user) triple, we simply
        # replace its definition (considering action_id irrelevant here)
        if matching_filters:
            matching_filter = self.browse(matching_filters[0]['id'])
            matching_filter.write(vals)
            return matching_filter

        return self.create(vals)

    _sql_constraints = [
        # Partial constraint, complemented by unique index (see below). Still
        # useful to keep because it provides a proper error message when a
        # violation occurs, as it shares the same prefix as the unique index.
        ('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
    ]

    @api.model_cr_context
    def _auto_init(self):
        """Create the table, then enforce case-insensitive name uniqueness
        (per model/user/action) with a functional unique index, since SQL
        constraints cannot use lower()."""
        result = super(IrFilters, self)._auto_init()
        # Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
        self._cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index")  # drop old index w/o action
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'")
        if not self._cr.fetchone():
            self._cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters
                                (lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""")
        return result
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
e5131ff29aa41698036707a61a86466d77e7d3b9 | 6c50bced6fb4474e4eb2e4f3c27a5ce38b0e6048 | /manage.py | e1fbda688388d8db4449c6abeb1423356d40d79b | [] | no_license | NMShihab/WebChatApp | 0d5651fe38baccfee186e59e32c2c79de2bb39a4 | 2dda4e750c370e74bbfbc42dce02432268194d46 | refs/heads/master | 2023-02-01T22:57:53.738222 | 2020-12-15T17:09:14 | 2020-12-15T17:09:14 | 319,082,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Bootstrap the Django settings module and hand over to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatApi.settings')
    try:
        # Imported lazily so a missing Django produces the friendly hint below.
        from django.core.management import execute_from_command_line as dispatch
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    dispatch(sys.argv)


if __name__ == '__main__':
    main()
| [
"nmshihabislam@gmail.com"
] | nmshihabislam@gmail.com |
43730854b668cdc8e523b81d756b9615a915f5d5 | 2ff113af86a2cde69ccf114a98e3a2092f751993 | /Aula18/B - Replacing Digits/b.py | 12b37f10be0ef9aafdf7b37695fe1680e768444c | [] | no_license | Math-Gomes/ProgramacaoCompetitiva | 7874e6a3cbcfadb7d4c2366f178d69b02909daf8 | 4ce79f5cb564ba7e07fdcee0995aa476b883c7a3 | refs/heads/master | 2023-08-13T10:26:43.481381 | 2021-09-22T12:46:07 | 2021-09-22T12:46:07 | 380,554,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | if __name__ == '__main__':
# Read the target number's digits and the pool of replacement digits.
a = list(map(int, input()))
s = list(map(int, input()))
# Greedily replace the smallest digit of `a` with the largest remaining
# digit of `s`, stopping once no replacement could help. Looping only
# while `s` is non-empty fixes a ValueError (max() of an empty list)
# when the pool is exhausted before the break condition triggers.
while s:
    max_s = max(s)
    min_a = min(a)
    if max_s < min_a:
        break
    s.remove(max_s)
    a[a.index(min_a)] = max_s
print(*a, sep='')
| [
"mathjvmf@gmail.com"
] | mathjvmf@gmail.com |
f0558330618b47efd52ea7dae4624354fe0c32ac | 89b45e528f3d495f1dd6f5bcdd1a38ff96870e25 | /pyneng/exercises/09_functions/task_9_2.py | e2a25f74f4ea48dd6a5f51879221d1048f8a5c94 | [] | no_license | imatyukin/python | 2ec6e712d4d988335fc815c7f8da049968cc1161 | 58e72e43c835fa96fb2e8e800fe1a370c7328a39 | refs/heads/master | 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 | Python | UTF-8 | Python | false | false | 2,935 | py | # -*- coding: utf-8 -*-
"""
Задание 9.2
Создать функцию generate_trunk_config, которая генерирует
конфигурацию для trunk-портов.
У функции должны быть такие параметры:
- intf_vlan_mapping: ожидает как аргумент словарь с соответствием интерфейс-VLANы
такого вида:
{'FastEthernet0/1': [10, 20],
'FastEthernet0/2': [11, 30],
'FastEthernet0/4': [17]}
- trunk_template: ожидает как аргумент шаблон конфигурации trunk-портов в виде
списка команд (список trunk_mode_template)
Функция должна возвращать список команд с конфигурацией на основе указанных портов
и шаблона trunk_mode_template. В конце строк в списке не должно быть символа
перевода строки.
Проверить работу функции на примере словаря trunk_config
и списка команд trunk_mode_template.
Если предыдущая проверка прошла успешно, проверить работу функции еще раз
на словаре trunk_config_2 и убедится, что в итоговом списке правильные номера
интерфейсов и вланов.
Пример итогового списка (перевод строки после каждого элемента сделан
для удобства чтения):
[
'interface FastEthernet0/1',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 10,20,30',
'interface FastEthernet0/2',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 11,30',
...]
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
from pprint import pprint
# Per-port command template; the line ending in 'allowed vlan' gets the
# comma-separated VLAN list appended by generate_trunk_config.
trunk_mode_template = [
    "switchport mode trunk",
    "switchport trunk native vlan 999",
    "switchport trunk allowed vlan",
]

# Sample interface -> VLAN-list mappings used by the checks at the bottom.
trunk_config = {
    "FastEthernet0/1": [10, 20, 30],
    "FastEthernet0/2": [11, 30],
    "FastEthernet0/4": [17],
}

trunk_config_2 = {
    "FastEthernet0/11": [120, 131],
    "FastEthernet0/15": [111, 130],
    "FastEthernet0/14": [117],
}
def generate_trunk_config(intf_vlan_mapping, trunk_template):
    """Generate trunk-port configuration commands.

    :param intf_vlan_mapping: dict mapping interface name to a list of
        VLAN ids, e.g. {'FastEthernet0/1': [10, 20]}
    :param trunk_template: list of command strings; the one ending in
        'allowed vlan' has the comma-separated VLAN ids appended.
    :return: flat list of configuration commands (no newline characters).
    """
    cfg = []
    for intf, vlans in intf_vlan_mapping.items():
        cfg.append("interface " + intf)
        for command in trunk_template:
            if command.endswith('allowed vlan'):
                # Join the ids explicitly instead of slicing str(list),
                # which silently depends on the list repr formatting.
                command = command + ' ' + ','.join(str(vlan) for vlan in vlans)
            cfg.append(command)
    return cfg
# Exercise the generator with both sample mappings and show the result.
pprint(generate_trunk_config(trunk_config, trunk_mode_template))
pprint(generate_trunk_config(trunk_config_2, trunk_mode_template))
| [
"i.matyukin@gmail.com"
] | i.matyukin@gmail.com |
0a7ff4211eaca98470e2742585ac72c1dbe492de | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02415/s303347384.py | f7612caa107b4023d41f174a9952151845dbb81a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | word = input()
# Echo the line read above with every letter's case inverted.
print(word.swapcase())
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ac9c7f15ea1547bd32a8c41e2f64470813bf0d52 | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/client/gui/scaleform/daapi/view/meta/questswindowmeta.py | 66a92293420cda94a63d878facfa96ffceb268d2 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from gui.Scaleform.framework.entities.DAAPIModule import DAAPIModule
class QuestsWindowMeta(DAAPIModule):
    """Empty DAAPI meta stub for the quests window; adds nothing to DAAPIModule."""
    pass
| [
"james.sweet88@googlemail.com"
] | james.sweet88@googlemail.com |
88881e340fd70a3969fd1822c2d1552ff989a8c7 | c56fdac92304316ebe52796d619e5e72c564475b | /hw2/perceptron.py | f55a6de69ef7e861414fa307acc847777cedc155 | [] | no_license | RamisesM/Learning-From-Data | 0cf043b0474992441d746c2aff8ef6c7134bc7e6 | dde2677773fea2bff48899371a58cebda75449ae | refs/heads/master | 2020-03-07T11:30:45.894709 | 2018-05-14T20:14:20 | 2018-05-14T20:14:20 | 127,457,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | import random
import numpy
class Point:
    """A point drawn uniformly from the square [-1, 1] x [-1, 1].

    `vec` holds the homogeneous coordinates (1, x, y) so the point can be
    dotted directly against a 3-element weight vector.
    """

    def __init__(self):
        self.c = 1
        sample_x = random.uniform(-1, 1)
        sample_y = random.uniform(-1, 1)
        self.x = sample_x
        self.y = sample_y
        self.vec = (self.c, self.x, self.y)
class Function:
    """A line in the plane stored as homogeneous weights f = [f0, f1, f2]."""

    def __init__(self):
        self.f = [0, 0, 0]

    def randomize(self):
        """Replace the weights with a random line through two random points."""
        anchor, other = Point(), Point()
        slope = (other.y - anchor.y) / (other.x - anchor.x)
        intercept = anchor.y - slope * anchor.x
        # Homogeneous form: intercept + slope*x - y = 0.
        self.f = [intercept, slope, -1]

    def classify(self, point):
        """Return which side (+1/-1) of the line the point lies on; 0 maps to -1."""
        side = numpy.sign(numpy.inner(point.vec, self.f))
        return -1 if side == 0 else side
def perceptron(target_function, hypothesis, data_set):
    """Run the Perceptron Learning Algorithm.

    Repeatedly picks a random misclassified point and nudges
    ``hypothesis.f`` toward the target labelling until every point in
    ``data_set`` is classified the same way ``target_function``
    classifies it.

    :param target_function: Function providing ground-truth labels.
    :param hypothesis: Function whose weight vector ``f`` is updated in place.
    :param data_set: indexable sequence of Point objects.
    :return: number of update iterations performed.

    NOTE: the loop only terminates when no point is misclassified, so it
    does not return for data that is not linearly separable.
    """
    f_set = []
    for point in data_set:
        f_set += [target_function.classify(point)]
    h_set = []
    for point in data_set:
        h_set += [hypothesis.classify(point)]
    # misclassified points
    misclassified_set = []
    for index in range(len(data_set)):
        if h_set[index] != f_set[index]:
            misclassified_set += [index]
    number_of_iterations = 0
    while len(misclassified_set) != 0:
        test_index = misclassified_set[random.randint(0, len(misclassified_set)-1)]
        test_point = data_set[test_index]
        # Perceptron update rule: w <- w + y * x for the chosen point.
        hypothesis.f = [hypothesis.f[i] + f_set[test_index]*test_point.vec[i] for i in range(3)]
        # updating h_set
        h_set = []
        for point in data_set:
            h_set += [hypothesis.classify(point)]
        # updating misclassified_set
        misclassified_set = []
        for index in range(len(data_set)):
            if h_set[index] != f_set[index]:
                misclassified_set += [index]
        number_of_iterations += 1
    return number_of_iterations
| [
"ramises.martins@gmail.com"
] | ramises.martins@gmail.com |
17fa82a9093701e46b8648bd51b5684c11c5f8c9 | 5d6365f4cc81272f8c481ee31f1111e8eca6dca5 | /alipay/aop/api/domain/BizActionLogDTO.py | bdaee8dcf4791f2ea8f5f6ac64c0cb3184f154de | [
"Apache-2.0"
] | permissive | barrybbb/alipay-sdk-python-all | 9e99b56138e6ca9c0b236707c79899d396ac6f88 | 1b63620431d982d30d39ee0adc4b92463cbcee3c | refs/heads/master | 2023-08-22T20:16:17.242701 | 2021-10-11T08:22:44 | 2021-10-11T08:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,378 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizActionLogDTO(object):
    """Value object mirroring Alipay's BizActionLog payload.

    Every field is exposed through a plain property pair and can be
    converted to/from the dict shape used by the OpenAPI gateway.
    """

    # Every serializable field, in wire-format order.
    _FIELDS = (
        'amount',
        'biz_budget_apply_code',
        'biz_budget_id',
        'biz_name',
        'biz_type',
        'biz_uk_id',
        'gmt_create',
        'gmt_modified',
        'id',
        'modify_type',
    )

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def biz_budget_apply_code(self):
        return self._biz_budget_apply_code

    @biz_budget_apply_code.setter
    def biz_budget_apply_code(self, value):
        self._biz_budget_apply_code = value

    @property
    def biz_budget_id(self):
        return self._biz_budget_id

    @biz_budget_id.setter
    def biz_budget_id(self, value):
        self._biz_budget_id = value

    @property
    def biz_name(self):
        return self._biz_name

    @biz_name.setter
    def biz_name(self, value):
        self._biz_name = value

    @property
    def biz_type(self):
        return self._biz_type

    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value

    @property
    def biz_uk_id(self):
        return self._biz_uk_id

    @biz_uk_id.setter
    def biz_uk_id(self, value):
        self._biz_uk_id = value

    @property
    def gmt_create(self):
        return self._gmt_create

    @gmt_create.setter
    def gmt_create(self, value):
        self._gmt_create = value

    @property
    def gmt_modified(self):
        return self._gmt_modified

    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def modify_type(self):
        return self._modify_type

    @modify_type.setter
    def modify_type(self, value):
        self._modify_type = value

    def to_alipay_dict(self):
        """Serialize the truthy fields, recursing into nested DTO values."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = BizActionLogDTO()
        for field in BizActionLogDTO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
93a759dd1d4ce068810fd67a473fd7f242615fd5 | f2fcf807b441aabca1ad220b66770bb6a018b4ae | /coderbyte/StringMerge.py | aee27511c52f7fc9c13b05cde0262bec9a847235 | [] | no_license | gokou00/python_programming_challenges | 22d1c53ccccf1f438754edad07b1d7ed77574c2c | 0214d60074a3b57ff2c6c71a780ce5f9a480e78c | refs/heads/master | 2020-05-17T15:41:07.759580 | 2019-04-27T16:36:56 | 2019-04-27T16:36:56 | 183,797,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | def StringMerge(string):
stringArr = string.split("*")
arr1 = stringArr[0]
arr2 = stringArr[1]
strBuild = ""
for i in range(len(arr1)):
strBuild+= arr1[i]
strBuild+= arr2[i]
return strBuild
print(StringMerge("123hg*aaabb"))
| [
"gamblecua@gmail.com"
] | gamblecua@gmail.com |
46fd7987e76562876a9df13d571ec26da2089cf7 | bcc90e2a3ef609caf24fa427061750cb7ed807ba | /Decorator/ConcreteComponent.py | 5b6989308ff4c75e35df9b51e663518ce6ef0f15 | [] | no_license | vudt93/DesignPattern | 9140eb16544b1a02da1f889f5713b499166e9046 | 3f21df6be2b46fd4f5648b6d30b450699faabcbf | refs/heads/master | 2021-03-24T01:19:56.332113 | 2020-03-17T09:36:26 | 2020-03-17T09:36:26 | 247,502,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from Decorator.Component import Component
class ConcreteComponent(Component):
    """Decorator-pattern leaf: the plain, undecorated component."""

    def do_operation(self):
        # Demo implementation: just announce that the operation ran.
        print("Operation")
"vu.do@cj.net"
] | vu.do@cj.net |
1989906ee223d14319cc93f1ef9c3f3bb7ce946e | addb8ac420db7328afd209639204b526edcf9a15 | /W-Maze/Tabular-Q/env.py | 4d3c5a70dc6ee69c0e79b15372fd8f60f18fcf08 | [] | no_license | logic2code/DelayResolvedRL | ee704c8d4e9b1df2867dbe0ac77ab112ee4c0d89 | a291875417a0e52fe09294d7f78ef9b3c9045b9c | refs/heads/main | 2023-07-14T04:39:42.115756 | 2021-08-13T06:57:31 | 2021-08-13T06:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,162 | py | import numpy as np
from collections import deque
class Environment:
    """W-shaped 7x11 grid maze with a goal row and a random action-delay buffer.

    Grid cells: 'E' empty, 'X' wall, 'G' goal, 'P' player. Each step costs
    -1 reward; reaching a goal cell yields +11 and respawns the player.
    """

    # (row, col) offsets for the actions UP, DOWN, LEFT, RIGHT.
    _MOVES = {0: (-1, 0), 1: (1, 0), 2: (0, -1), 3: (0, 1)}

    def __init__(self, seed, delay):
        """Build the maze. `seed` fixes numpy's RNG; `delay` sizes the action buffer."""
        np.random.seed(seed)
        self.breadth = 7
        self.length = 11
        self.state_space = np.empty([self.breadth, self.length], dtype='<U1')
        '''Environment Configuration'''
        self.state_space[:] = 'E'
        self.state_space[0] = 'X'
        self.state_space[1:4, self.length // 2 - 2] = 'X'
        self.state_space[1:4, self.length // 2 + 2] = 'X'
        self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G'
        self.state_space[self.breadth - 1, 0] = 'P'
        '''Actions'''
        self.actions = [0, 1, 2, 3]  # UP, DOWN, LEFT, RIGHT
        self.num_actions = len(self.actions)
        self.turn_limit = 300  # episode cap (not enforced inside this class)
        self.delay = delay
        self.actions_in_buffer = deque(maxlen=self.delay)
        self.fill_up_buffer()
        self.delayed_action = 0
        self.state = self.reset()

    def reset(self):
        """Respawn the player at a random row in the leftmost column."""
        x = np.random.randint(self.breadth)
        y = 0
        starting_state = [x, y]
        self.state_space[x, y] = 'P'
        self.fill_up_buffer()
        return starting_state

    def fill_up_buffer(self):
        """Refill the delay buffer with uniformly random actions."""
        for _ in range(self.delay):
            self.actions_in_buffer.append(np.random.choice(self.num_actions))

    def step(self, state, action):
        """Apply `action` from `state`.

        Returns (next_state, reward, done). Moves into walls ('X') or off
        the grid leave the state unchanged; reaching 'G' sets done, yields
        reward 11 and respawns the player via reset().
        """
        done = False
        reward = -1
        row, col = state[0], state[1]
        # One movement rule replaces the four near-identical per-action branches.
        move = self._MOVES.get(action)
        if move is None:
            # Unknown action: no-op, matching the original fall-through.
            return self.state, reward, done
        new_row, new_col = row + move[0], col + move[1]
        inside = 0 <= new_row < self.breadth and 0 <= new_col < self.length
        if inside and self.state_space[new_row, new_col] != 'X':
            self.state_space[row, col] = 'E'
            if self.state_space[new_row, new_col] == 'G':
                done = True
                self.state = self.reset()
                reward = 11
            else:
                # Always store the state as a list (the original mixed lists
                # and tuples depending on the action taken).
                self.state = [new_row, new_col]
                self.state_space[new_row, new_col] = 'P'
        return self.state, reward, done
| [
"noreply@github.com"
] | logic2code.noreply@github.com |
2a132a7f304bf03097919aab6ebca25961224c39 | 4558f88bc7b48a692599aac4d2316201e6c95a02 | /scud/plt/er_log/phil.py | 0a245e28e09a42bb507e5403f76287bb172726c9 | [] | no_license | kroon-lab/scud | bb3f7dc05c1000c0816d1b458d1c74bd74413053 | b55423edb4b0e33110cf96fbd3828f86166924c9 | refs/heads/master | 2020-03-18T06:49:26.989684 | 2019-05-01T14:56:14 | 2019-05-01T14:56:14 | 134,412,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | import libtbx.phil
from scud.general.phil_methods import init_command_line_phil
def phil_parse(args=None,log=None):
    '''Build the default er_log phil parameter scope, fold in any
    command-line overrides from `args`, and return the extracted
    parameter object.
    '''
    # Default parameters
    master_phil = libtbx.phil.parse("""
    er_log
      {
        input
          .help = "Input files"
          {
            log = None
              .type = path
              .help = 'File name of PDB containing ensemble to be converted to supercell'
          }
        params
          .help = "Control running"
          {
            title = None
              .type = str
              .help = 'Plot Title'
            show = False
              .type = bool
              .help = 'show plot or not'
          }
        output
          .help = "output files"
          {
            plot_out = plt.eps
              .type = path
              .help = 'Name of output plot'
          }
      }
    """)
    # Merge the command-line arguments into the defaults, then return the
    # plain extracted parameters.
    working_phil = init_command_line_phil(master_phil=master_phil,
                                          args=args,
                                          log=log)
    return working_phil.extract()
| [
"l.m.j.kroon-batenburg@uu.nl"
] | l.m.j.kroon-batenburg@uu.nl |
0106c4e95e4cb7a8b9b3ea1a99c3e6cf72e413fa | ee461003c4836dcc2e7c493e7b705841825cba52 | /titanic/variable_builder.py | 5e497b440f4638a3102f6f594e2c37429bcd89c0 | [] | no_license | kenta-s/kaggle | 5e05b10b2455f8e5744dc4aab99def3b15681063 | b68ddfede3480214a163d4d8a778e4eb74d4f6f9 | refs/heads/master | 2021-06-24T03:37:41.385822 | 2018-10-27T00:36:04 | 2018-10-27T00:36:04 | 96,259,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import numpy as np
import pandas as pd
from IPython import embed
class VariableBuilder():
    """Loads a Titanic CSV and turns selected columns into numeric features."""

    def __init__(self, file):
        # Entire CSV is held in memory as a DataFrame.
        self.df = pd.read_csv(file)

    def __call__(self):
        # NOTE(review): build_variable_x is not defined on this class, so
        # calling an instance raises AttributeError. It was probably meant
        # to be build_train_variable -- confirm against callers.
        valid_data = self.build_variable_x()
        valid_data = np.array(valid_data).astype(np.float32).T
        return valid_data

    @staticmethod
    def convert_sex_to_int(str):
        """Map 'male'->0, 'female'->1, anything else->2."""
        if str == 'male':
            return 0
        elif str == 'female':
            return 1
        else:
            return 2

    @staticmethod
    def convert_embarked_to_int(str):
        """Map embarkation codes S/C/Q to 0/1/2; any other value to 3."""
        if str == 'S':
            return 0
        elif str == 'C':
            return 1
        elif str == 'Q':
            return 2
        else:
            return 3

    def build_train_variable(self):
        """Return a list of (features, label) pairs from the loaded frame.

        Features (float32): Pclass, sex, age (NaN -> 0.0), SibSp, Parch,
        Fare, embarked. Label: Survived cast to int32.
        """
        sex_list = list(map(VariableBuilder.convert_sex_to_int, self.df.Sex))
        age_list = list(map(lambda x: 0.0 if np.isnan(x) else x, self.df.Age))
        embarked_list = list(map(VariableBuilder.convert_embarked_to_int, self.df.Embarked))
        valid_data = np.array([
            self.df.Pclass,
            sex_list,
            age_list,
            self.df.SibSp,
            self.df.Parch,
            self.df.Fare,
            embarked_list,
            self.df.Survived
        ]).astype(np.float32)
        data = list(map(lambda x: (np.array(x[0:7]), np.array(x[7]).astype(np.int32)), valid_data.T))
        return data

    def build_test_variable(self, file):
        """Like build_train_variable, but labels come from a second CSV `file`."""
        sex_list = list(map(VariableBuilder.convert_sex_to_int, self.df.Sex))
        age_list = list(map(lambda x: 0.0 if np.isnan(x) else x, self.df.Age))
        embarked_list = list(map(VariableBuilder.convert_embarked_to_int, self.df.Embarked))
        df2 = pd.read_csv(file)
        survived = df2.Survived
        valid_data = np.array([
            self.df.Pclass,
            sex_list,
            age_list,
            self.df.SibSp,
            self.df.Parch,
            self.df.Fare,
            embarked_list,
            survived
        ]).astype(np.float32)
        data = list(map(lambda x: (np.array(x[0:7]), np.array(x[7]).astype(np.int32)), valid_data.T))
        return data
| [
"knt01222@gmail.com"
] | knt01222@gmail.com |
b69ca6b786925c7020c263729f5d7bd1e74e3d05 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第七模块学习/Day04/EdmureBlog/web/forms/base.py | ab198421829eb1b2c3ebc96a9c1743d571cc884e | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 208 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
class BaseForm(object):
    """Form mixin that records the current HTTP request on the instance."""

    def __init__(self, request, *args, **kwargs):
        # Stash the request so form methods can reach it via self.request.
        self.request = request
        super(BaseForm, self).__init__(*args, **kwargs)
| [
"wangwei_198811@163.com"
] | wangwei_198811@163.com |
class Node(object):
    """Binary-tree node that wires itself into its parent on construction."""

    def __init__(self, val, parent, isleft):
        self.val = val
        self.parent = parent
        self.leftchild = None
        self.rightchild = None
        if parent is None:
            return
        # Attach this node on the requested side of its parent.
        if isleft:
            parent.setleftChild(self)
        else:
            parent.setrightChild(self)

    def setleftChild(self, left):
        """Install `left` as the left child and fix its parent pointer."""
        self.leftchild = left
        if left is not None:
            left.parent = self

    def setrightChild(self, right):
        """Install `right` as the right child and fix its parent pointer."""
        self.rightchild = right
        if right is not None:
            right.parent = self
class AvlTree(object):
    """AVL-style binary search tree.

    NOTE(review): the rebalancing logic is incomplete (see insert()), and
    printNode() uses Python 2 print statements.
    """

    def __init__(self, cmp_func, rootval):
        # cmp_func(a, b) < 0 means `a` sorts before `b`.
        self.cmp = cmp_func
        self.root = Node(rootval, None, False)

    def height(self, node):
        """Recursively computed height of the subtree at `node` (0 when empty)."""
        if node is None:
            return 0
        else:
            return 1 + max(self.height(node.leftchild), self.height(node.rightchild))

    def insert(self, element):
        """Insert `element` in BST order, then try to rebalance the lowest unbalanced ancestor."""
        n = self.root
        # insert directly
        inode = None
        while True:
            if self.cmp(element, n.val) < 0:
                if n.leftchild is None:
                    inode = Node(element, n, True)
                    break
                else:
                    n = n.leftchild
            else:
                if n.rightchild is None:
                    inode = Node(element, n, False)
                    break
                else:
                    n = n.rightchild
        # find unbalanced subtree
        isleft_rotate = True  # NOTE(review): computed but never read afterwards
        path_directs = []  # 0 = came up from a left child, 1 = from a right child
        while inode is not None:
            lh = self.height(inode.leftchild)
            rh = self.height(inode.rightchild)
            hdiff = lh - rh
            if hdiff > 1:
                isleft_rotate = False
                break
            elif hdiff < -1:
                break
            if inode.parent is not None:
                if inode == inode.parent.leftchild:
                    path_directs.append(0)
                else:
                    path_directs.append(1)
            inode = inode.parent
        # rebalance
        if inode is not None:
            # NOTE(review): indexes path_directs[-2], so an imbalance found
            # within two steps of the new node raises IndexError. The two
            # mixed cases call left_right_rotate / right_left_rotate, which
            # are not defined on this class (AttributeError if ever taken).
            if path_directs[-2] == 1 and path_directs[-1] == 1:
                self.left_rotate(inode.rightchild, inode)
            elif path_directs[-2] == 0 and path_directs[-1] == 0:
                self.right_rotate(inode.leftchild, inode)
            elif path_directs[-2] == 1 and path_directs[-1] == 0:
                self.left_right_rotate(inode.leftchild, inode)
            elif path_directs[-2] == 0 and path_directs[-1] == 1:
                self.right_left_rotate(inode.leftchild, inode)

    def search(self, value):
        """Return True when `value` is present.

        NOTE(review): compares with `>` directly rather than self.cmp,
        unlike insert().
        """
        n = self.root
        while n is not None and n.val != value:
            if n.val > value:
                n = n.leftchild
            else:
                n = n.rightchild
        return n is not None

    def left_rotate(self, node, pnode):
        """Rotate `node` up over its parent `pnode`; pnode becomes node's left child."""
        pp = pnode.parent
        if pp is None:
            self.root = node
            node.parent = None
        else:
            if pp.leftchild == pnode:
                pp.setleftChild(node)
            else:
                pp.setrightChild(node)
        pnode.setrightChild(node.leftchild)
        node.setleftChild(pnode)

    def right_rotate(self, node, pnode):
        """Mirror of left_rotate: pnode becomes node's right child."""
        pp = pnode.parent
        if pp is None:
            self.root = node
            node.parent = None
        else:
            if pp.leftchild == pnode:
                pp.setleftChild(node)
            else:
                pp.setrightChild(node)
        pnode.setleftChild(node.rightchild)
        node.setrightChild(pnode)

    def printTree(self):
        """Pre-order dump of the whole tree (Python 2 print)."""
        self.printNode(self.root)

    def printNode(self, node):
        print node.val
        if node.leftchild is not None:
            self.printNode(node.leftchild)
        if node.rightchild is not None:
            self.printNode(node.rightchild)
#test right rotation
# Smoke test (Python 2 print syntax): an ascending run of inserts forces
# rotations, then the tree is dumped pre-order and two lookups printed.
tree = AvlTree(lambda x,y:x-y,1)
tree.insert(5)
tree.insert(8)
tree.insert(10)
tree.insert(11)
tree.insert(12)
tree.printTree()
print tree.search(11)
print tree.search(7)
| [
"sancheng@cisco.com"
] | sancheng@cisco.com |
b22afa174867cbcdb44387342cabbb4d1d5cce42 | 77a7f05272e82024cffa7ec3bf79b5cb5f90ee3e | /job_search_webapp_project/jobsearch/scraping/dice.py | e308bc35ab1f12a91bf963fe88d208574d75ae8f | [] | no_license | fergusonsa/JobSearch_Django | 5c3ff42cf59cd9380e5b10c3a2a64582382bbb55 | 35d626555b5dad8309358e3fde3c093a7df12702 | refs/heads/master | 2021-09-26T04:42:42.989940 | 2020-09-27T15:07:00 | 2020-09-27T15:07:00 | 85,593,500 | 0 | 0 | null | 2021-09-22T17:41:15 | 2017-03-20T15:26:32 | HTML | UTF-8 | Python | false | false | 20,929 | py | # coding: utf-8
import logging
import datetime
import re
import requests
from bs4 import BeautifulSoup
import geopy.geocoders
import geopy.distance
import geopy.exc
import jobsearch.scraping
import jobsearch.models as models
NUMBER_POSTINGS_PER_REQUEST = 25
MAX_POSTINGS_RETRIEVED = 1000
logger = logging.getLogger(__name__)
def get_max_len_of_dict_vals_for_keys(this_dict, keys):
    """Return the length of the longest truthy value of `this_dict` among `keys`.

    Missing or falsy values count as length 0; an empty `keys` gives 0.
    """
    longest = 0
    for key in keys:
        value = this_dict.get(key)
        length = len(value) if value else 0
        if length > longest:
            longest = length
    return longest
def get_max_len_of_dict_vals_for_key_in_list_of_dicts(dict_list, keys):
    """Longest value length for `keys` across every dict in `dict_list` (0 if empty)."""
    longest = 0
    for entry in dict_list:
        candidate = get_max_len_of_dict_vals_for_keys(entry, keys)
        if candidate > longest:
            longest = candidate
    return longest
def convert_ago_to_date(ago_str):
    """Translate a relative posting age ('3 days ago', 'today', ...) to a date string.

    Minutes/hours resolve to '%Y-%m-%d %H:%M', days to '%Y-%m-%d',
    '30+' days to the literal 'over 30 days old'. Unrecognized input
    (or any conversion error, which is logged) is returned unchanged.
    """
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    try:
        lowered = ago_str.lower()
        if lowered in ('just posted', 'today'):
            return today
        amount = re.sub('([0-9]+[+]?) (?:minute[s]?|hour[s]?|day[s]?) ago',
                        r"\1",
                        ago_str)
        if 'minute' in ago_str:
            delta = datetime.timedelta(minutes=int(amount))
            return (datetime.datetime.now() - delta).strftime('%Y-%m-%d %H:%M')
        if 'hour' in ago_str:
            delta = datetime.timedelta(hours=int(amount))
            return (datetime.datetime.now() - delta).strftime('%Y-%m-%d %H:%M')
        if 'today' in lowered:
            return today
        if 'day' in ago_str:
            if amount == '30+':
                return 'over 30 days old'
            delta = datetime.timedelta(days=int(amount))
            return (datetime.datetime.now() - delta).strftime('%Y-%m-%d')
        return ago_str
    except Exception as exc:
        logger.error('Could not convert "{}" to a date'.format(ago_str), exc)
        return ago_str
def parse_html_page(page_html, source, job_site_details, aliases, geo_locator, home_location, geo_locations,
                    search_terms='',
                    verbose=False):
    """Parse one search-results page into job postings and persist them.

    Config-driven scraping: ``job_site_details['parseInfo']`` names the
    HTML elements/attributes/regexes for the posting container and for
    each per-posting field. Example ``numberJobsFound`` entry::

        'numberJobsFound': {
            'element': 'div',
            'criteria': {'id': 'searchCount'},
            'regex': '^Jobs (?:[0-9,]+) to (?:[0-9,]+) of ([0-9,]+)$',
        }

    Returns (postings_by_id, items_on_page, total_jobs_reported);
    the total is -1 when the site exposes no job count.
    """
    logger.debug(('parse_html_page(page_html, job_site_details={}, # aliases={}, geo_locator, home_location, '
                  'geo_locations, search_terms={}, verbose={})').format(job_site_details, len(aliases),
                                                                        search_terms, verbose))
    soup = BeautifulSoup(page_html, 'html.parser')
    total_number_jobs_found = -1
    num_jobs_details = job_site_details['parseInfo'].get('numberJobsFound')
    if num_jobs_details:
        number_postings_elem = soup.find(num_jobs_details['element'],
                                         num_jobs_details['criteria'])
        if number_postings_elem:
            # Prefer an attribute when configured, else the element text.
            prop = num_jobs_details.get('property')
            if prop:
                value = number_postings_elem[prop]
            elif hasattr(number_postings_elem, 'text'):
                value = number_postings_elem.text
            else:
                value = number_postings_elem.string
            if num_jobs_details.get('regex'):
                value = re.sub(num_jobs_details['regex'], r"\1", value)
            stripped_val = value.replace(',', '')
            if stripped_val.isdigit():
                total_number_jobs_found = int(stripped_val)
            else:
                logger.info(
                    'For %s site, the numberJobsFound parsing information, "%s", appears to return a non-numeric '
                    'string "%s" '
                    % (job_site_details['netLoc'], num_jobs_details['regex'], value))
                total_number_jobs_found = 1  # Just to ensure that it is known that at least 1 job found
    items = soup.findAll(job_site_details['parseInfo']['parentElement'],
                         job_site_details['parseInfo']['parentCriteria'])
    postings_list = {}
    for it in items:
        posting_info = {'elem': it, 'searchTerms': search_terms}
        # Extract every configured field from this posting container.
        for field in job_site_details['parseInfo']['fields'].keys():
            field_info = job_site_details['parseInfo']['fields'][field]
            # logger.info('looking for field {}'.format(field))
            try:
                value = None
                elem_type = field_info['element']
                if elem_type == 'parent':
                    elem = it
                else:
                    elem = it.find(elem_type, field_info.get('criteria'))
                prop = field_info.get('property')
                if prop and elem.has_attr(prop):
                    value = elem[prop]
                elif hasattr(elem, 'text'):
                    value = elem.text
                elif elem:
                    value = elem.string
                if field_info.get('regex'):
                    value = re.sub(field_info['regex'], r"\1", value)
                if value:
                    # Collapse whitespace runs and trim both ends.
                    posting_info[field] = re.sub(r"^\s+|\s+$|\s+(?=\s)", "", value)
            except Exception as exc:
                logger.error(('Unable to parse posting {} information for item: '
                              '\n\n{} \n\nError type: {}, val: {}').format(field, it,
                                                                           type(exc), exc))
        if posting_info.get('id'):
            if posting_info.get('postedDate'):
                posting_info['postedDate'] = convert_ago_to_date(
                    posting_info['postedDate'])
            if posting_info.get('url'):
                # Field URLs are site-relative; prefix scheme and host.
                posting_info['url'] = 'http://{}{}'.format(
                    job_site_details['netLoc'], posting_info['url'])
            if posting_info.get('elem'):
                # Absolutize every anchor inside the stored element too.
                link_elements = posting_info['elem'].findAll('a')
                for linkElem in link_elements:
                    if not linkElem['href'].startswith('http'):
                        if linkElem['href'].startswith('/'):
                            linkElem['href'] = 'http://{}{}'.format(
                                job_site_details['netLoc'], linkElem['href'])
                        else:
                            linkElem['href'] = 'http://{}/{}'.format(
                                job_site_details['netLoc'], linkElem['href'])
            if posting_info.get('locale'):
                posting_info['locale'] = posting_info['locale'].replace(' , ', ', ')
            if jobsearch.scraping.save_posting_to_db(posting_info, source, search_terms, aliases,
                                                     geo_locator, home_location, geo_locations):
                postings_list[posting_info['id']] = posting_info
                if verbose:
                    logger.info(('Adding item details for id "{}" to list with posted'
                                 ' Date {}').format(posting_info['id'],
                                                    posting_info.get('postedDate')))
        else:
            logger.info('Unknown item not being added to list')
    return postings_list, len(items), total_number_jobs_found
def sort_by_sub_dict(dictionary, sub_dict_key):
    """Return dictionary.items() ordered by each value's `sub_dict_key` entry."""
    def _sub_value(item):
        return item[1][sub_dict_key]

    return sorted(dictionary.items(), key=_sub_value)
def login_to_web_site(session, job_site_detail_info):
    """POST the job site's login form when credentials are configured.

    The form is submitted through `session` (a requests.Session) so any
    cookies the site sets persist for later requests on that session.
    When username or password is missing, the login is skipped and a
    debug message is logged instead.
    """
    logger.debug('login_to_web_site(session, job_site_detail_info={})'.format(job_site_detail_info))
    if job_site_detail_info.get('username') and job_site_detail_info.get('password'):
        login_data = {
            'action': 'Login',
            '__email': job_site_detail_info['username'],
            '__password': job_site_detail_info['password'],
            'remember': '1',
            'hl': 'en',
            # 'continue': '/account/view?hl=en',
        }
        if job_site_detail_info['nextUrl']:
            login_data['next'] = job_site_detail_info['nextUrl']
        # res = session.get(job_site_detail_info['loginUrl'], verify=False)
        res = session.post(job_site_detail_info['loginUrl'], data=login_data,
                           headers={"Referer": "HOMEPAGE"})
        # if logger.getLogger().getEffectiveLevel() == logger.DEBUG:
        logger.debug('session.post("{}", data={}) returns {}'.format(job_site_detail_info['loginUrl'],
                                                                     login_data, res))
    else:
        logger.debug('Username "{}" or password "{}" is not set. Not logging in to website {}. Details: {}'.format(
            job_site_detail_info.get('username'),
            job_site_detail_info.get('password'),
            job_site_detail_info.get('loginUrl'),
            job_site_detail_info))
def get_postings_from_site_for_multiple_search_terms(source,
                                                     job_site_details_info,
                                                     search_terms_list,
                                                     aliases,
                                                     geo_locator,
                                                     home_location,
                                                     geo_locations,
                                                     expected_postings_per_page=10,
                                                     max_pages=100, min_pages=4,
                                                     verbose=False):
    """Scrape one job site once per search term, reusing a single HTTP session.

    Logs in first when the site is served over https (the only case where
    credentials are usable), then delegates each term to
    get_job_postings_from_site().
    """
    # NOTE: the format arguments previously swapped home_location and
    # expected_postings_per_page relative to their labels; order fixed here.
    logger.debug(('get_postings_from_site_for_multiple_search_terms(job_site_details_info: {}, search_terms_list: {}, '
                  '# aliases: {}, expected_postings_per_page={}, geo_locator, home_location: {}, geo_locations,'
                  'max_pages={}, min_pages={}, verbose={})').format(job_site_details_info,
                                                                    search_terms_list,
                                                                    len(aliases),
                                                                    expected_postings_per_page,
                                                                    home_location,
                                                                    max_pages,
                                                                    min_pages, verbose))
    session = requests.Session()
    if job_site_details_info['urlSchema'] == 'https':
        login_to_web_site(session, job_site_details_info)
    for search_term in search_terms_list:
        get_job_postings_from_site(
            source, job_site_details_info, search_term, aliases,
            geo_locator, home_location, geo_locations,
            expected_postings_per_page=expected_postings_per_page,
            max_pages=max_pages, min_pages=min_pages, session=session,
            verbose=verbose)
def check_for_more_postings(num_postings_on_page, expected_postings_per_page,
                            num_unique_postings_found_on_page, num_postings_site_found,
                            start_index, max_pages, min_pages, verbose=False):
    """
    Decide whether the scraper should fetch the next page of postings.

    Args:
        :param num_postings_on_page: the total number of postings found on the page
        :param expected_postings_per_page: the number of postings expected to be on the page
        :param num_unique_postings_found_on_page: the number of new/unique postings found on the page
        :param num_postings_site_found: the total number of postings found on the site
        :param start_index: the starting index for the page, should be a multiple of expected_postings_per_page
        :param max_pages: the maximum number of pages to scrape
        :param min_pages: the minimum number of pages to scrape
        :param verbose: emit extra log output explaining the decision
    """
    logger.debug(('check_for_more_postings(num_postings_on_page={}, expected_postings_per_page={}, '
                  'num_all_unique_postings_found_on_page={}, num_postings_site_found={}, '
                  'start_index={}, max_pages={}, min_pages={}, verbose={})').format(num_postings_on_page,
                                                                                   expected_postings_per_page,
                                                                                   num_unique_postings_found_on_page,
                                                                                   num_postings_site_found,
                                                                                   start_index, max_pages,
                                                                                   min_pages, verbose))
    # Guard: the site must still have at least one full page left to serve.
    if start_index + expected_postings_per_page > num_postings_site_found:
        if verbose:
            logger.debug('startIndex ({}) + expectedPostingsPerPage ({}) <= numPostingsSiteFound ({}) is False'.format(
                start_index, expected_postings_per_page, num_postings_site_found))
        return False
    if num_postings_on_page == expected_postings_per_page:
        # A full page: keep going while new postings keep appearing and the
        # hard page cap has not been reached.
        if (num_unique_postings_found_on_page > 0 and
                start_index < expected_postings_per_page * (max_pages - 1)):
            return True
    elif start_index < expected_postings_per_page * (min_pages - 1):
        # A short page: still scrape at least min_pages pages.
        return True
    if verbose:
        logger.info(
            'numPostingsOnPage ({0}) != expectedPostingsPerPage ({1}) OR numAllUniquePostingsFoundOnPage ({2}) == '
            '0 OR startIndex ({3}) < expectedPostingsPerPage ({4}) * (maxPages ({5}) -1) OR startIndex ({3}) < '
            'expectedPostingsPerPage ({4}) * (minPages ({6}) -1) '.format(
                num_postings_on_page, expected_postings_per_page, num_unique_postings_found_on_page,
                start_index, expected_postings_per_page, max_pages, min_pages))
    return False
def get_job_postings_from_site(source, job_site_details_info, search_term, aliases,
                               geo_locator, home_location, geo_locations,
                               expected_postings_per_page=10, max_pages=100,
                               min_pages=4, session=None, verbose=False):
    """Scrape all result pages for one search term on one job site.

    Fetches the first result page, then keeps requesting subsequent pages
    while check_for_more_postings() decides there is more worth fetching.
    Postings are parsed (and persisted) by parse_html_page().
    """
    # Only arguments with a matching '{}' placeholder are passed to format();
    # the previous version passed extras that silently shifted the values.
    logger.debug(('get_job_postings_from_site(job_site_details_info={}, search_term={}, # aliases={},'
                  'geo_locator, home_location, geo_locations, '
                  'expected_postings_per_page={}, max_pages={}, '
                  'min_pages={}, session={}, verbose={}').format(job_site_details_info, search_term, len(aliases),
                                                                 expected_postings_per_page, max_pages,
                                                                 min_pages, session, verbose))
    if not session:
        session = requests.Session()
        if job_site_details_info['urlSchema'] == 'https':
            login_to_web_site(session, job_site_details_info)
    start_index = 0
    url_arguments = {'q': search_term,
                     'l': job_site_details_info['location'],
                     job_site_details_info['jobTypeKey']: 'contract',
                     'sort': 'date',
                     job_site_details_info['pageIndexKey']: 0,
                     }
    url = '{}://{}/{}'.format(job_site_details_info['urlSchema'],
                              job_site_details_info['netLoc'],
                              job_site_details_info['urlPath'])
    page = session.get(url, params=url_arguments, verify=False)
    logger.info('\n\nHere is initial URL to be "scraped": {}\n'.format(page.url))
    postings_list, num_postings_on_page, init_total_num_postings = parse_html_page(
        page.text, source, job_site_details_info, aliases, geo_locator,
        home_location, geo_locations, search_term, verbose)
    logger.info('Found {} new of {} postings of {} from url {}'.format(
        len(postings_list), num_postings_on_page,
        init_total_num_postings, page.url))
    while check_for_more_postings(num_postings_on_page, expected_postings_per_page,
                                  len(postings_list), init_total_num_postings,
                                  start_index, max_pages, min_pages, verbose):
        start_index += expected_postings_per_page
        if job_site_details_info['pageIndexType'] == 'pageCount':
            url_arguments[job_site_details_info['pageIndexKey']] += 1
        else:
            url_arguments[job_site_details_info['pageIndexKey']] = start_index
        page = session.get(url, params=url_arguments, verify=False)
        # BUG FIX: 'source' was omitted from this call, shifting every
        # following argument of parse_html_page by one position.
        postings_list, num_postings_on_page, total_number_jobs_found = parse_html_page(
            page.text, source, job_site_details_info, aliases, geo_locator,
            home_location, geo_locations, search_term, verbose)
        logger.info('Found {} new of {} postings of {} from url {}'.format(len(postings_list),
                                                                           num_postings_on_page,
                                                                           total_number_jobs_found,
                                                                           page.url))
def scrape_new_job_postings(config=None, geo_locator=None, geo_locations=None, home_location=None):
    """Retrieve new postings from the Dice API and save them to the database.

    Any argument left as None is lazily constructed from configuration.
    Returns the number of postings inserted during this run.

    NOTE(review): 'client' is hard-coded to None below, so the function
    currently always logs a warning and returns 0 -- everything after the
    early return is unreachable scaffolding for a future Dice API client.
    """
    if not config:
        config = jobsearch.scraping.get_configuration()
    if not geo_locator:
        geo_locator = geopy.geocoders.Nominatim(user_agent="JobSearch")
    if not home_location:
        # Get coordinates for home
        home_location_str = config.get('home_location')
        home_location = jobsearch.scraping.get_geo_location(geo_locator, home_location_str)
    if not geo_locations:
        geo_locations = {}  # Cache of geo locations, so do not have to get the same location multiple times
    search_terms_list = ['java', 'devops', 'python', ]
    client = None
    if not client:
        logger.warning('Dice posting retrieval not implemented yet!')
        return 0
    # --- unreachable until a real Dice client is wired in ---
    inserted_timestamp = datetime.datetime.now()
    for search_term in search_terms_list:
        start_index = 0
        get_more_postings = True
        while get_more_postings:
            get_more_postings = False
            params = {
                'q': search_term,
                'jt': 'contract',
                'l': "ottawa,ontario,canada",
                'userip': "1.2.3.4",
                'useragent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2)",
                'start': start_index,
                'limit': NUMBER_POSTINGS_PER_REQUEST,
                'co': 'ca',
                'sort': 'date'
            }
            logger.debug("Getting postings for {} starting index {}".format(search_term, start_index))
            search_response = client.search(**params)
            logger.debug(search_response)
            results_postings = search_response.get('results')
            if results_postings:
                aliases = models.CompanyAliases.objects.all()
                for posting in results_postings:
                    if jobsearch.scraping.save_posting_to_db(posting, 'dice', search_term, aliases,
                                                             geo_locator, home_location, geo_locations):
                        # If we saved at least 1 posting, then we can try getting more postings from the source!
                        get_more_postings = True
                start_index += len(results_postings)
            if not results_postings:
                logger.debug('No postings returned from indeed api call, so not trying to get any more!')
                break
            if start_index > MAX_POSTINGS_RETRIEVED:
                logger.debug('Already retrieved max number, {}, of postings, so not trying to get any more!'.format(
                    MAX_POSTINGS_RETRIEVED))
                break
    # Counts below measure what this run inserted, by comparing against the
    # timestamp taken before scraping started.
    num_new_postings = models.JobPostings.objects.filter(inserted_date__gte=inserted_timestamp).count()
    num_saved_aliases = models.CompanyAliases.objects.filter(inserted_date__gte=inserted_timestamp).count()
    num_saved_recruiters = models.RecruitingCompanies.objects.filter(date_inserted__gte=inserted_timestamp).count()
    logger.debug('# new postings from Dice saved: {} # aliases: {} # recruiters: {} '.format(num_new_postings,
                                                                                            num_saved_aliases,
                                                                                            num_saved_recruiters))
    return num_new_postings
| [
"fergusonsa@yahoo.com"
] | fergusonsa@yahoo.com |
883ff69f8f33ab9939a29caa2769bdfcffbdd30c | d6ce7d815af09eea09d8bc2c6f3aaa1b341270cc | /ros_ws/devel/lib/python3/dist-packages/cozmo_rc/srv/__init__.py | 56810697b51cdf3f2704bf4724155ca111994cdd | [] | no_license | danbrick92/cozmoRos | 7e47569e6d9cdd56c84b6cffb5b1fe46453f4b48 | f0345c70f58525d3cbd4227e109b468fa4a07e15 | refs/heads/main | 2023-09-03T21:30:52.839599 | 2021-11-18T01:33:15 | 2021-11-18T01:33:15 | 423,007,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | from ._light_req import *
from ._speaker_req import *
| [
"danbrickner@hotmail.com"
] | danbrickner@hotmail.com |
8d16a7b317c421b41cb6db551f09e5d6d244cff9 | 3d8d874ebba15fd065c0a9e74c05e8cd2a24dbe8 | /Week 6 - Joining Data with pandas/19-Concatenate and merge to find common songs.py | 9ad795f5e20ab5a06eff3519aec9c340843f3813 | [] | no_license | RomuloMileris/UCD_Professional_Certificate_in_Data_Analytics | db3e583a6e607e74f3d26b65ba0de59cff64e5a3 | a4a77df69a2440132cfa3e89c4a1674e3e02d086 | refs/heads/master | 2023-02-22T12:48:50.039440 | 2021-01-15T17:06:07 | 2021-01-15T17:06:07 | 319,717,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Concatenate the classic tables vertically
classic_18_19 = pd.concat([classic_18, classic_19], ignore_index=True)
# Concatenate the pop tables vertically
pop_18_19 = pd.concat([pop_18, pop_19], ignore_index=True)
# (The two concatenations above were previously duplicated and executed a
# second time with identical results; the redundant block has been removed.)
# Merge classic_18_19 with pop_18_19 on track id to find songs on both charts
classic_pop = classic_18_19.merge(pop_18_19, on='tid')
# Using .isin(), filter classic_18_19 rows where tid is in classic_pop
popular_classic = classic_18_19[classic_18_19['tid'].isin(classic_pop['tid'])]
# Print popular chart
print(popular_classic) | [
"romulosmileris@gmail.com"
] | romulosmileris@gmail.com |
b3698f59655330a8fa5ab0c4d49985791d562870 | f284021b02f6331888b6d41cfc34d555367b3797 | /bin/easy_install | b9baf79171c742180e9e609b8b8fc87a1bd06354 | [] | no_license | Hubert51/Web_Django | aa8aa771de3085d7bff2fd2b64e8de131b9af537 | f48ad6260291311262a95f71ceda354990518dfc | refs/heads/master | 2020-01-23T21:38:44.507734 | 2016-11-29T02:17:52 | 2016-11-29T02:17:52 | 74,692,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/gengruijie/Django1.10/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" wrapper suffix so easy_install
    # reports a clean program name, then delegate to the setuptools entry
    # point, propagating its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gengr@rpi.edu"
] | gengr@rpi.edu | |
fa423cdd35927ebb9664b82df50cab4322eebe1f | 9f75a1f7e1aa7c9e3bff6aeb261808d596b75fa5 | /agent.py | 8f2c88d0157d6805b784d73414cf0700d6839e53 | [] | no_license | butterkaffee/drlnd_project1 | 2609b97d4122683b25d2c22f452077d2ccde1f71 | 2843211006947f0598e5ba7c23f10e90e399d834 | refs/heads/master | 2020-05-31T06:42:36.942280 | 2019-07-08T17:25:56 | 2019-07-08T17:25:56 | 190,148,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,605 | py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork, DuelingDQN
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 256 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
import random # NOTE(review): duplicate of the 'import random' at the top of the file
# Train on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Distributional-DQN (C51-style) support parameters; not referenced by any
# class in this file -- presumably left over from an experiment. TODO confirm.
num_atoms = 51
Vmin = -10
Vmax = 10
class Agent():
    """Vanilla DQN agent: interacts with and learns from the environment.

    Keeps two Q-networks -- a local network that is trained and a target
    network that is soft-updated towards it -- plus a uniform replay buffer.
    """
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        # Q-Network (local = online/trained copy, target = slowly tracking copy)
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
    def step(self, state, action, reward, next_state, done):
        """Record one transition and trigger learning every UPDATE_EVERY steps."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)
    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.
        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # Evaluate the policy network without tracking gradients, then
        # restore training mode for subsequent learn() calls.
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # Get max predicted Q values (for next states) from target model
        # (standard DQN target -- not Double DQN).
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states; (1 - dones) zeroes the
        # bootstrap term for terminal transitions.
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        theta_target = tau*theta_local + (1 - tau)*theta_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples (uniform sampling)."""
    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        # deque drops the oldest experience automatically once full.
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)
    def sample(self):
        """Randomly sample a batch of experiences from memory.

        Returns a 5-tuple of stacked torch tensors (states, actions, rewards,
        next_states, dones) already moved to the module-level device.
        """
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        # done flags are stored as bools; cast through uint8 before float.
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class PrioReplayBuffer:
    """Fixed-size buffer to store experience tuples.

    NOTE(review): despite the name, sampling below is uniform --
    self.priorities and prob_alpha are initialized but never read or
    updated, so prioritized experience replay is not actually implemented.
    TODO confirm whether this class is still intended to gain PER support.
    """
    def __init__(self, action_size, buffer_size, batch_size, seed, prob_alpha=0.6):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.prob_alpha = prob_alpha
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
        # Allocated for PER but currently unused (see class NOTE above).
        self.priorities = np.zeros((buffer_size,), dtype=np.float32)
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)
    def sample(self):
        """Randomly sample a batch of experiences from memory (uniform)."""
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class DuelingAgent():
    """Dueling-DQN agent: interacts with and learns from the environment.

    Identical training loop to Agent, but backed by the DuelingDQN network
    and delegating greedy action selection to the network's own act().
    """
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        # Q-Network (dueling architecture; local = trained, target = tracking copy)
        self.qnetwork_local = DuelingDQN(state_size, action_size, seed).to(device)
        self.qnetwork_target = DuelingDQN(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
    def step(self, state, action, reward, next_state, done):
        """Record one transition and trigger learning every UPDATE_EVERY steps."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)
    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.
        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            # Delegates to DuelingDQN.act (defined in model.py).
            action_values = self.qnetwork_local.act(state)
        self.qnetwork_local.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Occasionally (~1% of updates) print the loss for monitoring.
        if random.uniform(0,1) > 0.99:
            print(loss)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        theta_target = tau*theta_local + (1 - tau)*theta_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
| [
"noreply@github.com"
] | butterkaffee.noreply@github.com |
e0f9841169ca668d1ced55ab7a6441e600ad51af | 064992da81d70b4df85fc192cddf93f2ded111a0 | /analytic_scripts/code_reimplementation/Android/android_reimpl.py | cdba8d7572a2f6a951af93712bab41bee11b75b4 | [] | no_license | maxxbw54/reuse_reimpl | 07fd42ee1708337d009a88e505b2a157e4aeaef9 | c88bc6671f791485dfd47a35c1c5d16415b5beac | refs/heads/master | 2021-09-14T11:28:28.835579 | 2018-05-12T13:41:27 | 2018-05-12T13:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,450 | py | import sys, subprocess, os, re
from collections import OrderedDict
import pandas as pd
def shellCommand(command_str):
    """Run *command_str* through the shell and return its captured stdout (bytes)."""
    proc = subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    return stdout_data
def removeBracketsInQuotes(line):
    """Drop escaped-quote sequences, then strip every double-quoted span.

    Removing quoted string literals first prevents braces inside Java string
    constants from confusing the brace-counting logic elsewhere.
    """
    without_escapes = re.sub(r'\\\"', '', line)
    return re.sub(r'\".*?\"', '', without_escapes)
def addedMethods(diff_list):
    """Scan a unified diff for methods that were added in their entirety.

    A method counts as fully added when its signature line starts with '+'
    and every subsequent line up to the brace that closes the body also
    starts with '+'. Returns a list of [method_name, parameter_count] pairs.
    Relies on the module-level regex 'method_pattern' and the DEBUG flag.
    """
    added_method_list = list()
    in_block = False        # currently inside a candidate added method body
    brackets = 0            # open-brace depth of the candidate body
    method_name, param_cnt = None, None
    for line in diff_list:
        # Strip quoted string content so braces in literals don't skew counts.
        cleaned_line = removeBracketsInQuotes(line)
        if cleaned_line.startswith('+') and re.search(method_pattern, cleaned_line):
            # Added line that looks like a Java method signature: extract the
            # method name (group 1) and its parameter list (group 2).
            method_sig = re.findall('(?:(?:public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+(?:[\\$_\\w\\<\\>\\[\\]]*)\\s+([\\$_\\w]+)\\(([^\\)]*)\\)?\\s*\\{?[^\\}]*\\}?', cleaned_line)
            method_name = method_sig[0][0]
            if len(method_sig[0][1].strip()) == 0:
                param_cnt = 0
            else:
                # Parameter count = comma count + 1 for a non-empty list.
                param_cnt = method_sig[0][1].count(',') + 1
            in_block = True
            if '{' in cleaned_line:
                brackets += 1
            if '}' in cleaned_line:
                brackets -= 1
                if brackets == 0:
                    # Signature and body fit on this single added line.
                    if DEBUG:
                        print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt)
                    added_method_list.append([method_name, param_cnt])
                    in_block = False
                    brackets = 0
        elif in_block and cleaned_line.startswith('+'):
            # Continuation of the candidate body: keep balancing braces.
            if '{' in cleaned_line:
                brackets += 1
            if '}' in cleaned_line:
                brackets -= 1
                if brackets == 0:
                    if DEBUG:
                        print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt)
                    added_method_list.append([method_name, param_cnt])
                    in_block = False
                    brackets = 0
        elif in_block == True:
            # A non-added line interrupts the body: the method was not
            # added in its entirety, so abandon the candidate.
            in_block = False
            brackets = 0
    return added_method_list
def removedInvocations(diff_list):
    """Find removed invocations of library classes that were also un-imported.

    Walks the diff once, collecting (a) removed 'import a.b.C;' lines,
    (b) removed 'x = new C(...)' instantiations of those classes, and then
    (c) removed call sites on either the class itself or such an instance.
    Returns an OrderedDict mapping the 1-based diff line number of each
    removed invocation to the full removed import statement it belongs to.
    Uses the module-level DEBUG flag for optional tracing.
    """
    imported_classes = dict()     # class name -> full removed import line
    instance_dict = dict()        # variable name -> full removed import line
    removed_invoc_dict = OrderedDict()
    i = 1                         # 1-based line counter over the whole diff
    for line in diff_list:
        # Skip blank lines (optionally prefixed by the diff's +/- marker).
        if not re.search(r'^(\+|\-)?\s*$', line):
            if line.startswith('-'):
                # collect removed library methods
                matched = re.findall(r'import\s+[\w\.]+\.(\w+)\s*\;', line)
                if matched:
                    # line[1:-1] drops the leading '-' and trailing ';'.
                    full_import_class = line[1:-1]
                    class_name = matched[0]
                    imported_classes[class_name] = full_import_class
                else:
                    # instance of the removed library method
                    instantiated = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\s*\=\s*new\s+(\w+)\s*\<?\s*\w*\s*\>?\s*\(.*\)\s*\;', line)
                    if len(instantiated):
                        if instantiated[0][1] in imported_classes:
                            instance_dict[instantiated[0][0]] = imported_classes[instantiated[0][1]]
                    else:
                        # remove redundant white space
                        cleaned_line = re.sub(r'\s+', ' ', line)
                        # whether an instance of a removed library method's invocation is also removed (instance method)
                        for inst in instance_dict:
                            # IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE?
                            invoc = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\.\w+\s*\(.*\)\s*\;', cleaned_line)
                            if len(invoc):
                                if invoc[0] == inst:
                                    removed_invoc_dict[i] = instance_dict[inst]
                                    break
                        # whether a removed library method's invocation is also removed (class method)
                        for c in imported_classes:
                            # Ignore 'implements'/'extends' declarations, which
                            # mention the class without invoking it.
                            if not ('implements %s' %c in cleaned_line or 'extends %s' %c in cleaned_line):
                                # IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE?
                                invoc = re.findall(r'(\w+)\.\w+\s*\(.*\)\s*\;', cleaned_line)
                                if len(invoc):
                                    if invoc[0] == c:
                                        removed_invoc_dict[i] = imported_classes[c]
                                        break
        i += 1
    if DEBUG:
        print removed_invoc_dict
    return removed_invoc_dict
def addNearDelPosition(last_removed, removed_cnt, i, line):
    """Return True when added line *i* sits within 5 lines of the last removal.

    The distance is measured after discounting *removed_cnt* intervening
    removed lines. Falsy *last_removed* (None, or line 1) means no removal
    has been seen yet, so the answer is False.
    """
    if last_removed:
        position_delta = i - last_removed - removed_cnt
        if DEBUG:
            print 'Pos delta:', position_delta, line
        # "Near" means strictly within +/-5 lines of the removal.
        if position_delta < 5 and position_delta > -5:
            return True
    return False
def addedInvocations(diff_list, added_method_list, removed_invoc_list):
    """Pair added invocations of newly-added methods with nearby removals.

    For every added line that calls one of *added_method_list* (matched by
    name and argument count) close to a removed library invocation from
    *removed_invoc_list*, records the tuple
    (removed_line_no, added_line_no, removed_library_import).
    Returns the sorted list of such tuples. Uses the module-level
    'method_pattern' regex and DEBUG flag.
    """
    refact_pairs = set()
    last_removed = None      # diff line number of the last removed invocation
    removed_cnt = 0          # removed lines seen since last_removed
    i = 1                    # 1-based line counter over the whole diff
    for line in diff_list:
        if not re.search(r'^(\+|\-)?\s*$', line):
            if line.startswith('+'):
                for m in added_method_list:
                    method_name = m[0]
                    param_cnt = m[1]
                    # A call site, not a declaration: mentions the method name
                    # but does not itself match the method-signature pattern.
                    if (re.search(method_pattern, line)) == None and (method_name in line):
                        matched = re.search(r'\((.+)\)', line)
                        if matched:
                            if len(matched.group(1).strip()) == 0:
                                argument_cnt = 0
                            else:
                                argument_cnt = matched.group(1).count(',')+1
                            # Only pair when arity matches the added method.
                            if param_cnt == argument_cnt:
                                addedNearby = addNearDelPosition(last_removed, removed_cnt, i, line)
                                if addedNearby:
                                    if DEBUG:
                                        print last_removed, i, line
                                    refact_pairs.add((last_removed, i, last_library))
            elif line.startswith('-'):
                if i in removed_invoc_list:
                    # Track the most recent removed library invocation.
                    last_removed = i
                    removed_cnt = 0
                    last_library = removed_invoc_list[i]
                elif removed_cnt != None:
                    # NOTE(review): removed_cnt is initialized to 0, so this
                    # condition is always true; likely meant last_removed.
                    removed_cnt += 1
        i += 1
    if DEBUG:
        print sorted(refact_pairs)
    return sorted(refact_pairs)
# Combine the main analyses: detect places where a hand-written client
# implementation was replaced by a library/API call within one diff.
def searchRefactoring(diff_str):
    """Return sorted (removed_line, added_line, library) refactoring candidates."""
    lines = diff_str.split('\n')
    new_methods = addedMethods(lines)            # fully-added method bodies
    dropped_calls = removedInvocations(lines)    # removed library invocations
    return addedInvocations(lines, new_methods, dropped_calls)
def formatOutput(refact_res):
    """Render result tuples as 'removed^added' fragments joined by '-'.

    Only the first two fields of each tuple (the removed and added diff
    line numbers) are included in the output string.
    """
    return '-'.join('%s^%s' % (pair[0], pair[1]) for pair in refact_res)
if __name__ == '__main__':
    DEBUG = False
    # Regex matching a Java method signature (modifiers, return type, name,
    # parameter list, optional opening brace). Shared by the analysis
    # functions above via module scope.
    method_pattern = '((public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+[\\$_\\w\\<\\>\\[\\]]*\\s+[\\$_\\w]+\\([^\\)]*\\)?\\s*\\{?[^\\}]*\\}?'
    current_dir = os.getcwd()
    shellCommand('mkdir -p %s/converse_candidates' %current_dir)
    i = 1
    # Each subdirectory of fdroid_apps/ is expected to be a git clone of an app.
    app_names = os.listdir('fdroid_apps')
    for an_app in app_names:
        print 'Analyzing %s (%d) ...' %(an_app,i)
        output_list = list()
        # change to the subject system's directory
        os.chdir('%s/fdroid_apps/%s' %(current_dir,an_app))
        # output commit list
        commit_logs = subprocess.check_output('git log --pretty=format:%h'.split())
        for commit_id in commit_logs.split('\n'):
            if len(commit_id):
                diff_str = shellCommand('git show %s' %commit_id)
                # our current computational resources cannot allow to analyze super huge patches
                if sys.getsizeof(diff_str) > 1000000:
                    print ' %s is too big!' %commit_id
                    print ' ' + '-' * 50
                else:
                    refact_res = searchRefactoring(diff_str)
                    if len(refact_res):
                        # output locations
                        print ' ', commit_id
                        output_list.append(commit_id)
                        for res in refact_res:
                            print ' ', res[0], res[1], '\t', res[2]
                            output_list.append(' (%s, %s)\t%s' %(res[0],res[1],res[2]))
                        print ' ' + '-' * 50
                        output_list.append('-' * 50)
                        # output the patch for later manual inspection
                        shellCommand('mkdir -p %s/converse_patches/%s' %(current_dir,an_app))
                        with open('%s/converse_patches/%s/%s.txt' %(current_dir,an_app,commit_id), 'w') as pf:
                            pf.write(diff_str)
        # Persist the per-app candidate list only when something was found.
        if len(output_list):
            with open('%s/converse_candidates/%s_candidates.txt' %(current_dir,an_app), 'w') as wf:
                wf.write('\n'.join(output_list))
        i += 1
| [
"le.an@polymtl.ca"
] | le.an@polymtl.ca |
451da3310b48ed6fd08983ee33fda5f2b27b92fd | 24171ea136e2ec211792d1d7644cd5c945a6df35 | /test/41.py | a05e622c42a995a2e87dd72731ed1c176dae0dc4 | [] | no_license | reidevries/codecoach | 1329ab367dc8aa3f3dd76af0b7cbc975a7d67ccd | a6d8e3cf28a6d264b0aa6aa8a44cc315803954b2 | refs/heads/master | 2021-05-23T10:02:53.403344 | 2020-04-05T12:57:24 | 2020-04-05T12:57:24 | 253,233,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | #! /usr/bin/env python2.6
import re
import sys
import os
# Validate "ip:port" lines read from the file given on the command line
# (Python 2 script). Exit codes: 1 = wrong usage, 2 = unreadable file.
argv = len(sys.argv)
if argv != 2:
    print "usage: ipfun.py <filename>"
    sys.exit(1)
argo = sys.argv[1]
if (os.access(argo, os.R_OK) == 0):
    print argo, "is not readable"
    sys.exit(2)
InFile = open(argo, "r")
# Dotted-quad IPv4 address (each octet 0-255) followed by a colon.
ipcheck = r"((([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\:)"
for line in InFile:
    validip = re.match(ipcheck, line)
    # Strip the trailing newline by splitting on it.
    line = line.split('\n')
    line = line[0]
    if validip:
        line2 = line.split(':')
        try :
            port = int(line2[1])
        except:
            print line,"- Invalid Port Number"
        else:
            # NOTE(review): '&' is a bitwise AND on the two booleans (works,
            # but 'and' is conventional); upper bound 32767 excludes the
            # full 1-65535 port range -- presumably intentional, confirm.
            if ((port > 0) & (port < 32767)):
                validport = 1
                if port < 1024:
                    # Ports below 1024 require root to bind.
                    root = 1
                    print line,"- Valid (root privileges required)"
                else:
                    root = 0
                    print line,"- Valid"
            else:
                print line,"- Invalid Port Number"
    else:
        print line,"- Invalid IP Address"
sys.exit(0) | [
"raeaw@localhost.localdomain"
] | raeaw@localhost.localdomain |
fc56269afc1a9b27972e6ba65f1634e38ca3c907 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/volatil.py | da3fffbd742a2e39d77bda58f2168f2a493c7200 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 586 | py | ii = [('EmerRN.py', 1), ('RogePAV2.py', 2), ('GodwWSL2.py', 1), ('FerrSDO3.py', 1), ('WilbRLW.py', 1), ('ProuWCM.py', 5), ('PettTHE.py', 3), ('PeckJNG.py', 1), ('WilbRLW2.py', 7), ('CarlTFR.py', 2), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('BuckWGM.py', 1), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('SoutRD2.py', 1), ('MedwTAI2.py', 1), ('BuckWGM2.py', 1), ('WestJIT.py', 2), ('FitzRNS4.py', 2), ('EdgeMHT.py', 1), ('LyttELD3.py', 1), ('BellCHM.py', 1), ('WilbRLW3.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 4), ('FitzRNS2.py', 1), ('LyelCPG3.py', 1), ('BeckWRE.py', 1), ('WordWYR.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
670e94a3bedc2fa474c3d44db8f5ae1bab732516 | f3515dd05089b6491ccb4c28ac6654b7f86e77b0 | /hw_4/final_submittal/cross_validation_script.py | 11b87a12e232f801086c9ba118854eadc8e211eb | [] | no_license | jb08/AI | f17409832c0af8710957f4b4d6c80d90aa06f198 | 8f05d6989174b3c6b76547c5370042038a54e78d | refs/heads/master | 2021-01-01T04:12:10.496120 | 2016-06-09T16:40:58 | 2016-06-09T16:40:58 | 56,189,015 | 0 | 0 | null | 2016-06-08T17:06:55 | 2016-04-13T22:01:04 | Python | UTF-8 | Python | false | false | 5,263 | py | # Name: Megan Sinclair, David Williams, Jason Brown
# Date: 5/23/16
# All group members were present and contributing during all work on this project
#
# Note that there is a retrain function in this script. This function mirrors the training
# that is present in our bayes.py and bayesbest.py files. Mirroring it here was simply
# done to make our cross-validation easier, but bayesbest.py is still intended to be used
# by itself.
import bayes
import bayesbest
import os, time
def ten_fold():
    """Run 10-fold cross-validation over the movies_reviews/ corpus,
    comparing the baseline Bayes classifier (bayes) against the improved
    one (bayesbest), then print precision, recall and F-measure for both.

    Counters accumulate across all ten folds, so the metrics printed at
    the end are corpus-wide totals rather than per-fold averages.
    """
    # Confusion-matrix counts for the baseline classifier:
    # *_true = correct classifications of that class, *_false = errors.
    pos_true = 0
    pos_false = 0
    neg_true = 0
    neg_false = 0
    # Same counts for the improved ("best") classifier.
    best_pos_true = 0
    best_pos_false = 0
    best_neg_true = 0
    best_neg_false = 0
    bc = bayes.Bayes_Classifier()
    bcc = bayesbest.Bayes_Classifier()
    for i in range(10):
        # Each fold shifts the train/test split by one position (see single_fold).
        training,testing = single_fold(i)
        # Retrain both classifiers from scratch on this fold's training files.
        retrain(bc,training, False)
        retrain(bcc,training, True)
        #print "\tDone training"
        #print len(testing)
        #print len(training)
        #time.sleep(3)
        ct = 1
        for f in testing:
            sTxt = bc.loadFile("movies_reviews/" + f)
            bc_result = bc.classify(sTxt)
            bcc_result = bcc.classify(sTxt)
            #print "\tTested: " ,ct
            ct += 1
            # File names encode the gold label: movies-5* is a positive
            # review, movies-1* is a negative review.
            if (f.startswith("movies-5")):
                if bc_result == "positive":
                    pos_true += 1
                else:
                    pos_false += 1
                if bcc_result == "positive":
                    best_pos_true += 1
                else:
                    best_pos_false += 1
            elif (f.startswith("movies-1")):
                if bc_result == "negative":
                    neg_true += 1
                else:
                    neg_false += 1
                if bcc_result == "negative":
                    best_neg_true += 1
                else:
                    best_neg_false += 1
        print "fold: ", i
        print "\treg results: %d %d %d %d" % (pos_true, pos_false, neg_true, neg_false)
        print "\tbest results: %d %d %d %d" % (best_pos_true, best_pos_false, best_neg_true, best_neg_false)
    # NOTE(review): the divisions below assume at least one prediction in
    # each class; an empty corpus would raise ZeroDivisionError.
    #precision
    precision_positive = pos_true / float(pos_true + pos_false)
    precision_negative = neg_true / float(neg_true + neg_false)
    best_precision_positive = best_pos_true / float(best_pos_true + best_pos_false)
    best_precision_negative = best_neg_true / float(best_neg_true + best_neg_false)
    #recall
    recall_positive = pos_true / float(pos_true + neg_false)
    recall_negative = neg_true / float(neg_true + pos_false)
    best_recall_positive = best_pos_true / float(best_pos_true + best_neg_false)
    best_recall_negative = best_neg_true / float(best_neg_true + best_pos_false)
    #f-measure (harmonic mean of precision and recall)
    f_measure_positive = (2 * precision_positive * recall_positive) / float(precision_positive + recall_positive)
    f_measure_negative = (2 * precision_negative * recall_negative) / float(precision_negative + recall_negative)
    best_f_measure_positive = (2 * best_precision_positive * best_recall_positive) / float(best_precision_positive + best_recall_positive)
    best_f_measure_negative = (2 * best_precision_negative * best_recall_negative) / float(best_precision_negative + best_recall_negative)
    print "naive bayes classifier:"
    print "  precision_positive: %.3f" % precision_positive
    print "  precision_negative: %.3f"% precision_negative
    print "  recall_positive: %.3f" %recall_positive
    print "  recall_negative: %.3f" %recall_negative
    print "  f_measure_positive: %.3f" %f_measure_positive
    print "  f_measure_negative: %.3f" %f_measure_negative
    print " "
    print "naive bayes classifier (improved):"
    print "  precision_positive: %.3f" %best_precision_positive
    print "  precision_negative: %.3f" %best_precision_negative
    print "  recall_positive: %.3f" %best_recall_positive
    print "  recall_negative: %.3f" %best_recall_negative
    print "  f_measure_positive: %.3f" %best_f_measure_positive
    print "  f_measure_negative: %.3f" %best_f_measure_negative
def single_fold(start_val):
    """Split the files under movies_reviews/ into (training_set, testing_set)
    for one fold of 10-fold cross-validation.

    A running counter starts at start_val % 10; each file advances it, and
    whenever it reaches 9 that file goes to the testing set and the counter
    wraps to 0.  Different start_val values therefore select different folds.
    """
    # Take only the file names from the top level of the directory;
    # an empty default keeps this safe if the directory is missing.
    file_names = next(os.walk("movies_reviews/"), (None, None, []))[2]

    training_set = []
    testing_set = []
    counter = start_val % 10
    for name in file_names:
        if counter == 9:
            testing_set.append(name)
            counter = 0
        else:
            training_set.append(name)
            counter += 1
    return training_set, testing_set
def retrain(bc, training_set, is_best):
    """Rebuild classifier *bc*'s word-frequency dictionaries from the file
    names in *training_set* (relative to movies_reviews/).

    Mirrors the training logic in bayes.py / bayesbest.py; when *is_best*
    is True, tokens are lower-cased the way bayesbest does.
    """
    #For each file name, parse and determine if pos (5) or neg (1)
    bc.positive = dict()
    bc.negative = dict()
    for f in training_set:
        # bc.dictionary is aliased to whichever class dictionary this
        # file's label selects; updates below flow into that dict.
        #Positive review, add words/frequencies to positive dictionary
        if (f.startswith("movies-5")):
            bc.dictionary = bc.positive
        #Negative review, add words/frequencies to negative dictionary
        elif (f.startswith("movies-1")):
            bc.dictionary = bc.negative
        else:
            #print "error: file didn't start with movies-1 or movies-5"
            continue
        sTxt = bc.loadFile("movies_reviews/" + f)
        token_list = bc.tokenize(sTxt)
        #print "dictionary: ", dictionary
        for word in token_list:
            if (is_best):
                word = word.lower()
            #If word exists in dictionary already, increase frequency by 1
            if word in bc.dictionary:
                bc.dictionary[word] +=1
            #Add word to dictionary with frequency of 1 if it did not already exist
            else:
                bc.dictionary[word] = 1
"jasonkingsley.brown@gmail.com"
] | jasonkingsley.brown@gmail.com |
132c053eb5afe2d84aa47b7ec1f8974eb06f8dce | f34ed25e140a1e9f09d1fb4253674b317b989125 | /NURB/manage.py | 52c8c2eebf023aad825462247ee388c57a0c342b | [] | no_license | westonpace/NUR | 925ae3e01a5315292d3cb96d98603dd77182acec | 01b8e657583c549afda0e11abb9b9fb8712147eb | refs/heads/master | 2021-01-22T11:51:14.011919 | 2013-05-22T03:09:01 | 2013-05-22T03:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point Django at this project's settings
    # module (unless the environment already overrides it) and hand the
    # command-line arguments to the management command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NURB.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"weston.pace@gmail.com"
] | weston.pace@gmail.com |
3312b51d5e5f1f3726320f7259525ad1936b0f31 | b7320c9d3b36973812314cb6cde6c056f3311972 | /general_test.py | 530964ae76bd5bac390bc7c9f6451797558492a2 | [] | no_license | dhueholt/Misc-bits | 27b75cf85026d0253e53f99197c11bbfc44baba6 | dc7219ea79234e661c5d8f9b113a26edacd94ec5 | refs/heads/master | 2022-02-14T23:16:16.831681 | 2019-07-22T20:18:40 | 2019-07-22T20:18:40 | 198,296,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | """ Test for VIIRS EDR product
Author(s): Daniel Hueholt @dhueholt GitHub
"""
from glob import glob
import matplotlib.pyplot as plt
from satpy import Scene
import cartopy.crs as ccrs
import pdb
# Collect the VIIRS EDR JRR granule files for this date.
FILENAMES = glob('/Users/dhueholt/Documents/Data/CloudMask/20190306/JRR*.nc')
SCN = Scene(reader='viirs_edr_gran', filenames=FILENAMES)
SCN.load(['cloudmaskbinary'])
# Build an optimal Lambert-conformal bounding-box area (centre lon -96,
# lat 39, standard parallels at 25N) and resample the scene onto it.
MY_AREA = SCN['cloudmaskbinary'].attrs['area'].compute_optimal_bb_area({'proj': 'lcc', 'lon_0': -96.,
                                                 'lat_0': 39., 'lat_1': 25.,
                                                 'lat_2': 25.})
NEW_SCN = SCN.resample(MY_AREA)
# pdb.set_trace()
NEW_SCN.save_dataset('cloudmaskbinary','/Users/dhueholt/Images/cmb.png')
# Convert the resampled area definition to a cartopy CRS for plotting.
CRS = NEW_SCN['cloudmaskbinary'].attrs['area'].to_cartopy_crs()
lambert_proj = ccrs.LambertConformal()  # NOTE(review): unused — kept as-is
AX = plt.axes(projection=CRS)
AX.coastlines()
AX.gridlines()
AX.set_global()
# Draw the resampled cloud mask in the area's native projection.
plt.imshow(NEW_SCN['cloudmaskbinary'], transform=CRS, extent=CRS.bounds, origin='upper')
# CBAR = plt.colorbar()
# CBAR.set_label('cloudmaskbinary')
# plt.clim(-4,4)
plt.savefig('/Users/dhueholt/Images/reference_1.png')
| [
"dmhuehol@ncsu.edu"
] | dmhuehol@ncsu.edu |
ba8d9485f114b77345b5bdc786cacf2516b8dba0 | b29dcbf879166592b59e34f0e2bc4918c3ac94a0 | /cart/views.py | 4dfc522e62c9c9e4cc9b815d50b1184bbe3d6954 | [] | no_license | samdasoxide/myshop | ce6d4553af04f1ddf5de1cbfa38ef2ff33ac6b11 | 21115de7748862c8a44ef4dc5a61511ad67746dd | refs/heads/master | 2022-12-14T07:39:13.803686 | 2017-06-20T11:42:30 | 2017-06-20T11:42:30 | 92,954,076 | 0 | 0 | null | 2022-12-07T23:58:40 | 2017-05-31T14:23:18 | JavaScript | UTF-8 | Python | false | false | 1,067 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductFrom
@require_POST
def cart_add(request, product_id):
    """Add a product to the session cart (or change its quantity), then
    redirect to the cart detail page.

    POST-only; expects the CartAddProductFrom fields ``quantity`` and
    ``update`` in the request body.  Invalid form data is silently
    ignored and the redirect still happens.
    """
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductFrom(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        # 'update' presumably tells Cart.add to overwrite the quantity
        # instead of incrementing it — see cart.Cart.add for the contract.
        cart.add(product=product,
                 quantity=cd['quantity'],
                 update_quantity=cd['update'])
    return redirect('cart:cart_detail')
def cart_remove(request, product_id):
    """Remove the given product from the session cart and redirect to the
    cart detail page.  404s when the product id does not exist."""
    product = get_object_or_404(Product, id=product_id)
    Cart(request).remove(product)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the cart detail page, attaching a pre-filled quantity-update
    form to each cart entry."""
    cart = Cart(request)
    for entry in cart:
        form = CartAddProductFrom(
            initial={'quantity': entry['quantity'], 'update': True}
        )
        entry['update_quantity_form'] = form
    return render(request, 'cart/detail.html', {'cart': cart})
| [
"samdasoxide@gmail.com"
] | samdasoxide@gmail.com |
b3743862fc7b8de3b6dca5344e37f61f50a634eb | b97a608517f024b81db0bdc4094d143ba87c8af4 | /src/oceandata/export_production/mouw.py | 5922a9fe193338af1b8d507473dce963eb6aaa90 | [
"MIT"
] | permissive | brorfred/oceandata | ff008042cc993a07d9db1de3fa72e70f70d44219 | 831e0691223da1aa6a6e97175e8c2d7874bf60cd | refs/heads/master | 2022-02-14T11:48:13.401206 | 2022-01-27T17:01:56 | 2022-01-27T17:01:56 | 175,451,337 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | """
Global ocean particulate organic carbon flux.
Ref: https://doi.org/10.1594/PANGAEA.855594,
"""
import os, pathlib
import warnings
import pandas as pd
import numpy as np
import requests
DATADIR = pathlib.PurePath(pathlib.Path.home(), ".oceandata")
pathlib.Path(DATADIR).mkdir(parents=True, exist_ok=True)
DATAURL = "https://doi.pangaea.de/10.1594/PANGAEA.855594"
"""
def load():
df = pd.read_hdf("h5files/ep_mouw_with_sat.h5")
df["Zeu"] = 4.6/df.kd490
df["ep_obs"] = df.POC_flux
df["chl"] = df["chl"] * df["Zeu"]
#lh = ecoregions.Longhurst()
#longh = lh.match("regions", lonvec=dfm.lon, latvec=dfm.lat, jdvec=dfm.lat*0)
#dfm["longhurst"] = longh
return df
"""
def load(datadir=DATADIR, filename="GO_flux.tab", with_std=False):
    """Load the PANGAEA GO_flux tab file as a DataFrame.

    Downloads the file first if it is not present in *datadir*.  Column
    names are shortened to snake-ish identifiers; standard-deviation and
    reference columns are dropped unless *with_std* is True.  The frame
    is indexed by the trap retrieval time (``end_time``).
    """
    fn = os.path.join(datadir, filename)
    if not os.path.isfile(fn):
        download(datadir=datadir, filename=filename)
    with open(fn ,"r") as fH:
        # Skip the PANGAEA metadata header, which ends with a "*/" line;
        # the tab-separated table starts right after it.
        while 1:
            line = fH.readline()
            if "*/" in line:
                break
        # parse_dates=[1,] parses the second column (deployment date/time).
        df = pd.read_csv(fH, sep="\t", parse_dates=[1,])
    if not with_std:
        df.drop(columns=['Flux std dev [±]', 'C flux [mg/m**2/day]',
                         'C flux std dev [±]', 'POC flux std dev [±]',
                         'PIC flux std dev [±]', 'PON flux std dev [±]',
                         'POP flux std dev [±]', 'PSi flux std dev [±]',
                         'PAl std dev [±]', 'CaCO3 flux std dev [±]',
                         'Reference'], inplace=True)
    # Map the verbose PANGAEA column headers onto short names.
    df.rename(columns={'ID (Reference identifier)':"ref_ID",
                       'ID (Unique location identifier)':"UUID",
                       'Type (Data type)':"sampling_type",
                       'Latitude':"lat",
                       'Longitude':"lon",
                       'Flux tot [mg/m**2/day]':"tot_flux",
                       'POC flux [mg/m**2/day]':"POC_flux",
                       'PIC flux [mg/m**2/day]':"PIC_flux",
                       'PON flux [mg/m**2/day]':"PON_flux",
                       'POP flux [mg/m**2/day]':"POP_flux",
                       'PSi flux [mg/m**2/day]':"PSi_flux",
                       'PSiO2 flux [mg/m**2/day]':"PSiO2_flux",
                       'PSi(OH)4 flux [mg/m**2/day]':"PSiOH4_flux",
                       'PAl [mg/m**2/day]':"PAl_flux",
                       'Chl flux [mg/m**2/day]':"Chl_flux",
                       'Pheop flux [µg/m**2/day]':"Pheop_flux",
                       'CaCO3 flux [mg/m**2/day]':"CaCO3_flux",
                       'Fe flux [mg/m**2/day]':"Fe_flux",
                       'Mn flux [µg/m**2/day]':"Mn_flux",
                       'Ba flux [µg/m**2/day]':"Ba_flux",
                       'Detrital flux [mg/m**2/day]':"Detr_flux",
                       'Ti flux [µg/m**2/day]':"Ti_flux",
                       'Bathy depth [m] (ETOPO1 bathymetry)':"bathy",
                       'Depth water [m] (Sediment trap deployment depth)':"depth",
                       'Area [m**2]':"area",
                       'Duration [days]':"duration",
                       'Date/Time (Deployed)':"start_time",
                       'Date/time end (Retrieved)':"end_time",
                       'Area [m**2] (Surface area of trap)':"trap_area",
                       },
              inplace=True)
    df.drop(columns=['Type (Sediment trap type)',
                     'Elevation [m a.s.l.] (Total water depth)'],
            inplace=True)
    # Normalise both timestamp columns, then index by retrieval time.
    df["start_time"] = pd.DatetimeIndex(df["start_time"])
    df["end_time"] = pd.DatetimeIndex(df["end_time"])
    df.set_index("end_time", inplace=True)
    return df
def download(datadir=DATADIR, filename="GO_flux.tab"):
    """Download the GO_flux tab file from the PANGAEA server into *datadir*.

    Any existing copy is deleted first.  Returns False on a read timeout
    (with a warning) and raises IOError on a non-OK HTTP response.
    NOTE(review): callers such as load() do not check the False return,
    so a timeout surfaces later as a missing-file error.
    """
    local_filename = os.path.join(datadir, filename)
    try:
        os.unlink(local_filename)
    except FileNotFoundError:
        pass
    try:
        # format=textfile asks PANGAEA for the tab-separated download.
        r = requests.get(DATAURL, stream=True, timeout=6, params={"format":"textfile"})
    except requests.ReadTimeout:
        warnings.warn("Connection to server timed out.")
        return False
    if r.ok:
        # NOTE(review): local_filename is always set above, so this branch
        # never returns the raw text in practice.
        if local_filename is None:
            return r.text
        else:
            with open(local_filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                f.flush()
    else:
        raise IOError(f"Could not download file from server, Error {r.status_code}")
| [
"brorfred@gmail.com"
] | brorfred@gmail.com |
f525a1f530ac0b939164e1ae587b3a12727bf3d3 | e1f78a71c0ce255ab064e0fa9fb3bdb7251bb016 | /src/QuickPaint.py | 7d9e480cf817e89458f9543abf8c54f0d1bd2c03 | [] | no_license | dylansloann/SketchMath | 7675f7e40ef5ae31675c1fa062e2718f41390c07 | 874e624dd3a86a0f879fa54f609115fd393bb1dc | refs/heads/master | 2023-05-30T00:55:00.461682 | 2021-06-13T05:00:44 | 2021-06-13T05:00:44 | 294,305,328 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,745 | py | from PyQt5 import QtCore, QtGui, QtWidgets
import sys, os
class Paint(QtWidgets.QMainWindow):
    """A minimal paint application window: a white canvas, a menu bar for
    file/color/brush-size commands, and freehand drawing with the left
    mouse button."""

    def __init__(self):
        super(Paint, self).__init__()
        # Build the window, menus, commands, colors and brush sizes in order;
        # menuOptionsSetup must run before the command/color/size setups
        # because they populate the menus it creates.
        self.windowSetup()
        self.menuOptionsSetup()
        self.saveCommmandSetup()
        self.eraseCommmandSetup()
        self.colorsSetup()
        self.brushSizeSetup()

    def windowSetup(self):
        """Create the fixed 500x500 window, a white canvas image and the
        default brush state (2 px, black, not drawing)."""
        self.setWindowTitle("QuickPaint")
        self.setGeometry(100, 100, 500, 500)
        self.setFixedSize(500, 500)
        self.setWindowIcon(QtGui.QIcon("./icons/painticon.png"))
        # The canvas: everything is drawn into this off-screen image and
        # blitted to the window in paintEvent.
        self.image = QtGui.QImage(self.size(), QtGui.QImage.Format_RGB32)
        self.image.fill(QtCore.Qt.white)

        # default brush
        self.brushSize = 2
        self.drawing = False
        self.brushColor = QtCore.Qt.black
        self.lastPoint = QtCore.QPoint()

    def menuOptionsSetup(self):
        """Create the File/Color/Size menus.  They are stored as module
        globals so the other setup methods can add actions to them."""
        global main_menu
        main_menu = self.menuBar()
        global file_menu
        file_menu = main_menu.addMenu("File")
        global color_menu
        color_menu = main_menu.addMenu("Color")
        global size_menu
        size_menu = main_menu.addMenu("Size")

    def saveCommmandSetup(self):
        # File > Save (Ctrl+S): export the canvas image.
        save_command = QtWidgets.QAction(QtGui.QIcon("./icons/saveicon.png"), "Save", self)
        save_command.setShortcut("Ctrl + S")
        file_menu.addAction(save_command)
        save_command.triggered.connect(self.save)

    def eraseCommmandSetup(self):
        # File > Erase (Ctrl+E): clear the canvas to white.
        erase_command = QtWidgets.QAction(QtGui.QIcon("./icons/brushicon.png"), "Erase", self)
        erase_command.setShortcut("Ctrl + E")
        file_menu.addAction(erase_command)
        erase_command.triggered.connect(self.erase)

    # designation of menu bar commands
    def save(self):
        """Prompt for a file path and save the canvas; cancelling the
        dialog (empty path) is a no-op."""
        file_path = QtWidgets.QFileDialog.getSaveFileName(self, "Save Image", "", "PNG;;JPG;;All_Files")
        if file_path[0] == "":
            return
        self.image.save(file_path[0])

    def erase(self):
        """Clear the canvas to white and repaint."""
        self.image.fill(QtCore.Qt.white)
        self.update()

    def colorsSetup(self):
        """Populate the Color menu; each action selects a brush color."""
        black = QtWidgets.QAction(QtGui.QIcon("./icons/blackicon.png"), "Black", self)
        color_menu.addAction(black)
        black.triggered.connect(self.color_black)
        white = QtWidgets.QAction(QtGui.QIcon("./icons/whiteicon.png"), "White", self)
        color_menu.addAction(white)
        white.triggered.connect(self.color_white)
        darkCyan = QtWidgets.QAction(QtGui.QIcon("./icons/darkCyanicon.png"), "Cyan", self)
        color_menu.addAction(darkCyan)
        darkCyan.triggered.connect(self.color_darkCyan)
        darkBlue = QtWidgets.QAction(QtGui.QIcon("./icons/darkBlueicon.png"), "Blue", self)
        color_menu.addAction(darkBlue)
        darkBlue.triggered.connect(self.color_darkBlue)
        darkMagenta = QtWidgets.QAction(QtGui.QIcon("./icons/darkMagentaicon.png"), "Magenta", self)
        color_menu.addAction(darkMagenta)
        darkMagenta.triggered.connect(self.color_darkMagenta)
        darkRed = QtWidgets.QAction(QtGui.QIcon("./icons/darkRedicon.png"), "Dark Red", self)
        color_menu.addAction(darkRed)
        darkRed.triggered.connect(self.color_darkRed)

    # designation of colors
    def color_black(self):
        self.brushColor = QtCore.Qt.black
    def color_white(self):
        self.brushColor = QtCore.Qt.white
    def color_darkCyan(self):
        self.brushColor = QtCore.Qt.darkCyan
    def color_darkBlue(self):
        self.brushColor = QtCore.Qt.darkBlue
    def color_darkMagenta(self):
        self.brushColor = QtCore.Qt.darkMagenta
    def color_darkRed(self):
        self.brushColor = QtCore.Qt.darkRed

    def brushSizeSetup(self):
        """Populate the Size menu; each action selects a brush width."""
        size4 = QtWidgets.QAction(QtGui.QIcon("./icons/4icon.png"), "4 pixels", self)
        size_menu.addAction(size4)
        size4.triggered.connect(self.Brush4)
        size8 = QtWidgets.QAction(QtGui.QIcon("./icons/8icon.png"), "8 pixels", self)
        size_menu.addAction(size8)
        size8.triggered.connect(self.Brush8)
        size12 = QtWidgets.QAction(QtGui.QIcon("./icons/12icon.png"), "12 pixels", self)
        size_menu.addAction(size12)
        size12.triggered.connect(self.Brush12)
        size16 = QtWidgets.QAction(QtGui.QIcon("./icons/16icon.png"), "16 pixels", self)
        size_menu.addAction(size16)
        size16.triggered.connect(self.Brush16)

    # designation of brush sizes
    def Brush4(self):
        self.brushSize = 4
    def Brush8(self):
        self.brushSize = 8
    def Brush12(self):
        self.brushSize = 12
    def Brush16(self):
        self.brushSize = 16

    # mouse movement and action setup
    def mousePressEvent(self, event):
        # Left button starts a stroke; remember the anchor point.
        if event.button() == QtCore.Qt.LeftButton:
            self.drawing = True
            self.lastPoint = event.pos()

    def mouseReleaseEvent(self, action):
        # Releasing the left button ends the stroke.
        if action.button() == QtCore.Qt.LeftButton:
            self.drawing = False

    def mouseMoveEvent(self, event):
        # While dragging with the left button, draw a line segment from
        # the previous point to the current one on the canvas image.
        if(event.buttons() & QtCore.Qt.LeftButton) & self.drawing:
            painter = QtGui.QPainter(self.image)
            painter.setPen(QtGui.QPen(self.brushColor, self.brushSize, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()

    # setup of painter
    def paintEvent(self, event):
        # Blit the off-screen canvas image onto the window.
        canvasPainter = QtGui.QPainter(self)
        canvasPainter.drawImage(self.rect(), self.image, self.image.rect())
| [
"dylansloann2@gmail.com"
] | dylansloann2@gmail.com |
569977b9ce4461b125524e9caad267bb700d509d | 1a3b527145549c7d69f42831ea12c468e1ebb209 | /math.py | ad4498fd08e2a47d2f83cef8a60021d5b965e988 | [] | no_license | muhammadagus030201/finalproject | 7654094d0549122fb14a04441cb606bc3208f972 | 3b3fe4a6e13d94ff217f61e6e8bd5aebec678ddd | refs/heads/main | 2023-03-21T12:23:22.102040 | 2021-03-06T16:11:18 | 2021-03-06T16:11:18 | 345,130,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | x = 10
y = 3
#tambah
z1 = x+y
print("Hasil Pertambahan {} + {} = {}".format(x,y,z1))
#bagi
z2 = x/y
print("Hasil Pembagian {} / {} = {}".format(x,y,z2))
#moduloatausisabagi
z3 = x%y
print("Hasil Modulo {} % {} = {}".format(x,y,z3))
#pangkat
z4 = x**y
print("Hasil Pangkat {} ** {} = {}".format(x,y,z4)) | [
"belajarpython030201@gmail.com"
] | belajarpython030201@gmail.com |
0cd9df6b49c7bef9b37f49d71c4534f94c55be94 | ccf06f8f91a1068fc12edffb379d35dbe4f6388e | /getReplyIds.py | 8c510d9a54bfce6441aacd116135fea88e804c7c | [
"Apache-2.0"
] | permissive | online-behaviour/machine-learning | 46c37c5e37de4323c778bc2ffc80024f4e34a004 | 2ff0e83905985ec644699ece44c75dd7422a7426 | refs/heads/master | 2021-07-09T04:36:48.441324 | 2021-04-28T13:58:00 | 2021-04-28T13:58:00 | 87,834,727 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/python3 -W all
# getReplyIds.py: extract ids and reply-ids from tweets in json
# usage: getReplyIds.py < file
# 20170918 erikt(at)xs4all.nl
import csv
import json
import re
import sys
# Script name (for error messages) and the JSON keys every tweet must have.
COMMAND = sys.argv.pop(0)
ID = "id"
REPLYTO = "in_reply_to_status_id"
SCREENNAME = "screen_name"
TEXT = "text"
USER = "user"

outFile = csv.writer(sys.stdout)
# Compile the newline pattern once instead of on every input line
# (the original compiled it inside the loop).
NEWLINE_PATTERN = re.compile("\n")
for line in sys.stdin:
    jsonLine = json.loads(line)
    if not ID in jsonLine or not REPLYTO in jsonLine or not TEXT in jsonLine or\
       not USER in jsonLine or not SCREENNAME in jsonLine[USER]:
        sys.exit(COMMAND+": unexpected line: "+line)
    # Collapse embedded newlines so each tweet stays on one CSV row.
    jsonLine[TEXT] = NEWLINE_PATTERN.sub(" ",jsonLine[TEXT])
    outFile.writerow([str(jsonLine[ID]),str(jsonLine[REPLYTO]),\
        str(jsonLine[USER][SCREENNAME]),"PARTY",
        str(jsonLine[TEXT])])
| [
"erikt@xs4all.nl"
] | erikt@xs4all.nl |
214f9f36330053db1146926c0969362d5663836f | 0eb42eb02dcd217b7c41993a99b3b6628a13a04e | /exponential.py | e2d1af3bcfbc5307cacb4cde47cbd5ed33dd56b4 | [] | no_license | luckysona/shanthiya | d1f29449c9511e33ce382666b53dd35b70534081 | c6d9acfe8e069be5e3c4428e27d4722afd9faa27 | refs/heads/master | 2020-05-25T22:59:50.235797 | 2019-07-23T17:40:53 | 2019-07-23T17:40:53 | 188,025,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | x,y=input().split()
x=int(x)
y=int(y)
if(y==0):
print(x)
else:
print(x**y)
| [
"noreply@github.com"
] | luckysona.noreply@github.com |
11e303d4c69ca7bcedd509112ad6562b91d12bdc | 6a562077f79213f6b2bb89e92d6a16d931268089 | /frappe/core/doctype/data_import/importer_new.py | 6fccbc89ef1f32fc83abe5d05da1ba572513dd91 | [
"MIT"
] | permissive | libracore/frappe | 74fe917b75aa1cfad38c71519914180d5d5f1366 | 92d94a73a3445a252a2828de0053dcce86a18f17 | refs/heads/v12 | 2023-07-17T04:58:08.622228 | 2023-06-28T17:27:33 | 2023-06-28T17:27:33 | 89,392,790 | 6 | 8 | MIT | 2023-08-29T16:29:03 | 2017-04-25T18:19:40 | Python | UTF-8 | Python | false | false | 27,077 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import io
import os
import json
import timeit
import frappe
from datetime import datetime
from frappe import _
from frappe.utils import cint, flt, update_progress_bar
from frappe.utils.csvutils import read_csv_content
from frappe.utils.xlsxutils import (
read_xlsx_file_from_attached_file,
read_xls_file_from_attached_file,
)
from frappe.model import no_value_fields, table_fields
INVALID_VALUES = ["", None]
MAX_ROWS_IN_PREVIEW = 10
# pylint: disable=R0201
class Importer:
	def __init__(
		self, doctype, data_import=None, file_path=None, content=None, console=False
	):
		"""Prepare an Importer for *doctype*.

		The template may come from a Data Import document (*data_import*,
		whose attached file and saved template_options are used), from a
		*file_path* on disk, or from raw *content*.  *console* switches
		progress/warning reporting to stdout instead of realtime events.
		"""
		self.doctype = doctype
		self.template_options = frappe._dict({"remap_column": {}})
		self.console = console

		if data_import:
			self.data_import = data_import
			if self.data_import.template_options:
				template_options = frappe.parse_json(self.data_import.template_options)
				self.template_options.update(template_options)
		else:
			self.data_import = None

		self.header_row = None
		self.data = None
		# used to store date formats guessed from data rows per column
		self._guessed_date_formats = {}
		# used to store eta during import
		self.last_eta = 0
		# used to collect warnings during template parsing
		# and show them to user
		self.warnings = []

		self.meta = frappe.get_meta(doctype)
		self.prepare_content(file_path, content)
		self.parse_data_from_template()
self.parse_data_from_template()
	def prepare_content(self, file_path, content):
		"""Resolve the template source (attached import file, *file_path*
		or raw *content*), parse it into header/data rows, validate the
		shape and strip empty rows/columns."""
		extension = None

		if self.data_import and self.data_import.import_file:
			file_doc = frappe.get_doc("File", {"file_url": self.data_import.import_file})
			content = file_doc.get_content()
			extension = file_doc.file_name.split(".")[1]

		if file_path:
			content, extension = self.read_file(file_path)

		if not extension:
			# fall back to csv when the extension cannot be determined
			extension = "csv"

		if content:
			self.read_content(content, extension)

		self.validate_template_content()
		self.remove_empty_rows_and_columns()
def read_file(self, file_path):
extn = file_path.split(".")[1]
file_content = None
with io.open(file_path, mode="rb") as f:
file_content = f.read()
return file_content, extn
	def read_content(self, content, extension):
		"""Parse raw *content* according to *extension* and split it into
		``self.header_row`` (first row) and ``self.data`` (rest).

		NOTE(review): any extension other than csv/xlsx/xls leaves
		``data`` unbound and raises UnboundLocalError — callers currently
		only pass these three (prepare_content defaults to csv).
		"""
		if extension == "csv":
			data = read_csv_content(content)
		elif extension == "xlsx":
			data = read_xlsx_file_from_attached_file(fcontent=content)
		elif extension == "xls":
			data = read_xls_file_from_attached_file(content)

		self.header_row = data[0]
		self.data = data[1:]
def validate_template_content(self):
column_count = len(self.header_row)
if any([len(row) != column_count and len(row) != 0 for row in self.data]):
frappe.throw(
_("Number of columns does not match with data"), title=_("Invalid Template")
)
	def remove_empty_rows_and_columns(self):
		"""Drop rows and columns where every value is empty ("" or None).

		Side effects: rewrites ``self.data`` and ``self.header_row``, and
		records ``self.row_index_map`` mapping each kept row back to its
		original (pre-removal) index — later used to show original row
		numbers in the preview and import log.
		"""
		self.row_index_map = []
		removed_rows = []
		removed_columns = []

		# remove empty rows
		data = []
		for i, row in enumerate(self.data):
			if all(v in INVALID_VALUES for v in row):
				# empty row
				removed_rows.append(i)
			else:
				data.append(row)
				self.row_index_map.append(i)

		# remove empty columns
		# a column with a header and no data is a valid column
		# a column with no header and no data will be removed
		header_row = []
		for i, column in enumerate(self.header_row):
			column_values = [row[i] for row in data]
			# the header cell counts as a value: header + empty data survives
			values = [column] + column_values
			if all(v in INVALID_VALUES for v in values):
				# empty column
				removed_columns.append(i)
			else:
				header_row.append(column)

		data_without_empty_columns = []
		# remove empty columns from data
		for i, row in enumerate(data):
			new_row = [v for j, v in enumerate(row) if j not in removed_columns]
			data_without_empty_columns.append(new_row)

		self.data = data_without_empty_columns
		self.header_row = header_row
def get_data_for_import_preview(self):
out = frappe._dict()
out.data = list(self.rows)
out.columns = self.columns
out.warnings = self.warnings
if len(out.data) > MAX_ROWS_IN_PREVIEW:
out.data = out.data[:MAX_ROWS_IN_PREVIEW]
out.max_rows_exceeded = True
out.max_rows_in_preview = MAX_ROWS_IN_PREVIEW
return out
def parse_data_from_template(self):
columns = self.parse_columns_from_header_row()
columns, data = self.add_serial_no_column(columns, self.data)
self.columns = columns
self.rows = data
	def parse_columns_from_header_row(self):
		"""Match each header cell to a docfield and return the column list.

		User remappings in ``template_options.remap_column`` (keyed by
		header index as a string) take precedence over automatic matching
		by label/fieldname.  Unmatched or explicitly skipped columns get
		``skip_import=True``; informational warnings are appended to
		``self.warnings`` for every decision made.
		"""
		remap_column = self.template_options.remap_column
		columns = []

		df_by_labels_and_fieldnames = self.build_fields_dict_for_column_matching()

		for i, header_title in enumerate(self.header_row):
			header_row_index = str(i)
			column_number = str(i + 1)
			skip_import = False
			fieldname = remap_column.get(header_row_index)

			if fieldname and fieldname != "Don't Import":
				# user explicitly remapped this column to a field
				df = df_by_labels_and_fieldnames.get(fieldname)
				self.warnings.append(
					{
						"col": column_number,
						"message": _("Mapping column {0} to field {1}").format(
							frappe.bold(header_title or "<i>Untitled Column</i>"), frappe.bold(df.label)
						),
						"type": "info",
					}
				)
			else:
				# automatic matching by label or fieldname
				df = df_by_labels_and_fieldnames.get(header_title)

			if not df:
				skip_import = True
			else:
				skip_import = False

			if fieldname == "Don't Import":
				skip_import = True
				self.warnings.append(
					{
						"col": column_number,
						"message": _("Skipping column {0}").format(frappe.bold(header_title)),
						"type": "info",
					}
				)
			elif header_title and not df:
				self.warnings.append(
					{
						"col": column_number,
						"message": _("Cannot match column {0} with any field").format(
							frappe.bold(header_title)
						),
						"type": "info",
					}
				)
			elif not header_title and not df:
				self.warnings.append(
					{"col": column_number, "message": _("Skipping Untitled Column"), "type": "info"}
				)

			columns.append(
				frappe._dict(
					df=df,
					skip_import=skip_import,
					header_title=header_title,
					column_number=column_number,
					index=i,
				)
			)

		return columns
	def build_fields_dict_for_column_matching(self):
		"""
		Build a dict with various keys to match with column headers and value as docfield
		The keys can be label or fieldname
		{
		 'Customer': df1,
		 'customer': df1,
		 'Due Date': df2,
		 'due_date': df2,
		 'Item Code (Sales Invoice Item)': df3,
		 'Sales Invoice Item:item_code': df3,
		}

		Covers the parent doctype and all of its child-table doctypes;
		child-table keys are disambiguated with the "(Doctype)" /
		"Doctype:fieldname" forms shown above.
		"""
		out = {}

		table_doctypes = [df.options for df in self.meta.get_table_fields()]
		doctypes = table_doctypes + [self.doctype]
		for doctype in doctypes:

			# name field — synthesized, since "name" is not a regular docfield;
			# required when updating existing records (it identifies the row)
			name_key = "ID" if self.doctype == doctype else "ID ({})".format(doctype)
			name_df = frappe._dict(
				{
					"fieldtype": "Data",
					"fieldname": "name",
					"label": "ID",
					"reqd": self.data_import.import_type == "Update Existing Records",
					"parent": doctype,
				}
			)
			out[name_key] = name_df
			out["name"] = name_df

			# other fields
			meta = frappe.get_meta(doctype)
			fields = self.get_standard_fields(doctype) + meta.fields
			for df in fields:
				fieldtype = df.fieldtype or "Data"
				parent = df.parent or self.doctype
				if fieldtype not in no_value_fields:
					# label as key
					label = (
						df.label if self.doctype == doctype else "{0} ({1})".format(df.label, parent)
					)
					out[label] = df
					# fieldname as key
					if self.doctype == doctype:
						out[df.fieldname] = df
					else:
						key = "{0}:{1}".format(doctype, df.fieldname)
						out[key] = df

		# if autoname is based on field
		# add an entry for "ID (Autoname Field)"
		autoname_field = self.get_autoname_field(self.doctype)
		if autoname_field:
			out["ID ({})".format(autoname_field.label)] = autoname_field
			# ID field should also map to the autoname field
			out["ID"] = autoname_field
			out["name"] = autoname_field

		return out
def get_standard_fields(self, doctype):
meta = frappe.get_meta(doctype)
if meta.istable:
standard_fields = [
{"label": "Parent", "fieldname": "parent"},
{"label": "Parent Type", "fieldname": "parenttype"},
{"label": "Parent Field", "fieldname": "parentfield"},
{"label": "Row Index", "fieldname": "idx"},
]
else:
standard_fields = [
{"label": "Owner", "fieldname": "owner"},
{"label": "Document Status", "fieldname": "docstatus", "fieldtype": "Int"},
]
out = []
for df in standard_fields:
df = frappe._dict(df)
df.parent = doctype
out.append(df)
return out
def add_serial_no_column(self, columns, data):
columns_with_serial_no = [
frappe._dict({"header_title": "Sr. No", "skip_import": True})
] + columns
# update index for each column
for i, col in enumerate(columns_with_serial_no):
col.index = i
data_with_serial_no = []
for i, row in enumerate(data):
data_with_serial_no.append([self.row_index_map[i] + 1] + row)
return columns_with_serial_no, data_with_serial_no
	def parse_value(self, value, df):
		"""Coerce the raw cell *value* into the Python type implied by
		docfield *df* (Check/Int → int, Float/Percent/Currency → float,
		Date/Datetime → datetime via the guessed column format).

		NOTE(review): assumes *value* is a string for Check fields —
		``value.lower()`` would fail on a non-string cell (e.g. a bool or
		int read from xlsx); confirm upstream always passes strings here.
		"""
		# convert boolean values to 0 or 1
		if df.fieldtype == "Check" and value.lower().strip() in ["t", "f", "true", "false"]:
			value = value.lower().strip()
			value = 1 if value in ["t", "true"] else 0

		if df.fieldtype in ["Int", "Check"]:
			value = cint(value)
		elif df.fieldtype in ["Float", "Percent", "Currency"]:
			value = flt(value)
		elif df.fieldtype in ["Date", "Datetime"]:
			value = self.parse_date_format(value, df)

		return value
def parse_date_format(self, value, df):
date_format = self.guess_date_format_for_column(df.fieldname)
if date_format:
return datetime.strptime(value, date_format)
return value
def guess_date_format_for_column(self, fieldname):
""" Guesses date format for a column by parsing the first 10 values in the column,
getting the date format and then returning the one which has the maximum frequency
"""
PARSE_ROW_COUNT = 10
if not self._guessed_date_formats.get(fieldname):
column_index = -1
for i, field in enumerate(self.header_row):
if self.meta.has_field(field) and field == fieldname:
column_index = i
break
if column_index == -1:
self._guessed_date_formats[fieldname] = None
date_values = [
row[column_index] for row in self.data[:PARSE_ROW_COUNT] if row[column_index]
]
date_formats = [guess_date_format(d) for d in date_values]
if not date_formats:
return
max_occurred_date_format = max(set(date_formats), key=date_formats.count)
self._guessed_date_formats[fieldname] = max_occurred_date_format
return self._guessed_date_formats[fieldname]
	def import_data(self):
		"""Run the import: insert/update one document per parsed payload.

		Skips rows already imported successfully (per the saved import
		log), commits after each successful document, rolls back failed
		ones, publishes realtime progress events, and finally persists
		the log and status ("Success" / "Partial Success" / "Pending")
		on the Data Import document.  Returns the import log list.
		"""
		# set user lang for translations
		frappe.cache().hdel("lang", frappe.session.user)
		frappe.set_user_lang(frappe.session.user)

		if not self.console:
			self.data_import.db_set("template_warnings", "")

		# set flags
		frappe.flags.in_import = True
		frappe.flags.mute_emails = self.data_import.mute_emails

		# prepare a map for missing link field values
		self.prepare_missing_link_field_values()

		# parse docs from rows
		payloads = self.get_payloads_for_import()

		# dont import if there are non-ignorable warnings
		warnings = [w for w in self.warnings if w.get("type") != "info"]

		if warnings:
			if self.console:
				self.print_grouped_warnings(warnings)
			else:
				self.data_import.db_set("template_warnings", json.dumps(warnings))
				frappe.publish_realtime(
					"data_import_refresh", {"data_import": self.data_import.name}
				)
			return

		# setup import log
		if self.data_import.import_log:
			import_log = frappe.parse_json(self.data_import.import_log)
		else:
			import_log = []

		# remove previous failures from import log
		import_log = [l for l in import_log if l.get("success") == True]

		# get successfully imported rows (so re-running resumes instead of duplicating)
		imported_rows = []
		for log in import_log:
			log = frappe._dict(log)
			if log.success:
				imported_rows += log.row_indexes

		# start import
		total_payload_count = len(payloads)
		batch_size = frappe.conf.data_import_batch_size or 1000

		for batch_index, batched_payloads in enumerate(
			frappe.utils.create_batch(payloads, batch_size)
		):
			for i, payload in enumerate(batched_payloads):
				doc = payload.doc
				row_indexes = [row[0] for row in payload.rows]
				current_index = (i + 1) + (batch_index * batch_size)

				# skip payloads whose rows were already imported in a previous run
				if set(row_indexes).intersection(set(imported_rows)):
					print("Skipping imported rows", row_indexes)
					if total_payload_count > 5:
						frappe.publish_realtime(
							"data_import_progress",
							{
								"current": current_index,
								"total": total_payload_count,
								"skipping": True,
								"data_import": self.data_import.name,
							},
						)
					continue

				try:
					start = timeit.default_timer()
					doc = self.process_doc(doc)
					processing_time = timeit.default_timer() - start
					eta = self.get_eta(current_index, total_payload_count, processing_time)

					# progress events are only worth publishing for larger imports
					if total_payload_count > 5:
						frappe.publish_realtime(
							"data_import_progress",
							{
								"current": current_index,
								"total": total_payload_count,
								"docname": doc.name,
								"data_import": self.data_import.name,
								"success": True,
								"row_indexes": row_indexes,
								"eta": eta,
							},
						)
					if self.console:
						update_progress_bar(
							"Importing {0} records".format(total_payload_count),
							current_index,
							total_payload_count,
						)
					import_log.append(
						frappe._dict(success=True, docname=doc.name, row_indexes=row_indexes)
					)
					# commit after every successful import
					frappe.db.commit()
				except Exception:
					import_log.append(
						frappe._dict(
							success=False,
							exception=frappe.get_traceback(),
							messages=frappe.local.message_log,
							row_indexes=row_indexes,
						)
					)
					frappe.clear_messages()
					# rollback if exception
					frappe.db.rollback()

		# set status
		failures = [l for l in import_log if l.get("success") == False]
		if len(failures) == total_payload_count:
			status = "Pending"
		elif len(failures) > 0:
			status = "Partial Success"
		else:
			status = "Success"

		if self.console:
			self.print_import_log(import_log)
		else:
			self.data_import.db_set("status", status)
			self.data_import.db_set("import_log", json.dumps(import_log))

		frappe.flags.in_import = False
		frappe.flags.mute_emails = False
		frappe.publish_realtime("data_import_refresh", {"data_import": self.data_import.name})

		return import_log
def get_payloads_for_import(self):
    """Split ``self.rows`` into per-document payloads.

    Repeatedly consumes rows from the front of the row list; each
    iteration of the parser yields one parent doc together with the
    exact rows it was built from.

    Returns:
        list of frappe._dict with keys ``doc`` and ``rows``.
    """
    # work on a copy so self.rows is left untouched
    remaining = list(self.rows)
    result = []
    while remaining:
        doc, consumed_rows, remaining = self.parse_next_row_for_import(remaining)
        result.append(frappe._dict(doc=doc, rows=consumed_rows))
    return result
def parse_next_row_for_import(self, data):
    """
    Parses rows that make up a doc. A doc maybe built from a single row or multiple rows.
    Returns the doc, rows, and data without the rows.
    """
    # all doctypes that have at least one mapped column (parent + child tables)
    doctypes = set([col.df.parent for col in self.columns if col.df and col.df.parent])

    # first row is included by default
    first_row = data[0]
    rows = [first_row]

    # if there are child doctypes, find the subsequent rows
    if len(doctypes) > 1:
        # subsequent rows either dont have any parent value set
        # or have the same value as the parent row
        # we include a row if either of conditions match
        parent_column_indexes = [
            col.index
            for col in self.columns
            if not col.skip_import and col.df and col.df.parent == self.doctype
        ]
        parent_row_values = [first_row[i] for i in parent_column_indexes]

        data_without_first_row = data[1:]
        for row in data_without_first_row:
            row_values = [row[i] for i in parent_column_indexes]
            # if the row is blank, it's a child row doc
            if all([v in INVALID_VALUES for v in row_values]):
                rows.append(row)
                continue
            # if the row has same values as parent row, it's a child row doc
            if row_values == parent_row_values:
                rows.append(row)
                continue
            # if any of those conditions dont match, it's the next doc
            break

    def get_column_indexes(doctype):
        # positions of importable columns that belong to `doctype`
        return [
            col.index
            for col in self.columns
            if not col.skip_import and col.df and col.df.parent == doctype
        ]

    def validate_value(value, df):
        # Validate one cell against its docfield.
        # Returns False for an invalid Select value (so the field is skipped
        # by parse_doc); otherwise returns the value unchanged.
        # `row_number` resolves to the loop variable assigned later in this
        # method's scope — it is bound by the time this closure runs.
        if df.fieldtype == "Select":
            select_options = df.get_select_options()
            if select_options and value not in select_options:
                options_string = ", ".join([frappe.bold(d) for d in select_options])
                msg = _("Value must be one of {0}").format(options_string)
                self.warnings.append(
                    {
                        "row": row_number,
                        "field": df.as_dict(convert_dates_to_str=True),
                        "message": msg,
                    }
                )
                return False
        elif df.fieldtype == "Link":
            d = self.get_missing_link_field_values(df.options)
            if value in d.missing_values and not d.one_mandatory:
                msg = _("Value {0} missing for {1}").format(
                    frappe.bold(value), frappe.bold(df.options)
                )
                self.warnings.append(
                    {
                        "row": row_number,
                        "field": df.as_dict(convert_dates_to_str=True),
                        "message": msg,
                    }
                )
                # NOTE(review): a warning is recorded but the value is still
                # returned (and therefore imported) — confirm this is intended
                return value
        return value

    def parse_doc(doctype, docfields, values, row_number):
        # Build a plain dict for one doc from parallel (docfields, values).
        # new_doc returns a dict with default values set
        doc = frappe.new_doc(doctype, as_dict=True)
        # remove standard fields and __islocal
        for key in frappe.model.default_fields + ("__islocal",):
            doc.pop(key, None)
        for df, value in zip(docfields, values):
            if value in INVALID_VALUES:
                value = None
            value = validate_value(value, df)
            # NOTE(review): falsy values (0, False, "") are skipped here, so
            # they never override the doctype defaults — confirm intended
            if value:
                doc[df.fieldname] = self.parse_value(value, df)
        check_mandatory_fields(doctype, doc, row_number)
        return doc

    def check_mandatory_fields(doctype, doc, row_number):
        # check if mandatory fields are set (except table fields)
        meta = frappe.get_meta(doctype)
        fields = [
            df
            for df in meta.fields
            if df.fieldtype not in table_fields
            and df.reqd
            and doc.get(df.fieldname) in INVALID_VALUES
        ]
        if not fields:
            return
        if len(fields) == 1:
            self.warnings.append(
                {
                    "row": row_number,
                    "message": _("{0} is a mandatory field").format(fields[0].label),
                }
            )
        else:
            fields_string = ", ".join([df.label for df in fields])
            self.warnings.append(
                {"row": row_number, "message": _("{0} are mandatory fields").format(fields_string)}
            )

    # parse every collected row once per doctype it contributes to
    parsed_docs = {}
    for row in rows:
        for doctype in doctypes:
            if doctype == self.doctype and parsed_docs.get(doctype):
                # if parent doc is already parsed from the first row
                # then skip
                continue
            row_number = row[0]
            column_indexes = get_column_indexes(doctype)
            values = [row[i] for i in column_indexes]
            if all(v in INVALID_VALUES for v in values):
                # skip values if all of them are empty
                continue
            columns = [self.columns[i] for i in column_indexes]
            docfields = [col.df for col in columns]
            doc = parse_doc(doctype, docfields, values, row_number)
            parsed_docs[doctype] = parsed_docs.get(doctype, [])
            parsed_docs[doctype].append(doc)

    # build the doc with children
    doc = {}
    for doctype, docs in parsed_docs.items():
        if doctype == self.doctype:
            # parent doc: only the first parsed row is used
            doc.update(docs[0])
        else:
            # attach child docs under the first table field pointing at doctype
            table_dfs = self.meta.get(
                "fields", {"options": doctype, "fieldtype": ["in", table_fields]}
            )
            if table_dfs:
                table_field = table_dfs[0]
                doc[table_field.fieldname] = docs

    # check if there is atleast one row for mandatory table fields
    mandatory_table_fields = [
        df
        for df in self.meta.fields
        if df.fieldtype in table_fields and df.reqd and len(doc.get(df.fieldname, [])) == 0
    ]
    if len(mandatory_table_fields) == 1:
        self.warnings.append(
            {
                "row": first_row[0],
                "message": _("There should be atleast one row for {0} table").format(
                    mandatory_table_fields[0].label
                ),
            }
        )
    elif mandatory_table_fields:
        fields_string = ", ".join([df.label for df in mandatory_table_fields])
        self.warnings.append(
            {
                "row": first_row[0],
                "message": _("There should be atleast one row for the following tables: {0}").format(fields_string),
            }
        )
    return doc, rows, data[len(rows) :]
def process_doc(self, doc):
    """Insert or update `doc` according to the configured import type.

    Returns the resulting document, or None for an unknown import type.
    """
    handlers = {
        "Insert New Records": self.insert_record,
        "Update Existing Records": self.update_record,
    }
    handler = handlers.get(self.data_import.import_type)
    if handler:
        return handler(doc)
def insert_record(self, doc):
    """Create and insert a new document from the parsed values.

    Auto-creates any missing Link targets that need only one field,
    then inserts; submits afterwards when the doctype is submittable
    and the import is configured to submit.
    """
    self.create_missing_linked_records(doc)

    record = frappe.new_doc(self.doctype)
    record.update(doc)
    # a fresh insert must not carry a preset name
    record.set("name", None)
    record.insert()

    should_submit = self.meta.is_submittable and self.data_import.submit_after_import
    if should_submit:
        record.submit()
    return record
def create_missing_linked_records(self, doc):
    """
    Finds fields that are of type Link, and creates the corresponding
    document automatically if it has only one mandatory field
    """
    link_values = []

    def collect_link_values(d, doctype):
        # walk the doc (and its child-table rows) gathering (doctype, value)
        # pairs for every populated Link field
        meta = frappe.get_meta(doctype)
        for fieldname, value in d.items():
            df = meta.get_field(fieldname)
            if not df:
                continue
            if df.fieldtype == "Link" and value not in INVALID_VALUES:
                link_values.append([df.options, value])
            elif df.fieldtype in table_fields:
                for child_row in value:
                    collect_link_values(child_row, df.options)

    collect_link_values(doc, self.doctype)

    for link_doctype, link_value in link_values:
        missing = self.missing_link_values.get(link_doctype)
        if not (missing and missing.one_mandatory and link_value in missing.missing_values):
            continue
        # name the new record via the autoname field when one exists
    # (falls back to setting the document name directly)
        autoname_field = self.get_autoname_field(link_doctype)
        target_fieldname = autoname_field.fieldname if autoname_field else "name"
        linked = frappe.new_doc(link_doctype)
        linked.set(target_fieldname, link_value)
        linked.insert()
        missing.missing_values.remove(link_value)
def update_record(self, doc):
    """Load the existing document named in the ID column, overwrite it with
    the imported values, and save."""
    record_name = doc[self.get_id_fieldname()]
    existing = frappe.get_doc(self.doctype, record_name)
    # let hooks know this save came from a data import
    existing.flags.via_data_import = self.data_import.name
    existing.update(doc)
    existing.save()
    return existing
def export_errored_rows(self):
    """Respond with a CSV containing only the source rows that failed to import."""
    from frappe.utils.csvutils import build_csv_response

    if not self.data_import:
        return

    log_entries = frappe.parse_json(self.data_import.import_log or "[]")

    # collect the row indexes of every failed record (set → de-duplicated)
    failed_indexes = set()
    for entry in log_entries:
        if entry.get("success") == False:
            failed_indexes.update(entry.get("row_indexes", []))

    # the first column of each stored row holds the row index; strip it
    csv_rows = [[col.header_title for col in self.columns[1:]]]
    csv_rows.extend(row[1:] for row in self.rows if row[0] in failed_indexes)
    build_csv_response(csv_rows, self.doctype)
def get_missing_link_field_values(self, doctype):
    """Return the missing-link bookkeeping entry for `doctype`.

    The entry (built by prepare_missing_link_field_values) carries
    `missing_values`, `one_mandatory` and `df`; an empty dict is
    returned when the doctype was never scanned.
    """
    fallback = {}
    return self.missing_link_values.get(doctype, fallback)
def prepare_missing_link_field_values(self):
    """Scan every Link column and record which referenced documents do not exist.

    Populates ``self.missing_link_values``: a dict keyed by the linked
    doctype, each value a frappe._dict with
      - missing_values: link values with no matching document,
      - one_mandatory: whether the doctype can be auto-created from one value,
      - df: the docfield of the first column linking to that doctype.
    """
    self.missing_link_values = {}
    # iterate the Link columns directly instead of re-indexing into
    # self.columns (the original `columns[col.index]` silently relied on
    # col.index matching the list position)
    for col in self.columns:
        if not (col.df and col.df.fieldtype == "Link"):
            continue
        doctype = col.df.options
        # distinct non-empty values in this column (set comprehension
        # replaces the original set([...]) wrapper)
        values = {row[col.index] for row in self.rows if row[col.index] not in INVALID_VALUES}
        missing_values = [value for value in values if not frappe.db.exists(doctype, value)]
        entry = self.missing_link_values.get(doctype)
        if entry:
            # another column links to the same doctype: merge the misses
            entry.missing_values += missing_values
        else:
            self.missing_link_values[doctype] = frappe._dict(
                missing_values=missing_values,
                one_mandatory=self.has_one_mandatory_field(doctype),
                df=col.df,
            )
def get_id_fieldname(self):
    """Fieldname that identifies existing records: the autoname field if the
    doctype names documents by field, otherwise 'name'."""
    autoname_field = self.get_autoname_field(self.doctype)
    return autoname_field.fieldname if autoname_field else "name"
def get_eta(self, current, total, processing_time):
    """Estimate seconds remaining for the import.

    Keeps the smallest estimate seen so far (self.last_eta) so the
    reported ETA only counts down, never jumps back up.
    """
    projected = (total - current) * processing_time
    if not self.last_eta or projected < self.last_eta:
        self.last_eta = projected
    return self.last_eta
def has_one_mandatory_field(self, doctype):
    """True if exactly one value is enough to create a `doctype` record.

    Counts mandatory fields that have no default; naming by prompt
    counts as one more required input.
    """
    meta = frappe.get_meta(doctype)
    # get mandatory fields with default not set
    required = [df for df in meta.fields if df.reqd and not df.default]
    required_count = len(required)
    if meta.autoname and meta.autoname.lower() == "prompt":
        required_count += 1
    return required_count == 1
def get_autoname_field(self, doctype):
    """Return the docfield named by an `autoname = field:<fieldname>` rule,
    or None when the doctype is not named by field."""
    meta = frappe.get_meta(doctype)
    autoname = meta.autoname
    if not (autoname and autoname.startswith("field:")):
        return None
    fieldname = autoname[len("field:"):]
    return meta.get_field(fieldname)
def print_grouped_warnings(self, warnings):
    """Print import warnings to stdout, grouped by the row they refer to.

    Warnings without a "row" key are printed after the grouped ones.
    """
    warnings_by_row = {}
    other_warnings = []
    for w in warnings:
        if w.get("row"):
            warnings_by_row.setdefault(w.get("row"), []).append(w)
        else:
            other_warnings.append(w)

    # fix: the original loop rebound the `warnings` parameter here,
    # shadowing the argument — use a distinct name
    for row_number, row_warnings in warnings_by_row.items():
        print("Row {0}".format(row_number))
        for w in row_warnings:
            print(w.get("message"))

    for w in other_warnings:
        print(w.get("message"))
def print_import_log(self, import_log):
    # Console summary of an import run: prints success/failure counts and,
    # when there are failures, writes a details file (row indexes, messages,
    # tracebacks) for offline inspection.
    failed_records = [l for l in import_log if not l.success]
    successful_records = [l for l in import_log if l.success]

    if successful_records:
        print(
            "Successfully imported {0} records out of {1}".format(
                len(successful_records), len(import_log)
            )
        )

    if failed_records:
        print("Failed to import {0} records".format(len(failed_records)))
        file_name = '{0}_import_on_{1}.txt'.format(self.doctype, frappe.utils.now())
        # NOTE(review): the message points at sites/<file_name> but the file is
        # opened with a bare relative path below — these only agree when the
        # process cwd is the bench root / sites parent; confirm for other
        # entry points.
        print('Check {0} for errors'.format(os.path.join('sites', file_name)))
        text = ""
        for w in failed_records:
            text += "Row Indexes: {0}\n".format(str(w.get('row_indexes', [])))
            # assumes each entry in `messages` is a plain string —
            # TODO confirm the shape of frappe.local.message_log entries
            text += "Messages:\n{0}\n".format('\n'.join(w.get('messages', [])))
            text += "Traceback:\n{0}\n\n".format(w.get('exception'))
        with open(file_name, 'w') as f:
            f.write(text)
# Candidate date formats, tried in order — the first that parses wins,
# so day-first variants take precedence over month-first for ambiguous input.
DATE_FORMATS = [
    r"%d-%m-%Y",
    r"%m-%d-%Y",
    r"%Y-%m-%d",
    r"%d-%m-%y",
    r"%m-%d-%y",
    r"%y-%m-%d",
    r"%d/%m/%Y",
    r"%m/%d/%Y",
    r"%Y/%m/%d",
    r"%d/%m/%y",
    r"%m/%d/%y",
    r"%y/%m/%d",
    r"%d.%m.%Y",
    r"%m.%d.%Y",
    r"%Y.%m.%d",
    r"%d.%m.%y",
    r"%m.%d.%y",
    r"%y.%m.%d",
]

# Candidate time formats, most specific first.
TIME_FORMATS = [
    r"%H:%M:%S.%f",
    r"%H:%M:%S",
    r"%H:%M",
    r"%I:%M:%S.%f %p",
    r"%I:%M:%S %p",
    r"%I:%M %p",
]


def _first_matching_format(value, formats):
    """Return the first strptime format in `formats` that parses `value`, else None."""
    for f in formats:
        try:
            datetime.strptime(value, f)
            return f
        except ValueError:
            continue
    return None


def guess_date_format(date_string):
    """Guess the strptime format of `date_string`.

    The string may be a date, a date followed by a time (separated by the
    first space), or just unparsable text. Returns the combined format
    string, or None when neither part matches any known format.
    """
    date_string = date_string.strip()

    _time = None
    if " " in date_string:
        _date, _time = date_string.split(" ", 1)
    else:
        _date = date_string

    date_format = _first_matching_format(_date, DATE_FORMATS)
    time_format = _first_matching_format(_time, TIME_FORMATS) if _time else None

    # bug fix: the original did `full_format += " " + time_format`, which
    # raised TypeError when the time part parsed but the date part did not
    # (date_format was None); now the recognized part is returned instead.
    if date_format and time_format:
        return date_format + " " + time_format
    return date_format or time_format
def import_data(doctype, file_path):
    """Convenience wrapper: run a full import of `file_path` into `doctype`."""
    Importer(doctype, file_path).import_data()
| [
"netchamp.faris@gmail.com"
] | netchamp.faris@gmail.com |
c257da7a0180dbf630338ad35acd1a55e212f6fa | 703aa4509109552e91e1f3db39146f723b6256d0 | /motores.py | 5f695b4f176b91edc5fdb5ca8ec9aa0d10a29a45 | [] | no_license | alfredobs97/PythonMysqlScript | 2334fb0392b8039862e77c382d7a23b4763bc8ed | 91bcaab69ee4f518697cc3dd15e7bf43bab4465c | refs/heads/master | 2021-01-13T15:47:43.093460 | 2017-02-09T16:59:01 | 2017-02-09T16:59:01 | 79,963,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #!/usr/bin/python
import MySQLdb
db = MySQLdb.connect(host="192.168.8.16",
user="root",
passwd="1234",
db="mysql")
cur = db.cursor()
cur.execute("SHOW ENGINES")
ver = cur.fetchall()
print "Version de Mysql : %s" %ver
| [
"alfredobautista1@gmail.com"
] | alfredobautista1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.