text stringlengths 8 6.05M |
|---|
class Check_negative_positve:
    """Interactively read a number and report whether it is negative, zero, or positive.

    The misspelled class name ("positve") is preserved because external callers
    may reference it.
    """

    def __init__(self):
        # Keep prompting until the user supplies a parseable number.
        # (The original recursed by constructing a NEW instance on bad input and
        # throwing it away, which left self.x unset on this instance.)
        while True:
            raw = input("input number")
            try:
                self.x = float(raw)
                break
            except ValueError:
                print("input is not a number")

    def Check(self):
        """Print the sign of self.x, then wait for the user to press enter."""
        if self.x < 0:
            print("this is a negative number")
        elif self.x == 0:
            print("this number is zero")
        else:
            print("this number is positive")
        input("press enter to continue...")
|
"""
This script reads power through the wire of a 220v AC sine signal
This script can be called in console for debug purpose
If called by an external software (eg Chaudiere app), the entry point is api_get_watt_values()
Hardware interface
SCT-013-030-30A-1V-ac-current-sensor is connected to ADS1115
ADS1115 isconnected to Raspberry via i2C
ADS1115 has four analog inputs
analog input is biased to 3,3v/2 = 1.6v
With no power in AC this should give in theory 1600 mv read from ADC
In Practice, around 1635 to 1640 is read. So we consider that below 1650 (MIN_VALUE), the AC power is 0
"""
import ADS1115
import os, sys, argparse, string, datetime, time
import logging, logging.config
import glob
# Script Constants
WATT_SENSOR_SIZE = 4
DEFAULT_SENSOR_VALUE = None #if no sensor value is read then recorded value is DEFAULT_SENSOR_VALUE
MIN_VALUE = 1654 #if sensor value < MIN_VALUE then recorded value is 0
currentpath = os.path.abspath(os.path.dirname(__file__)) # /home/pi/Dev/chaudiere/script
projectpath = os.path.dirname(currentpath) # /home/pi/Dev/chaudiere
envpath = os.path.dirname(projectpath) # /home/pi/Dev
envname = os.path.basename(envpath) # Dev
# import and get logger
logger_directory = os.path.join(projectpath, 'logger')
sys.path.append(logger_directory)
import logger_stdout
logger = logging.getLogger(__name__)
"""
Return a list of four integer values read from ADC
"""
def api_get_watt_values():
values = get_watt_values()
if not values:
logger.warning("get watt failed, returning default sensor value")
values = []
for n in range(0, WATT_SENSOR_SIZE):
values.append(DEFAULT_SENSOR_VALUE)
return (values)
# Replace value lower than MIN_VALUE to zero (int)
def get_watt_values():
    """Read the ADC and clamp sub-threshold readings to 0; False on read failure."""
    raw = read_adc()
    if not raw:
        return False
    clamped = []
    for reading in raw:
        value = int(reading)
        # Below MIN_VALUE the sensor noise floor is treated as "no AC power".
        clamped.append(0 if value < MIN_VALUE else value)
    return clamped
# Read values (mV) from the four inputs of ADS1115
# Return a list of four values [ A0, A1, A2, A3]
# considering that the 220v AC signal is a sine, multiple samples are read and the max
# is returned (approx equal to the peak value of the sine signal)
def read_adc():
    """Sample all four ADS1115 channels and return the per-channel max voltage.

    Returns a 4-element list of voltages (mV) on success, or False when the
    ADS1115 cannot be initialized.
    """
    try:
        # Initialize ADC converter
        ads = ADS1115.ADS1115()
    except IOError as e:
        # Typical case: I2C device absent/unreachable.
        logger.error(f'ADS1115 not available: {e}')
        return False
    except Exception as e:
        # Unexpected failure: log with traceback but keep the caller alive.
        logger.error(f'Exception {e}', exc_info=True)
        # raise # print traceback / raise to higher level
        return False
    else:
        num_samples = 30
        channels = [0, 1, 2, 3]
        max_voltage = [0, 0, 0, 0]
        for ch in channels:
            for x in range(0, num_samples):
                voltage = ads.readADCSingleEnded(channel=ch)
                # Track the peak over the sampling window.
                max_voltage[ch] = max(voltage, max_voltage[ch])
        return max_voltage
def debug_read_channels():
    """Log the peak voltage observed on each ADS1115 channel over 30 samples."""
    ads = ADS1115.ADS1115()
    sample_count = 30
    for channel in [0, 1, 2, 3]:
        peak = 0
        for _ in range(sample_count):
            peak = max(ads.readADCSingleEnded(channel=channel), peak)
        formatted = '{:4.0f}'.format(peak)
        logger.info('channel '+ str(channel) + '\t'+str(formatted))
# For debug purpose, calling main() reads values from ADC and prints to console
# twice per second (sleep is 0.5 s; the old comment claiming 1 second was stale)
def main():
    """Poll get_watt_values() forever and log the formatted readings."""
    while True:
        values = get_watt_values()
        if values:
            logger.info(['{:4.0f}'.format(value) for value in values])
        else:
            # get_watt_values() returns False when the ADC read fails; the
            # original crashed here trying to format/iterate False.
            logger.warning('no values read from ADC')
        time.sleep(0.5)
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
import json
import re
import datetime
import jsonpickle
"""
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, datetime.date):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class DateTimeDecoder(json.JSONDecoder):
'''
decodifica una fecha en formato iso
2016-01-0717:54:20.928462
'''
def __init__(self):
super().__init__()
self.format = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d*$')
def decode(self, str):
if self.re.match(str):
d = datetime.datetime.strptime(str, "%Y-%m-%dT%H:%M:%S")
return d
else:
return super.decode(str)
"""
class Serializer:
    """Static facade that (de)serializes arbitrary objects via jsonpickle.

    Previously implemented with json.dumps/loads plus custom datetime
    encoder/decoder classes (see the commented-out block above).
    """

    @staticmethod
    def dumps(obj):
        """Serialize *obj* to a jsonpickle JSON string."""
        encoded = jsonpickle.encode(obj)
        return encoded

    @staticmethod
    def loads(str):
        """Reconstruct an object from a jsonpickle JSON string."""
        decoded = jsonpickle.decode(str)
        return decoded
|
#! python3
# downloadXKCD.py - Downloads EVERY single XKCD comic!
import requests, os, bs4

comicUrl = ''
url = 'https://xkcd.com'  # Starting url
os.makedirs('xkcd', exist_ok=True)  # Store comics in ./xkcd
# The "Prev" link on the very first comic points to '#', which ends the crawl.
while not url.endswith('#'):
    # Download the page
    print('Downloading page %s...' % url)
    res = requests.get(url)
    res.raise_for_status()
    # Name the parser explicitly: bare BeautifulSoup(res.text) emits a warning
    # and may pick different parsers on different machines.
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # Find the URL of the comic image
    comicElem = soup.select('#comic img')
    if not comicElem:
        print('Could not find the comic image.')
    else:
        comicUrl = 'https:' + comicElem[0].get('src')
        # Download the image.
        print('Downloading image %s...' % (comicUrl))
        res = requests.get(comicUrl)
        res.raise_for_status()
        # Save the image file to ./xkcd
        with open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb') as imageFile:
            for chunk in res.iter_content(100000):
                imageFile.write(chunk)
    # Get the Prev button's url.
    # Fixed selector: 'a[rel="prev", accesskey="p"]' is invalid CSS — commas
    # separate whole selectors and cannot appear inside [...]; soupsieve raises
    # on it. The rel attribute alone identifies the Prev link.
    prevLink = soup.select('a[rel="prev"]')[0]
    url = 'https://xkcd.com' + prevLink.get("href")
print('Done.')
|
import cv
from PyQt4 import QtCore
class CameraDevice(QtCore.QObject):
    """Qt wrapper around an OpenCV capture device.

    Polls the camera on a QTimer and re-emits each grabbed frame through the
    ``newFrame`` signal. Legacy stack: old `cv` (pre-cv2) API + PyQt4.
    """
    _DEFAULT_FPS = 30  # fallback when the driver reports no/zero FPS
    newFrame = QtCore.pyqtSignal(cv.iplimage)  # emitted once per grabbed frame

    def __init__(self, cameraId=0, mirrored=False, parent=None):
        """Open camera *cameraId* and start polling immediately.

        :param cameraId: OpenCV camera index
        :param mirrored: if True, frames are flipped horizontally before emit
        :param parent: Qt parent object
        """
        super(CameraDevice, self).__init__(parent)
        self.mirrored = mirrored
        self._cameraDevice = cv.CaptureFromCAM(cameraId)
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        # NOTE(review): 1000/self.fps is a float under Python 3; QTimer.setInterval
        # expects msec as int — confirm target Python version or wrap in int().
        self._timer.setInterval(1000/self.fps)
        # This assignment goes through the `paused` property setter below,
        # which starts the timer (i.e. begins capturing).
        self.paused = False

    @QtCore.pyqtSlot()
    def _queryFrame(self):
        """Timer slot: grab one frame (optionally mirrored) and emit it."""
        frame = cv.QueryFrame(self._cameraDevice)
        if self.mirrored:
            # Flip around the vertical axis (flipMode=1) into a fresh image.
            mirroredFrame = cv.CreateImage(cv.GetSize(frame), frame.depth, frame.nChannels)
            cv.Flip(frame, mirroredFrame, 1)
            frame = mirroredFrame
        self.newFrame.emit(frame)

    @property
    def paused(self):
        """True while the polling timer is stopped."""
        return not self._timer.isActive()

    @paused.setter
    def paused(self, p):
        # Pausing stops the poll timer; resuming restarts it.
        if p:
            self._timer.stop()
        else:
            self._timer.start()

    @property
    def frameSize(self):
        """Capture resolution as an (int width, int height) tuple."""
        w = cv.GetCaptureProperty(self._cameraDevice, cv.CV_CAP_PROP_FRAME_WIDTH)
        h = cv.GetCaptureProperty(self._cameraDevice, cv.CV_CAP_PROP_FRAME_HEIGHT)
        return int(w), int(h)

    @property
    def fps(self):
        """Driver-reported FPS, or _DEFAULT_FPS when the driver reports <= 0."""
        fps = int(cv.GetCaptureProperty(self._cameraDevice, cv.CV_CAP_PROP_FPS))
        if not fps > 0:
            fps = self._DEFAULT_FPS
        return fps
|
from flask import Flask
from flask import request
import inputParsing.equationParse as rpn
app = Flask(__name__)
@app.route("/")
def main():
return rpn.rpnToString(rpn.shuntingYardAlgorithm('( ( 15 / ( 7 - ( 1 + 1 ) ) ) * 3 ) - ( 2.4 + ( 1 + 1 ) )'))
@app.route("/api/infixNotation")
def infix():
if 'q' not in request.args:
return "you absolute fool... enter a query with the parameter 'q'!"
return rpn.rpnToString(rpn.shuntingYardAlgorithm(request.args['q']))
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=80)
|
import smartpy as sp
class Transaction(sp.Contract):
    """Single-use SmartPy forwarding contract: relays one payment to a
    destination address, records it in storage, then locks itself."""

    def __init__(self):
        # Contract storage: last transaction details plus a one-shot lock flag.
        self.init(
            origin = sp.address('tz1'),
            destiny = sp.address('tz2'),
            amount = sp.tez(0),
            immutability = sp.bool(False)
        )

    @sp.entry_point
    def transaction(self, params):
        # Refuse to run twice: the flag is set at the end of the first call.
        sp.verify(self.data.immutability == False)
        # Transfer transaction amount to contract's balance (forwarding)
        # NOTE(review): assigning to sp.balance looks like a no-op in SmartPy —
        # the incoming sp.amount is credited to the balance automatically.
        # Confirm this line is intentional.
        sp.balance = sp.amount
        # Register transaction
        self.data.origin = sp.sender
        self.data.destiny = params.destiny
        self.data.amount = sp.amount
        # Execute transaction
        sp.send(params.destiny, sp.amount)
        # Set immutability
        self.data.immutability = True
@sp.add_test(name = "Test_Transaction")
def test():
scenario = sp.test_scenario()
contract = Transaction()
scenario += contract
scenario.h3("Transact")
scenario += contract.transaction(destiny=sp.address("tz1")).run(sender=sp.address("tz2"), amount=sp.tez(3000))
|
class Solution:
    # https://leetcode.com/problems/median-of-two-sorted-arrays/discuss/2511/Intuitive-Python-O(log-(m+n))-solution-by-kth-smallest-in-the-two-sorted-arrays-252ms
    def findMedianSortedArrays(self, nums1, nums2):
        """Median of two sorted arrays via k-th smallest selection.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        total = len(nums1) + len(nums2)
        if total % 2:
            return self.getKth(nums1, nums2, total // 2 + 1)
        lower = self.getKth(nums1, nums2, total // 2)
        upper = self.getKth(nums1, nums2, total // 2 + 1)
        return (lower + upper) / 2

    def getKth(self, a, b, k):
        """Return the k-th smallest (1-based) element of the merged sorted arrays."""
        # Recurse with the shorter array first so `a` can empty out.
        if len(a) > len(b):
            return self.getKth(b, a, k)
        if not a:
            return b[k - 1]
        if k == 1:
            return min(a[0], b[0])
        # Discard roughly k/2 elements from one side each step.
        split_a = min(k // 2, len(a))
        split_b = k - split_a
        pivot_a, pivot_b = a[split_a - 1], b[split_b - 1]
        if pivot_a < pivot_b:
            return self.getKth(a[split_a:], b, split_b)
        if pivot_a > pivot_b:
            return self.getKth(a, b[split_b:], split_a)
        return pivot_a
|
import ntdll
import kernel32
# Merged lookup table of API definitions from both modules.
# kernel32 entries override ntdll entries on key collisions (dict.update order).
api_defs = {}
api_defs.update(ntdll.api_defs)
api_defs.update(kernel32.api_defs)
def getImportApi(impname):
    """Look up an API definition by case-insensitive import name (None if unknown)."""
    return api_defs.get(impname.lower())
|
import secrets

# SystemRandom draws from the OS CSPRNG — appropriate for OTPs (unlike `random`).
secretsgen = secrets.SystemRandom()
print("Generating 6 digits random OTP ")
# randrange's upper bound is EXCLUSIVE: use 1000000 so 999999 is a possible OTP
# (the original randrange(100000, 999999) could never produce 999999).
otp = secretsgen.randrange(100000, 1000000)
print("Secure random One-Time-Password(OTP) ", otp)
#
# @author: ChrisMCodes
#
# purpose: mostly just a general webscraper
# (with a few fun features after the scrape)
# Users can export scraped data to txt
# or CSV after scraping.
#
# fun fact: my IDE doesn't seem to recognize the shebang *facepalm*
#!/usr/bin/env python3
# change python3 to python in the shebang above for Windows environments
#
# imports
import os
import sys
import time
from bs4 import BeautifulSoup
import requests
#
# This is the scraper class
#
# It prompts for a site and class to scrape
# (future iterations of this may have more than just class scraping)
# then returns the data, as well as the number of results.
#
# Results can be outputted to a CSV or TXT
#
class Pagedata:
    """Interactive web scraper.

    Downloads the page given at construction, prompts for an element/class/id
    to scrape, prints the results, then offers export (txt/CSV) and novelty
    options in a menu loop.
    """
    page = ""     # stores the requests.Response of the page being scraped
    cls = ""      # saves the element/class/id name to scrape
    count = 0     # saves the number of results
    results = ""  # saves the output of the scrape (list of bs4 tags)

    def __init__(self, website):
        self.open_page(website)
        self.always_do_these()
        self.additional_options()

    def always_do_these(self):
        """
        This method is really just an extension of __init__
        It calls the methods that will always apply when this program is run
        :return:
        """
        data_attr = self.get_attr()
        self.scrape_class(data_attr)
        self.results = self.beautiful_scrape(data_attr)
        self.print_results(self.results)

    def additional_options(self):
        """
        These are the optional methods that may or may not apply when the program is run,
        plus a method used to exit the program.
        :return:
        """
        answer = 200  # sentinel: any non-menu value keeps the loop alive
        while answer:
            print("\n\nPlease choose from the following options: ")
            print("Quit program: 0")
            print("Write results to text file: 1")
            print("Write results to CSV: 2")
            print("Read joke about Soviet Russia: 3")
            print("View the first few terms of the Fibonacci Sequence: 4")
            try:
                answer = int(input())
            except (ValueError, TypeError):
                answer = 200
            # Re-prompt on anything outside the menu. (The original put this
            # `continue` inside a `finally` block, which is a SyntaxError before
            # Python 3.8 and hard to follow after it.)
            if answer not in [0, 1, 2, 3, 4]:
                print("Input not valid")
                answer = 200
                continue
            self.go_to_method(answer)

    def go_to_method(self, answer):
        """
        This is called within additional_options().
        Its sole purpose is to take the input and call the appropriate method
        :param answer: int
        :return:
        """
        if answer == 0:
            self.exit_program()
        elif answer == 1:
            self.txt_file()
        elif answer == 2:
            self.csv_file()
        elif answer == 3:
            self.soviet_joke()
        else:
            self.fibonacci()

    def exit_program(self):
        """
        Exits gracefully
        :return:
        """
        print("Goodbye!")
        time.sleep(1)
        sys.exit(0)

    def txt_file(self):
        """
        Initiates a TextOutput object
        and uses its write_to_text_file() method
        to create a .txt of the data
        :return:
        """
        try:
            file = TextOutput()
            file.write_to_txt_file(self.results)
        except Exception as e:
            print("Something went wrong.")
            # str(e): concatenating an Exception directly raised a TypeError.
            print("The computer provides this message: " + str(e))
            print("Exiting.")

    def csv_file(self):
        """
        Initiates a CSVOutput object
        and uses its write_to_csv_file() method
        to create a .csv of the data
        :return:
        """
        try:
            file = CSVOutput()
            file.write_to_csv_file(self.results)
        except Exception as e:
            print("Something went wrong.")
            # str(e): concatenating an Exception directly raised a TypeError.
            print("The computer provides this message: " + str(e))
            print("Exiting.")

    def soviet_joke(self):
        """
        This method is just comic relief.
        It can be safely removed with no consequences to the rest of the program.
        (Just be sure to update the additional_options() method and the
        go_to_method() method accordingly)
        :return:
        """
        print("\nA Soviet judge walks out of his chambers.")
        print("He can be heard laughing uproariously.")
        print("A colleague sees him and asks, \"What's so funny?\"")
        print("\n\"I've just heard the greatest joke!\" he answers.")
        time.sleep(1)
        print("\nA bit of time passes...")
        for i in range(2):
            for i in range(3):
                time.sleep(0.5)
                print(".", end="")
            print()
        print("\n\"Well?\" asks the colleague, \"What was the joke?!\"")
        print("\nThe judge thinks for a second and answers, \"Oh, it would be imprudent to say.")
        print("I just gave the gentleman in my courtroom ten years of hard labor for telling it!\"\n")

    def fibonacci(self, a=0, b=1, i=0, terms=10):
        """
        Fibonacci sequences are fun!
        :param a: int current value
        :param b: int next value
        :param i: int current iteration
        :param terms: int number of iterations
        :return:
        """
        if i == 0:
            print("How many terms of the sequence would you like to see?")
            try:
                terms = int(input())
                # terms == 0 would never hit either stop condition below and
                # recurse until RecursionError, so reject anything below 1
                # (the original only caught terms < 0).
                if terms < 1:
                    print("Invalid number of terms.")
                    terms = 10
                    print("Number of terms has been set to 10")
            except (ValueError, EOFError):
                print("Invalid input")
                print("Number of terms has been set to 10")
                terms = 10
        if i == terms - 1:
            print("\nFinal term: {:,}".format(a))
            return
        if i == terms - 2:
            print("{:,}".format(a))
        elif i % 10 == 0:
            # Start a fresh line every 10 terms.
            print("\n{:,}".format(a), end="; ")
        else:
            print("{:,}".format(a), end="; ")
        c = a + b
        a = b
        b = c
        i += 1
        return self.fibonacci(a, b, i, terms)

    def cycle_through_results(self):
        """
        prints scraped data
        :return:
        """
        print("\n\n")
        for i, result in enumerate(self.results, start=1):
            print(f"Result #{i}: {result.get_text()}\n")

    def open_page(self, website):
        """
        Fetches *website* and stores the response in self.page.
        Exits the program if the request fails.
        :param website: str URL
        :return:
        """
        try:
            self.page = requests.get(website)
        except Exception as ex:
            print("Something went wrong: ")
            print(ex)
            print("The program will now exit. Goodbye!")
            self.exit_program()

    def get_attr(self):
        """
        Asks how to search (element / class / id) and returns the bs4 keyword
        prefix used by scrape_class() and beautiful_scrape().
        """
        choices = {1: "", 2: "class_=", 3: "id="}
        valid = False
        while not valid:
            print("\nHow would you like to search the page?")
            print("Search by element: 1")
            print("Search by class: 2")
            print("Search by id: 3")
            try:
                choice = int(
                    input("Please enter the number that corresponds to your choice: "))
            except (ValueError, EOFError):
                # Narrowed from a bare except, which also swallowed Ctrl-C.
                print("Invalid choice.\n")
                choice = 0
            valid = choice in choices
        return choices[choice]

    def scrape_class(self, data_attr):
        """
        This is really just getting the HTML
        element, id, or class to scrape.
        :return:
        """
        attr_name = "id"
        if data_attr == "":
            attr_name = "element"
        elif data_attr == "class_=":
            attr_name = "class"
        print(f"Please enter {attr_name} to scrape: ", end="")
        self.cls = input()

    def beautiful_scrape(self, data_attr):
        """
        parses HTML of site
        creates a list of results
        saves results to self.results
        and returns results (they're currently never used,
        but they may be in the future)
        :return:
        """
        soup = BeautifulSoup(self.page.content, "html.parser")
        if data_attr == "class_=":
            resulting_text = soup.find_all(class_=self.cls)
        elif data_attr == "id=":
            resulting_text = soup.find_all(id=self.cls)
        else:
            resulting_text = soup.find_all(self.cls)
        self.results = resulting_text
        return resulting_text

    def print_results(self, results):
        """
        prints the number of results and calls cycle_through_results()
        :param results: list of scraped results
        :return:
        """
        self.count = len(results)
        print(f"NUMBER OF RESULTS: {self.count}")
        self.cycle_through_results()
#
# This class outputs the results to a CSV
# It has undergone limited testing,
# so please feel free to break it and
# let me know what needs to be updated
#
class CSVOutput:
    """
    Exports scraped results to a CSV file in the current directory.

    Prompts for a filename (``.csv`` appended if missing) and a layout
    direction: 1 writes one value per row, 2 writes one comma-separated row.
    """
    direction = -1  # 1 = vertical (one value per line), 2 = horizontal
    filename = ""   # output filename, always ending in .csv

    def __init__(self):
        self.get_filename()
        self.check_filename()
        self.get_direction()

    def get_filename(self):
        """
        gets filename from user
        :return:
        """
        self.filename = input(
            "Please enter the name of the file you would like to write: ")

    def check_filename(self):
        """
        adds .csv to filename if it does not yet exist
        :return:
        """
        if not self.filename.endswith(".csv"):
            self.filename += ".csv"

    def get_direction(self):
        """
        Determines whether to print a column or a row of data
        :return:
        """
        valid = False
        while not valid:
            print("Please choose one of the following directions: ")
            print("1 for vertical. Your data would look like this on a spreadsheet: "
                  "\ndata\ndata\ndata")
            print("2 for horizontal. Your data would look like this on a spreadsheet: "
                  "data, data, data")
            try:
                self.direction = int(input())
            except (ValueError, TypeError, EOFError):
                # Narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.
                print("Invalid input")
            if self.direction not in [1, 2]:
                print("Invalid input.")
            else:
                valid = True

    def write_to_csv_file(self, results):
        """
        Writes csv to current directory
        :param results: list of data
        :return:
        """
        try:
            full_path = "./" + self.filename
            with open(full_path, "w") as csv_file:
                # direction is fixed for the whole file: pick the separator once
                sep = "\n" if self.direction == 1 else ","
                for result in results:
                    text = result.get_text().strip().replace("\n", "")
                    csv_file.write(text + sep)
        except Exception as e:
            # Surface the actual error before bailing out (the original bare
            # except hid the cause entirely).
            print(e)
            print("Something went wrong. Please try again.")
            sys.exit(1)
        print("File written successfully in location: " + os.curdir)
#
# Exports data to text file
# Like the CSVOutput class,
# this class has undergone very little testing
#
class TextOutput:
    """
    Exports scraped results to a .txt file (one result per line) in the
    current directory.
    """
    filename = ""  # output filename, always ending in .txt

    def __init__(self):
        self.get_filename()
        self.check_filename()

    def get_filename(self):
        """
        gets filename from user
        :return:
        """
        self.filename = input(
            "Please enter the name of the file you would like to write: ")

    def check_filename(self):
        """
        adds .txt extension to file if it does not yet exist
        :return:
        """
        if not self.filename.endswith(".txt"):
            self.filename += ".txt"

    def write_to_txt_file(self, results):
        """
        exports data to .txt in current directory
        :param results: list of data
        :return:
        """
        try:
            full_path = "./" + self.filename
            with open(full_path, "w") as txt_file:
                for result in results:
                    text = result.get_text().strip().replace("\n", "")
                    txt_file.write(text + "\n")
        except Exception as e:
            print(e)
            print("Something went wrong. Please try again.")
            sys.exit(1)
        print("File written successfully in location: " + os.curdir)
#
# spoiler: __name__ DOES == '__main__'!!!!!!!!!!!!!11111omgomgomgOMGOMGOMG
#
# This is a rather silly convention, isn't it?
#
if __name__ == '__main__':
# This is my only global(ish). Gets URL from user
site = input("Please enter URL to scrape: ")
Pagedata(site) # <-- The magic and the madness happen here
# Here's a comment just to annoy pep8 dictators ;-)
|
from rest_framework import serializers
from accounts.models import User
from manager.models import certPage
class AIinfoSerialilzer(serializers.ModelSerializer):
    """Serializer for certPage records carrying AI-prediction results.

    (The misspelled class name is kept — external code may import it.)
    Korean help_text strings are runtime values and are left untouched.
    """
    # Owning user, referenced by primary key.
    user = serializers.PrimaryKeyRelatedField(
        help_text='유저',
        queryset=User.objects.all()
    )
    # Timestamp of the model's decision.
    time = serializers.DateTimeField(
        help_text='모델이 판단한 시간'
    )
    # Probability emitted by the mouse-pattern model.
    mouse_prediction = serializers.FloatField(
        help_text='AI가 판단한 총 확률'
    )
    # Probability emitted by the resource-pattern model.
    resource_prediction = serializers.FloatField(
        help_text='AI가 판단한 총 확률'
    )
    # total_prediction = serializers.CharField(
    #     help_text='AI가 판단한 총 확률'
    # )
    # Action code (per help_text): 2 = deactivate account, 3 = penalty points.
    type = serializers.IntegerField(
        help_text='계정 비활성화:2 / 벌점: 3'
    )
    label = serializers.CharField(
        # NOTE(review): `user.pk_field` is the attribute of the
        # PrimaryKeyRelatedField declared above, which is None unless set
        # explicitly — confirm this default is intended.
        default=user.pk_field,
        help_text='label'
    )
    # Mouse-pattern file reference.
    mouse_file_list = serializers.CharField(
        help_text='마우스 패턴 파일'
    )
    # Resource-pattern file reference.
    resource_file_list = serializers.CharField(
        help_text='리소스 패턴 파일'
    )

    class Meta:
        model = certPage
        fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of the User model."""
    class Meta:
        model = User
        fields = '__all__'
class UserPatternSerializer(serializers.Serializer):
    """Non-model serializer for a user's behavior-pattern upload payload."""
    # Account identifier.
    user = serializers.CharField(
        help_text='사용자계정',
    )
    # Whether the account is blocked.
    is_user_block = serializers.BooleanField(
        help_text='사용자계정 차단 여부',
    )
    # Uploaded mouse-pattern file.
    mouse_file = serializers.FileField(
        help_text='사용자 마우스 패턴',
    )
    # Uploaded resource-pattern file.
    resource_file = serializers.FileField(
        help_text='사용자 리소스 패턴',
    )
    # JWT taken from the cookie.
    cookie_jwt = serializers.CharField(
        help_text='쿠키의 jwt',
    )
|
import pandas as pd
from sklearn import preprocessing
from preprocessing import read, split, non_numerical_features, one_hot_encoding
from preprocessing import drop_features, deal_with_23 , deal_with_58
from postprocessing import writeoutput
from csv import DictReader, DictWriter
from sklearn.feature_selection import VarianceThreshold
from sklearn.externals import joblib
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesClassifier
from collections import Counter
import time
from sklearn.ensemble import RandomForestClassifier
from csv import DictReader, DictWriter
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
start = time.time()
#data = read('data_test.csv')
#quiz = read('quiz_test.csv')
data = read('data.csv')
label = data['label']
data = data.drop('label', axis = 1)
quiz = read('quiz.csv')
data = deal_with_23(data)
quiz = deal_with_23(quiz)
data = deal_with_58(data)
quiz = deal_with_58(quiz)
print(data.shape)
print(quiz.shape)
#data = data.drop('23', axis = 1)
#quiz = quiz.drop('23', axis = 1)
#data = data.drop('58', axis = 1)
#quiz = quiz.drop('58', axis = 1)
# One-hot encode the non-numeric columns in both train and quiz frames.
categories = non_numerical_features(data)
print(categories)
data, quiz = one_hot_encoding(data, quiz,categories)
print(list(data.columns.values) )
# NOTE(review): `stop` is undefined — this NameError deliberately(?) halts the
# script after inspecting the one-hot columns; delete this line to let the
# model-training section below execute (cf. the commented `#print(stop)` later).
print(stop)
data = drop_features(data, categories)
quiz = drop_features(quiz, categories)
print(data.shape)
print(quiz.shape)
#drop null from column 0
#data = data.drop('0_6', axis = 1)
#quiz = quiz.drop('0_6', axis = 1)
#data = data.drop('9_6', axis = 1)
#quiz = quiz.drop('9_6', axis = 1)
#data = data.drop('56_16', axis = 1)
#quiz = quiz.drop('56_16', axis = 1)
#data = data.drop('57_16', axis = 1)
#quiz = quiz.drop('57_16', axis = 1)
#data = data.drop('58_84', axis = 1)
#quiz = quiz.drop('58_84', axis = 1)
#drop the other classes from 23 and 58
#data = data.drop('23_203', axis = 1)
#quiz = quiz.drop('23_203', axis = 1)
#data = data.drop('58_139', axis = 1)
#quiz = quiz.drop('58_139', axis = 1)
#train_data = preprocessing.normalize(data)
#test_data = preprocessing.normalize(quiz)
print(data.shape)
print(quiz.shape)
sel = VarianceThreshold(threshold=(.97 * (1 - .97)))
selector = sel.fit(data)
data = selector.fit_transform(data)
print('Number of features used... ' + str(Counter(selector.get_support())[True]))
print('Number of features ignored... ' + str(Counter(selector.get_support())[False]))
idxs = selector.get_support(indices=True)
print(idxs)
quiz = quiz.values
quiz = quiz[:,idxs]
#quiz = selector.fit_transform(quiz)
print("after :")
print(data.shape)
print(quiz.shape)
#print(stop)
train_data = preprocessing.normalize(data)
test_data = preprocessing.normalize(quiz)
print("-------------------------------------")
print("Adaboost Classifier 1-200 ")
model2 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=16), algorithm="SAMME", n_estimators=500)
#train_data = data.values
#test_data = quiz.values
#train_data = data
#test_data = quiz
model2 = model2.fit(train_data,label.values.T)
output = model2.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output1 = model2.predict(test_data)
writeoutput('output1.csv',output1)
outputA = output1
print("-------------------------------------")
print("Random Forest Classifier 300 ")
model3 = RandomForestClassifier(n_estimators = 500)
model3 = model3.fit(train_data,label.values.T)
output = model3.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output2 = model3.predict(test_data)
writeoutput('output2.csv',output2)
'''
print("-------------------------------------")
print("Logical Regression ")
model4 = LogisticRegression()
model4 = model4.fit(train_data,label.values.T)
output = model4.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output3 = model4.predict(test_data)
writeoutput('output3.csv',output3)
'''
print("-------------------------------------")
print("K NN 2")
model5 = KNeighborsClassifier(n_neighbors=2)
model5 = model5.fit(train_data,label.values.T)
output = model5.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output5 = model5.predict(test_data)
writeoutput('output5.csv',output5)
for i in range(0,len(output5)):
value =output1[i] +(2*output2[i]) + (2*output5[i])
if value<0:
outputA[i] = -1
else:
outputA[i] = 1
writeoutput('output.csv',outputA)
done = time.time()
elapsed = done - start
print(elapsed)
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 09:09:38 2019
@author: Vall
"""
import iv_utilities_module as ivu
import iv_save_module as ivs
import numpy as np
import os
# Parameters
home = r'C:\Users\Valeria\OneDrive\Labo 6 y 7'
path = os.path.join(home, r'Muestras\SEM\LIGO5bis\1')
series = 'LIGO5bis_1'
# Load data
rwidth = []
rheight = []
height = []
width = []
hangle = []
wangle = []
for file in os.listdir(path):
if file.endswith("W.csv"):
rwidth.append(file.split('_W.csv')[0].split('_')[-1])
width.append(np.loadtxt(os.path.join(path, file),
delimiter=',',
skiprows=1)[:,-1])
wangle.append(np.loadtxt(os.path.join(path, file),
delimiter=',',
skiprows=1)[:,-2])
elif file.endswith("H.csv"):
rheight.append(file.split('_H.csv')[0].split('_')[-1])
height.append(np.loadtxt(os.path.join(path, file),
delimiter=',',
skiprows=1)[:,-1])
hangle.append(np.loadtxt(os.path.join(path, file),
delimiter=',',
skiprows=1)[:,-2])
# Organize length data
if rwidth!=rheight:
raise ValueError("¡Falta algún dato!")
rods = rwidth
height = np.array(height).T
width = np.array(width).T
del file, rwidth, rheight
# Organize angle data...
# ...1st fix the horizontal angles measured upside down
new_hangle = []
for ha in hangle:
new_ha = []
for i in ha:
difference = i - np.mean(ha)
if abs(difference)>90:
if abs(difference-180) < abs(difference+180):
new_ha.append(i-180)
else:
new_ha.append(i+180)
else:
new_ha.append(i)
new_hangle.append(new_ha)
del new_ha, i
hangle = np.array(new_hangle).T
del new_hangle
# ...2nd fix the vertical angles measured upside down
new_wangle = []
for wa in wangle:
new_wa = []
for j in wa:
difference = np.mean(wa) - j
if abs(difference)>90:
if abs(difference-180) < abs(difference+180):
new_wa.append(j-180)
else:
new_wa.append(j+180)
else:
new_wa.append(j)
new_wangle.append(new_wa)
del new_wa, j
wangle = np.array(new_wangle).T
del new_wangle
# ...3rd rotate vertical angles to be horizontal ones
new_wangle = []
for ha, wa in zip(hangle.T, wangle.T):
difference = np.mean(ha) - np.mean(wa)
if abs(difference-90) < abs(difference+90):
new_wangle.append(wa + 90)
else:
new_wangle.append(wa - 90)
wangle = np.array(new_wangle).T
del ha, wa, difference, new_wangle
# ...4th make all angles point between 0 and 135
angle = np.array([[*ha, *wa] for ha, wa in zip(hangle.T, wangle.T)]).T
new_angle = []
for a in angle.T:
if np.mean(a) < 0:
new_angle.append(a + np.ones(len(a))*180)
elif np.mean(a) > 180:
new_angle.append(a - np.ones(len(a))*180)
else:
new_angle.append(a)
angle = np.array(new_angle).T
del wangle, hangle, new_angle
# Get results
W = np.mean(width, axis=0)
dW = np.std(width, axis=0)
H = np.mean(height, axis=0)
dH = np.std(height, axis=0)
a = np.mean(angle, axis=0)
da = np.std(angle, axis=0)
# Apply correction due to method
# NOTE(review): adding one standard deviation to the mean (H + dH, W + dW) is
# presented as a systematic-bias correction of the measurement method — confirm
# this offset against the lab protocol before reusing these results.
H = H + dH
W = W + dW
# Aspect ratio and its propagated uncertainty (first-order error propagation).
A = H/W
dA = H*dW/W**2 + dH/W
# Organize results
results = np.array([W,dW,H,dH,A,dA,a,da]).T
heading = ["Ancho (nm)", "Error (nm)",
"Longitud (nm)", "Error (nm)",
"Relación de aspecto", "Error",
"Ángulo (°)", "Error (°)"]
# Save data
ivs.saveTxt(
os.path.join(path,'Resultados_SEM_{}.txt'.format(series)),
results,
header=heading, footer=dict(rods=rods),
overwrite=True
)
# Round and gather results
items = []
for i in range(len(rods)):
w = '\t'.join(ivu.errorValue(W[i], dW[i]))
h = '\t'.join(ivu.errorValue(H[i], dH[i]))
ra = '\t'.join(ivu.errorValue(A[i], dA[i], one_point_scale=True))
an = '\t'.join(ivu.errorValue(a[i], da[i]))
items.append('\t'.join([w, h, ra, an]))
del w, h, ra, an, W, H, A, a, dW, dH, dA, da
# Make OneNote table
heading = '\t'.join(heading)
items = ['\t'.join([n, r]) for n, r in zip(rods, items)]
items = '\n'.join(items)
heading = '\t'.join(['Rod', heading])
table = '\n'.join([heading, items])
ivu.copy(table)
del heading, items |
###################################
# INTALLS : - passlib #
###################################
from passlib.hash import pbkdf2_sha512
inf = "$pbkdf2-sha512$95846$"
def hash_password(password):
    """Hash *password* with PBKDF2-SHA512 and return it minus the scheme prefix.

    The stored value omits the leading "$pbkdf2-sha512$95846$" scheme/rounds
    prefix (module constant ``inf``); verify_password() re-attaches it.
    Returns None (after printing the error) if hashing fails.
    """
    try:
        hashed_password = pbkdf2_sha512.using(salt_size=16, rounds=95846).hash(password)
        # Strip the constant prefix by its actual length instead of the magic
        # number 21, so the slice stays correct if `inf` ever changes.
        print(hashed_password[len(inf):])
        return hashed_password[len(inf):]
    except Exception as e:
        print(e)
def verify_password(password, hashed):
    """Check *password* against a stored hash, re-attaching the `inf` prefix."""
    # Reading a module-level name needs no `global` declaration.
    full_hash = inf + hashed
    return pbkdf2_sha512.verify(password, full_hash)
import pandas as pd
import pyterrier as pt
import unittest
import os
from .base import BaseTestCase
class TestUtils(BaseTestCase):
    """Tests for pt.Utils helpers: TREC topic parsing, qrels parsing,
    result-to-dict conversion, and per-query evaluation."""

    def test_parse_trec_topics_file(self):
        """A TREC topics fixture parses into a (qid, query) DataFrame."""
        input = os.path.dirname(os.path.realpath(__file__)) + "/fixtures/topics.trec"
        exp_result = pd.DataFrame([["1", "light"], ["2", "radiowave"], ["3", "sound"]], columns=['qid', 'query'])
        result = pt.Utils.parse_trec_topics_file(input)
        self.assertTrue(exp_result.equals(result))

    def test_convert_df_to_pytrec_eval_float(self):
        """Float-scored result frames convert to nested qid->docno->score dicts."""
        input = pd.DataFrame([["1", "1", 12.5], ["1", "7", 4.3], ["2", "12", 8.5]], columns=["qid", "docno", "score"])
        exp_result = {"1": {"1": 12.5, "7": 4.3}, "2": {"12": 8.5}}
        result = pt.Utils.convert_res_to_dict(input)
        self.assertEqual(exp_result, result)

    def test_convert_df_to_pytrec_eval_int(self):
        """Integer scores survive the conversion unchanged."""
        input = pd.DataFrame([["1", "1", 1], ["1", "7", 0], ["2", "12", 1]], columns=["qid", "docno", "score"])
        exp_result = {"1": {"1": 1, "7": 0}, "2": {"12": 1}}
        result = pt.Utils.convert_res_to_dict(input)
        self.assertEqual(exp_result, result)

    def test_parse_qrels(self):
        """A qrels fixture parses into a (qid, docno, label) DataFrame."""
        input = os.path.dirname(os.path.realpath(__file__)) + "/fixtures/qrels"
        exp_result = pd.DataFrame([["1", "13", 1], ["1", "15", 1], ["2", "8", 1], ["2", "4", 1], ["2", "17", 1], ["3", "2", 1]], columns=['qid', 'docno', 'label'])
        result = pt.Utils.parse_qrels(input)
        #print(exp_result)
        #print(result)
        pd.testing.assert_frame_equal(exp_result, result)

    def test_evaluate(self):
        """Per-query MAP values (and their mean) match precomputed expectations."""
        input_qrels = pd.DataFrame([["1", "12", 1], ["1", "26", 1], ["1", "5", 1], ["1", "6", 1], ["2", "12", 1], ["2", "13", 1], ["2", "7", 1], ["2", "17", 1]], columns=["qid", "docno", "label"])
        input_res = pd.DataFrame([["1", "12", 3.917300970672472], ["1", "17", 3.912008156607317], ["1", "5", 3.895776784815295], ["1", "6", 1.6976053561565434], ["1", "11394", 1.419217511596875],
                                  ["2", "12", 3.352655284198764], ["2", "13", 3.3410694508732677], ["2", "7", 3.32843147860022], ["2", "15", 3.210614190096991], ["2", "17", 1.3688610792424558], ["2", "25", 1.2673250497019404]],
                                 columns=['qid', 'docno', 'score'])
        exp_result = [0.6042, 0.9500]
        result = pt.Utils.evaluate(input_res, input_qrels, perquery=True)
        # mapValue=result["map"]
        # result = ast.literal_eval(result)
        self.assertAlmostEqual(sum(exp_result) / len(exp_result), 0.7771, places=4)
        for i, item in enumerate(exp_result):
            self.assertAlmostEqual(result[str(i + 1)]["map"], item, places=4)
if __name__ == "__main__":
unittest.main()
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
import os
import sys
from setuptools import setup
INFO_PLIST_TEMPLATE = '''\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>%(name)s</string>
</dict>
</plist>
'''
try:
with open(os.path.join(os.path.dirname(sys.executable), 'Info.plist'), 'w') as f:
f.write(INFO_PLIST_TEMPLATE % {'name': 'gapa'})
except IOError:
pass
APP = ['gapa.py']
DATA_FILES = [ ('images', ['images/circle.png']) ]
OPTIONS = {
'argv_emulation': True,
'iconfile':'images/icon.icns',
'plist': {
'LSUIElement': True,
}
}
with open('README.adoc') as f:
readme = f.read()
setup(
name="Gapa",
version="1.0.0",
description="a mini tool that enables to hide desktop items",
author="Hakan Ozler",
url="https://github.com/ozlerhakan/gapa",
license="MIT License",
long_description=readme,
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
from console_progressbar import ProgressBar
from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
class StemTokenizer(object):
    """Callable pre-processor: tokenise, POS-tag, filter and stem one document.

    Instances are callables (suitable e.g. as a vectorizer ``tokenizer=``).
    A class-level counter shared by all instances drives the progress bar.
    """

    # Number of documents processed so far, shared across all instances.
    counter = 0

    def __init__(self, num_docs=None):
        """Args:
            num_docs: total document count, used to size the progress bar.
        """
        self.num_docs = num_docs
        self.pb = ProgressBar(
            total=self.num_docs,
            suffix="Pre-processing documents",
            decimals=0,
            length=50,
            fill="█",
            zfill="-",
        )

    def __call__(self, doc):
        """Return the list of stems for the words of *doc*.

        A word is kept only when:
        1) it is longer than one character,
        2) its POS tag is not in ``stop_pos_tags`` (numbers, adverbs,
           conjunctions, determiners),
        3) neither the word nor its stem appears in the English stop-word
           list (stopwords.words('english')).
        """
        # POS tags to exclude.
        stop_pos_tags = ["CD", "RB", "CC", "DT"]
        stemmer = PorterStemmer()
        # Perf fix: the original called stopwords.words("english") twice per
        # token, rebuilding a list and doing O(n) membership checks each time.
        # Build the set once per document; membership results are unchanged.
        stop_words = set(stopwords.words("english"))
        stemmed_words = []
        # Tokenise then POS-tag the document.
        for word, p_tag in pos_tag(word_tokenize(doc)):
            stemmed_word = stemmer.stem(word)
            if (
                len(word) > 1
                and p_tag not in stop_pos_tags
                and word not in stop_words
                and stemmed_word not in stop_words
            ):
                stemmed_words.append(stemmed_word)
        StemTokenizer.counter += 1
        # print('Done processing pre-processing doc', StemTokenizer.counter)
        self.pb.print_progress_bar(StemTokenizer.counter)
        return stemmed_words
|
import os  # module providing os.system
# (Roughly analogous to a C #include of a header.)
num = 0
while True:  # infinite loop: repeat until the user chooses to quit
    print("""
    ====메뉴====
    1.정수 입력
    2.입력된 정수 출력
    3.종료""")
    select = int(input("메뉴 선택 : "))
    if select == 1:
        # Store an integer entered by the user.
        num = int(input("정수 입력 : "))
    elif select == 2:
        # NOTE(review): entering 0 is indistinguishable from "nothing entered yet".
        if num != 0:
            print("입력된 정수 : %d"%num)
        else:
            print("정수를 먼저 입력하세요")
    else:
        exit(0)  # terminate the program
    os.system("pause")  # wait for a keypress (Windows only)
    os.system("cls")  # clear the console (Windows only)
|
# Count the divisors of n, printing divisors in yellow and non-divisors in
# red (ANSI escape codes); exactly two divisors means n is prime.
n = int(input('Digite um número: '))
div = 0
for c in range(1, n+1):
    if n % c == 0:
        div += 1
        print('\033[0;33m', c, '\033[m', end='')
    else:
        print('\033[0;31m', c, '\033[m', end='')
print(f"""\nO número {n} foi divisível {div} veze(s)
E por isso ele """, end='')
if div == 2:
    print('É PRIMO!')
else:
    print('NÃO É PRIMO!')
|
# Generated by Django 3.1.5 on 2021-03-02 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an ``email`` CharField to both user models."""

    dependencies = [
        ('JOB', '0009_jobapplied'),
    ]
    operations = [
        migrations.AddField(
            model_name='reqruiteruser',
            name='email',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='studentuser',
            name='email',
            field=models.CharField(default='', max_length=50),
        ),
    ]
|
import requests, random, logging
from kkbox_line_bot import app
from kkbox_line_bot.nlp import olami
from kkbox_line_bot.nlp.error import NlpServiceError
from linebot import LineBotApi, WebhookHandler
from linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageMessage, ImageSendMessage, VideoMessage, VideoSendMessage, AudioMessage
logger = logging.getLogger(__name__)
# LINE SDK entry points, configured from the Flask app config.
webhook_handler = WebhookHandler(app.config['LINE_CHANNEL_SECRET'])
line_bot_api = LineBotApi(app.config['LINE_CHANNEL_ACCESS_TOKEN'])
def ig_urls():
    """Scrape image display URLs from a fixed Instagram location page.

    The inline ``window._sharedData`` JSON blob is extracted from the HTML by
    naive string splitting (no JSON parsing); ``\\u0026`` escapes are restored
    to ``&``.

    Returns:
        list[str]: the display URLs found, or an empty list when the marker
        line is absent. (Bug fix: the original implicitly returned None in
        that case, making ``ig_urls()[:n]`` in the caller raise TypeError.)
    """
    url = 'https://www.instagram.com/explore/locations/262402199/'
    headers = {'user-agent': 'Fox Mulder'}
    r = requests.get(url, headers=headers)
    for line in r.text.splitlines():
        if '>window._sharedData' in line:
            urls = []
            for display_url in line.split('display_url":"')[1:]:
                urls.append(display_url.split('"')[0].replace('\\u0026', '&'))
            #print(urls)
            return urls
    # Marker not found (page layout changed, blocked request, ...): empty result.
    return []
@webhook_handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
    """Handle an incoming LINE text message.

    Three behaviors, chosen by message content:
    1. '北一最X' / '北一誰最X' — query or store a "who is the most X" entry in a
       Google Sheet (an '=' in the message stores, otherwise it queries).
    2. '口罩' — reply with a mask-availability map URL.
    3. otherwise — reply with up to 5 Instagram images, one per '讚'/'👍' in
       the message.
    Every message is also logged to the Google Sheet in the finally block.
    """
    logger.debug(event)
    olami_svc = olami.OlamiService(app.config['OLAMI_APP_KEY'],
                                   app.config['OLAMI_APP_SECRET'],
                                   cusid=None)#event.source.user_id)
    msg_txt = event.message.text.strip()
    reply = None
    try:
        if '北一最' in msg_txt or '北一誰最' in msg_txt:
            # Extract the adjective after '最', trimming trailing particles.
            adj = msg_txt.split('最')[1]
            for x in '的是誰呢ㄋ啊阿ㄚ嗎嘛ㄇ??':
                adj = adj.split(x)[0]
            # Accept both fullwidth '=' and ASCII '=' as the assignment mark.
            if '=' in adj or '=' in adj:
                adj, who = adj.split('=' if '=' in adj else '=')
                if adj and who:
                    # Store the pair in the Google Sheet via its query string.
                    requests.get(app.config['GOOGLE_SHEETS']+'?'+adj+'='+who)
                    reply = TextSendMessage(text='是喔!')
                else:
                    reply = TextSendMessage(text='蛤?')
            else:
                # Query mode: the sheet endpoint returns the stored name.
                who = requests.get(app.config['GOOGLE_SHEETS']+'?'+adj).text
                reply = TextSendMessage(text=who)
        elif '口罩' in msg_txt:
            reply = TextSendMessage(text='geobingan.info/#/event/mask')
        else:
            reply = []
            # One image per like-character, capped at 5 (LINE reply limit).
            likes = msg_txt.count('讚') + msg_txt.count('👍')
            for url in ig_urls()[:likes if likes < 5 else 5]:#random.sample(urls, count):
                #reply.append(TextSendMessage(text=url))
                reply.append(ImageSendMessage(
                    original_content_url=url,
                    preview_image_url=url))
        #resp = olami_svc(msg_txt[2:])
        #reply = resp.as_line_messages()
        #if event.source.user_id == 'U277d1a8cf7717e27e5d7d46971a64f65':
        #    reply = ImageSendMessage(
        #            original_content_url='https://www.1001000.io/img/cucumber.gif',
        #            preview_image_url='https://www.1001000.io/img/cucumber.jpg')
        #if '發財' in msg_txt or '發大財' in msg_txt:
        #    reply = ImageSendMessage(
        #            original_content_url='https://www.1001000.io/img/whiteeye.gif',
        #            preview_image_url='https://www.1001000.io/img/whiteeye.gif')
    except NlpServiceError as e:
        err_msg = 'NLP service is currently unavailable: {}'.format(repr(e))
        logger.error(err_msg)
        reply = TextSendMessage(text=err_msg)
    except Exception as e:
        err_msg = 'Unexpected error: {}'.format(repr(e))
        logger.exception(err_msg)
        reply = TextSendMessage(text=err_msg)
    finally:
        # Always log the message; group_id/room_id only exist on group/room
        # sources, hence the try/except guards (bare except is intentional here).
        payload = {'text':msg_txt, 'user_id':event.source.user_id}
        try:
            payload['group_id'] = event.source.group_id
        except:
            pass
        try:
            payload['room_id'] = event.source.room_id
        except:
            pass
        requests.post(app.config['GOOGLE_SHEETS'], data=payload)
        if reply:
            logger.info(reply)
            line_bot_api.reply_message(event.reply_token, reply)
@webhook_handler.add(MessageEvent, message=(ImageMessage, VideoMessage, AudioMessage))
def handle_content_message(event):
    """Log a received media message (image/video/audio) to the Google Sheet.

    Only metadata is forwarded: a pseudo-filename built from the LINE message
    id plus an extension inferred from the message type.
    """
    if isinstance(event.message, ImageMessage):
        ext = '.jpg'
    elif isinstance(event.message, VideoMessage):
        ext = '.mp4'
    elif isinstance(event.message, AudioMessage):
        ext = '.m4a'
    else:
        # Unknown media type: nothing to record.
        return
    payload = {'text': event.message.id + ext, 'user_id': event.source.user_id}
    # group_id / room_id only exist on group/room sources. Narrowed from bare
    # `except:` (which also swallowed KeyboardInterrupt/SystemExit) to the
    # AttributeError actually expected here.
    try:
        payload['group_id'] = event.source.group_id
    except AttributeError:
        pass
    try:
        payload['room_id'] = event.source.room_id
    except AttributeError:
        pass
    requests.post(app.config['GOOGLE_SHEETS'], data=payload)
#!/usr/bin/env python
import base64
import datetime
import json
# import os
import requests
import shutil
import numpy as np
import pandas as pd
import xarray as xr
import sys
import os
# %%
fildir = '/sand/usgs/users/ssuttles/wind/'  # base directory for per-site NetCDF output
def fetch_api_data(params):
    """Fetch all CSR data rows from the Hologram dashboard API.

    Follows pagination through the ``continues``/``links.next`` fields and
    decodes each record's base64-encoded CSV payload.

    Args:
        params: request parameters (apikey, deviceid, optional timestart).

    Returns:
        list[list[str]]: one list of comma-separated fields per record.
    """
    s = requests.Session()
    r = s.get('https://dashboard.hologram.io/api/1/csr/rdm', params=params)
    lines = []

    def _decode_page(page):
        # Each record's 'data' field is JSON whose 'data' member is a
        # base64-encoded CSV line.
        for rec in page['data']:
            lines.append(base64.b64decode(json.loads(rec['data'])['data']).decode('utf-8').split(','))

    # Perf fix: the original called r.json() (a full JSON re-parse) several
    # times per page; parse each response exactly once.
    page = r.json()
    _decode_page(page)
    while page['continues']:
        r = s.get('https://dashboard.hologram.io' + page['links']['next'])
        page = r.json()
        print('appending lines', lines[-1])
        _decode_page(page)
    return lines
# Site code comes from argv; default to Head of Meadow Buoy.
if len(sys.argv) == 1:
    site = 'hmb'
else:
    site = sys.argv[1]
print(site)
# Per-site constants: Hologram device id, first-deployment epoch, position, title.
deviceid = {'hmb': '734540',
            'bel': '585918'}
timestart = {'hmb': 1594740000,
             'bel': 1586962800}
latlon = {'hmb': {'lat': 42, 'lon': -70},
          'bel': {'lat': 48.760415, 'lon': -122.521977}}
title = {'hmb': 'Head of Meadow Buoy Wind Station',
         'bel': 'Bellingham Bay Meteorological Station'}
params = {}
with open('hologram.apikey') as f:
    params['apikey'] = f.read().strip()
params['deviceid'] = deviceid[site]
try:
    # Incremental mode: resume from the last timestamp in the existing file.
    dsold = xr.load_dataset(fildir + site + '.nc')
    params['timestart'] = str((dsold.time[-1].astype('uint64')/1e9).astype('int').values)
    print('starting from incremental holo file. First burst', dsold['time'][0].values, 'last burst', dsold['time'][-1].values)
except FileNotFoundError:
    # First run: fetch everything since the deployment epoch.
    dsold = xr.Dataset()
    params['timestart'] = timestart[site]
    print('starting from scratch with holo')
print("params['timestart']", params['timestart'])
lines = fetch_api_data(params)
# %%
# Records arrive as alternating key,value CSV fields; zip them into dicts.
df = pd.DataFrame([dict(zip(l[0::2], l[1::2])) for l in lines])
df['time'] = pd.DatetimeIndex(df['time'])
df.set_index('time', inplace=True)
for k in df.columns:
    df[k] = pd.to_numeric(df[k])
dsnew = df.to_xarray().sortby('time')
dsnew['time'] = pd.DatetimeIndex(dsnew['time'].values)
# need to adjust direction for Bellingham before merging so we don't do it multiple times
if site == 'bel':
    # wind sensor was installed 15 degrees west of magnetic north.
    # i.e. sensor is pointing at 345
    #
    #SN   MN
    # \   |
    #  \ 15|
    #   \ |
    #    \ |
    #     \|
    # SN = sensor north
    # MN = magnetic north
    # a wind from 0 degrees magnetic would register as 15 on our sensor
    # so subtract 15
    dsnew['Dm'] = (dsnew['Dm'] - 15) % 360
ds = xr.merge([dsold, dsnew])
# Replace the -9999 sentinel with NaN on all time-varying variables.
for k in ds.data_vars:
    if 'time' in ds[k].dims:
        ds[k][ds[k] == -9999] = np.nan
ds = ds.drop('sample')
ds.attrs['title'] = title[site] + '. PROVISIONAL DATA SUBJECT TO REVISION.'
ds.attrs['history'] = 'Generated using vaisala-holo.py'
ds['latitude'] = xr.DataArray([latlon[site]['lat']], dims='latitude')
ds['longitude'] = xr.DataArray([latlon[site]['lon']], dims='longitude')
ds['feature_type_instance'] = xr.DataArray(site)
ds['feature_type_instance'].attrs['long_name'] = 'station code'
ds['feature_type_instance'].attrs['cf_role'] = 'timeseries_id'
ds.attrs['naming_authority'] = 'gov.usgs.cmgp'
ds.attrs['original_folder'] = 'wind'
ds.attrs['featureType'] = 'timeSeries'
ds.attrs['cdm_timeseries_variables'] = 'feature_type_instance, latitude, longitude'
# NOTE(review): 'gri' is not in any of the site dicts above — presumably a
# third site handled elsewhere; this branch is dead for hmb/bel.
if site == 'gri':
    ds.attrs['elevation'] = 'Sensor elevation 6.81 m NAVD88'
def add_standard_attrs(ds):
    """Attach CF-convention metadata (standard_name, units, long_name) to *ds*.

    Mutates the dataset in place. Optional housekeeping variables and the
    latitude/longitude coordinates are annotated only when present.
    """
    ds.attrs['Conventions'] = 'CF-1.6'
    ds.attrs['institution'] = 'U.S. Geological Survey'
    ds['time'].attrs['standard_name'] = 'time'
    # Core Vaisala variables: name -> (CF standard_name, units).
    cf_meta = {
        'Dm': ('wind_from_direction', 'degree'),
        'Sm': ('wind_speed', 'm s-1'),
        'Pa': ('air_pressure', 'Pa'),
        'Ta': ('air_temperature', 'degree_C'),
        'Ua': ('relative_humidity', 'percent'),
        'Rc': ('rainfall_amount', 'mm'),
    }
    for var, (std_name, units) in cf_meta.items():
        ds[var].attrs['standard_name'] = std_name
        ds[var].attrs['units'] = units
    # Logger housekeeping variables are optional.
    housekeeping = {
        'signalpct': ('percent', 'Cellular signal strength'),
        'boardbatt': ('V', 'Logger board battery voltage'),
        'boardtemp': ('degree_C', 'Logger board temperature'),
    }
    for var, (units, long_name) in housekeeping.items():
        if var in ds:
            ds[var].attrs['units'] = units
            ds[var].attrs['long_name'] = long_name
    # Coordinate metadata; _FillValue must be suppressed on coordinates.
    for coord, units in (('latitude', 'degrees_north'), ('longitude', 'degrees_east')):
        if coord in ds:
            ds[coord].attrs['long_name'] = coord
            ds[coord].attrs['units'] = units
            ds[coord].attrs['standard_name'] = coord
            ds[coord].encoding['_FillValue'] = None
add_standard_attrs(ds)
ds = ds.squeeze()
# %%
# make a backup
now = datetime.datetime.now()
timestr = now.strftime('%Y%m%d%H%M%S')
hour = now.strftime('%H')
try:
    # Only back up once a day, on the midnight-hour run.
    if hour == '00':
        shutil.copy(fildir + site + '.nc', fildir + '../wind_bak/' + site + timestr + '.nc')
except:
    # NOTE(review): bare except also hides permission/disk errors — consider
    # narrowing to FileNotFoundError.
    print('Could not make backup. This may occur on first run')
ds.to_netcdf(fildir + site + '.nc', encoding={'time': {'dtype': 'int32'},
                                              'signalpct': {'dtype': 'int32'},
                                              'Dm': {'dtype': 'int32'}})
|
"""Sample AWS Lambda function for remembering a favorite color."""
from alexa import AlexaSkill, AlexaResponse, intent_callback
class Color(AlexaSkill):
    """Sample Alexa skill that remembers the user's favorite color in session state."""
    card_title = "Favorite Color"
    def _get_welcome(self):
        # Shared welcome/help response prompting the user to state a color.
        reprompt_text = ("Please tell me your favorite color by saying, "
                         "my favorite color is red")
        output_speech = ("Welcome to the Alexa Skills Kit sample, " +
                         reprompt_text)
        return AlexaResponse(session_attributes={},
                             output_speech=output_speech,
                             card_title=self.card_title,
                             reprompt_text=reprompt_text,
                             should_end_session=False)
    def handle_launch(self, request, session):
        # Launch (no intent) behaves like the help intent.
        return self._get_welcome()
    @intent_callback('HelpIntent')
    def on_help(self, intent, session):
        return self._get_welcome()
    @intent_callback('MyColorIsIntent')
    def on_my_color_is(self, intent, session):
        # Stores the spoken color into the session attributes.
        slot = intent['slots'].get("Color")
        print(slot)
        if slot:
            favorite_color = slot['value']
            output_speech = ("I now know your favorite color is {}. You can "
                             "ask me your favorite color by saying, what's "
                             "my favorite color?".format(favorite_color))
            reprompt_text = ("You can ask me your favorite color by saying, "
                             "what's my favorite color?")
            session_attributes = {'favoriteColor': favorite_color}
        else:
            output_speech = ("I'm not sure what your favorite color is, "
                             "please try again")
            reprompt_text = ("I'm not sure what your favorite color is, "
                             "you can tell me your favorite color by saying, "
                             "my favorite color is red")
            session_attributes = {}
        return AlexaResponse(session_attributes=session_attributes,
                             output_speech=output_speech,
                             card_title=self.card_title,
                             reprompt_text=reprompt_text,
                             should_end_session=False)
    @intent_callback('WhatsMyColorIntent')
    def on_whats_my_color(self, intent, session):
        # Reads the color back from the session; ends the session on success.
        favorite_color = session.get("favoriteColor")
        if favorite_color:
            # NOTE(review): the hardcoded name "Ian" looks like leftover
            # personalization in a generic sample — confirm intent.
            output_speech = ("Your favorite color is {}, Ian".format(
                favorite_color))
            should_end_session = True
        else:
            output_speech = ("I'm not sure what your favorite color is. "
                             "You can say, my favorite color is red")
            should_end_session = False
        return AlexaResponse(session_attributes={},
                             output_speech=output_speech,
                             card_title=self.card_title,
                             reprompt_text=None,
                             should_end_session=should_end_session)
def lambda_handler(event, context):
    """AWS Lambda entry point: delegate the incoming event to a fresh Color skill."""
    return Color().handle(event, context)
if __name__ == '__main__':
    # Local smoke test: simulate a MyColorIsIntent request and pretty-print
    # the response.
    import json
    # NOTE(review): the slot value is a bare string here, but on_my_color_is
    # indexes slot['value'] — real Alexa payloads use {"Color": {"value": "red"}};
    # verify this fixture actually exercises the success path.
    event = {
        'request': {
            'type': "IntentRequest",
            'intent': {
                'slots': {
                    "Color": 'red'
                },
                'name': "MyColorIsIntent",
                'requestId': "request5678"
            }
        },
        'session': {
            'new': False
        },
        'version': "1.0"
    }
    context = None
    print(json.dumps(lambda_handler(event, context), indent=2))
|
#!/usr/bin/env python
from latex_meta_lib import metacls_objlib
class SingleSentence():
    """A single sentence element identified by an optional tag.

    Attributes:
        tag: identifier / variable name for this sentence (may be None).
        text: the sentence body, initially empty.
        filepath: associated file path, initially None.
        format: format description, initially None.
    """
    def __init__(self, tag=None):
        self.tag = tag
        self.text = ''
        self.filepath = None
        self.format = None
class Paragraph():
    # NOTE(review): Python-2-style metaclass hook; Python 3 ignores
    # __metaclass__ entirely — verify which interpreter this targets.
    __metaclass__ = metacls_objlib
    ''' variable library'''
    def __init__(self, tag):
        """Initialise an empty paragraph identified by *tag*."""
        self.tag = tag
        self.itemlib = {}
        self.count = 0
        self.libtype = SingleSentence
        self.wholetext = ''
        self.tablelist = []
        self.figurelist = []
        self.equationlist = []
        self.referencelist = []
    def Add(self, objtype, obj):
        """Append *obj* to the list for *objtype*; raise KeyError if unknown."""
        buckets = {
            'table': self.tablelist,
            'figure': self.figurelist,
            'equation': self.equationlist,
            'reference': self.referencelist,
        }
        if objtype not in buckets:
            raise KeyError
        buckets[objtype].append(obj)
    def AddByDict(self, itemdict):
        """Store a mapping of items wholesale."""
        self.itemdict = itemdict
    def ExtractTex(self):
        """Placeholder: LaTeX extraction is not implemented."""
        pass
class ParagraphLib():
    # NOTE(review): Python-2-style metaclass hook; inert under Python 3.
    __metaclass__ = metacls_objlib
    ''' variable library'''
    def __init__(self):
        self.tag = ''
        self.itemlib = {}
        self.count = 0
        self.libtype = Paragraph
    def AddByTex(self,tag,lines):
        # Wrap raw LaTeX lines in a Paragraph and store it in the library.
        p1 = Paragraph(tag)
        p1.wholetext = lines
        # NOTE(review): self.Add is not defined in this class — presumably
        # injected by the metacls_objlib metaclass (Python 2 semantics);
        # under Python 3 this would raise AttributeError. Verify.
        self.Add(p1)
        return p1
|
import os
import sys
import warnings
import pytest
import aospy
def test_tutorial_notebook():
    """Execute the bundled tutorial notebook end-to-end to verify it still runs."""
    # Skip (not fail) when the optional notebook toolchain is absent.
    pytest.importorskip('nbformat')
    pytest.importorskip('nbconvert')
    pytest.importorskip('matplotlib')
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    rootdir = os.path.join(aospy.__path__[0], 'examples')
    with open(os.path.join(rootdir, 'tutorial.ipynb')) as nb_file:
        notebook = nbformat.read(nb_file, as_version=nbformat.NO_CONVERT)
    # sys.version[0] is the major-version digit, e.g. 'python3'.
    kernel_name = 'python' + str(sys.version[0])
    ep = ExecutePreprocessor(kernel_name=kernel_name)
    # Suppress (but record) warnings emitted while executing the notebook.
    with warnings.catch_warnings(record=True):
        ep.preprocess(notebook, {})
|
import logging
import json
import awacs.cloudwatch
import awacs.sns
import awacs.sts
import awacs.ssm
import awacs.ec2
import awacs.autoscaling
from awacs.aws import Allow, Policy, Principal, Statement
from runway.cfngin.blueprints.base import Blueprint as CFNGinBlueprint
from runway.cfngin.blueprints.variables.types import CFNString
from troposphere import GetAtt, Ref, Sub, awslambda, events, iam, ssm
log = logging.getLogger(__name__)
# Prefix for AWS-managed service-role policy ARNs.
IAM_SVC_ARN_PREFIX = "arn:aws:iam::aws:policy/service-role/"
class Blueprint(CFNGinBlueprint):
    """CFNGin blueprint: a scheduled Lambda that refreshes ASGs with the latest AMI.

    Resources created: an SSM parameter holding the target ASG names, the
    Lambda function, its execution role, a CloudWatch Events schedule rule,
    and the permission allowing the rule to invoke the function.
    """
    VARIABLES = {
        "FunctionRuntime": {
            "type": CFNString,
            "default": "python3.8",
            "description": "Lambda functions runtime. Used on both functions.",
        },
        "FunctionMemory": {
            "type": CFNString,
            "default": "256",
            "description": "Lambda functions memory. Used on both functions.",
        },
        "FunctionTimeout": {
            "type": CFNString,
            "default": "300",
            "description": "Lambda functions timeout. Used on both functions.",
        },
        "FunctionCron": {
            "type": CFNString,
            "default": "cron(00 07 ? * SAT#2 *)",
            "description": "Schedule that triggers the Lambda Function",
        },
        "ASGNamesPath": {
            "type": CFNString,
            "description": "SSM Param Path",
        },
        # Raw python list (not a CFN parameter): serialized to JSON into SSM.
        "ASGNames": {
            "type": list,
            "default": [],
            "description": "List of ASG Names",
        },
        "InstanceRoleCommon": {
            "type": CFNString,
            "description": "List of ASG Instance Roles",
        },
        "InstanceRolePreview": {
            "type": CFNString,
            "description": "List of ASG Instance Roles",
        },
        "InstanceRoleProd": {
            "type": CFNString,
            "description": "List of ASG Instance Roles",
        }
    }
    def _create_resources_update_asg(self):
        """Create the SSM parameter, Lambda function, schedule rule and invoke permission."""
        variables = self.get_variables()
        template = self.template
        # ASG names are published to SSM so the function can read them at runtime.
        ssm_parameters = template.add_resource(
            ssm.Parameter(
                "UpdateASGParameter",
                Type="String",
                Value=json.dumps(variables["ASGNames"]),
                Name=variables["ASGNamesPath"].ref,
            )
        )
        function = template.add_resource(
            awslambda.Function(
                "UpdateASGFunction",
                Code=self.context.hook_data["lambda"]["UpdateASGFunction"],
                Handler="lambda_function.lambda_handler",
                # Role is created by _create_lambda_role, which must run first.
                Role=GetAtt(self.lambdarole, "Arn"),
                Runtime=variables["FunctionRuntime"].ref,
                Description="Gets latest AMI and updates Bastion ASG, terminates/starts new instance with updated AMI",
                Environment=awslambda.Environment(
                    Variables={"ASG_NAMES_PATH": variables["ASGNamesPath"].ref}),
                MemorySize=variables["FunctionMemory"].ref,
                Timeout=variables["FunctionTimeout"].ref,
            )
        )
        trigger = events.Rule(
            "UpdateASGTrigger",
            ScheduleExpression=variables["FunctionCron"].ref,
            State="ENABLED",
            Targets=[
                events.Target(
                    Arn=GetAtt(function, "Arn"), Id="UpdateASGTriggerLambdaArn"
                )
            ],
        )
        template.add_resource(
            awslambda.Permission(
                "UpdateASGTriggerEventPermission",
                Action="lambda:InvokeFunction",
                FunctionName=Ref(function),
                Principal="events.amazonaws.com",
                SourceArn=GetAtt(
                    # adds resource and references it in one line.
                    # Only way to avoid dynamic variables.
                    template.add_resource(trigger),
                    "Arn",
                ),
            )
        )
    def _create_lambda_role(self):
        """Create the Lambda execution role with SSM/ASG/EC2 permissions."""
        variables = self.get_variables()
        template = self.template
        self.lambdarole = template.add_resource(
            iam.Role(
                "LambdaRole",
                AssumeRolePolicyDocument=Policy(
                    Version="2012-10-17",
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[awacs.sts.AssumeRole],
                            Principal=Principal("Service", ["lambda.amazonaws.com"]),
                        )
                    ],
                ),
                ManagedPolicyArns=[
                    IAM_SVC_ARN_PREFIX + "AWSLambdaBasicExecutionRole",
                ],
                Policies=[
                    iam.Policy(
                        PolicyName="update-asg-lambda",
                        PolicyDocument=Policy(
                            Version="2012-10-17",
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.ssm.DescribeParameters,
                                        awacs.ssm.GetParameter,
                                        awacs.autoscaling.CreateLaunchConfiguration,
                                        awacs.autoscaling.DescribeAutoScalingInstances,
                                        awacs.autoscaling.DescribeAutoScalingGroups,
                                        awacs.autoscaling.DescribeTags,
                                        awacs.autoscaling.DescribeLaunchConfigurations,
                                        awacs.autoscaling.UpdateAutoScalingGroup,
                                        awacs.autoscaling.TerminateInstanceInAutoScalingGroup,
                                        awacs.ec2.TerminateInstances
                                    ],
                                    Effect=Allow,
                                    Resource=["*"],
                                ),
                                # PassRole is limited to the specific instance roles.
                                Statement(
                                    Action=[
                                        awacs.iam.PassRole,
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        variables["InstanceRoleCommon"].ref,
                                        variables["InstanceRolePreview"].ref,
                                        variables["InstanceRoleProd"].ref
                                    ],
                                ),
                            ],
                        ),
                    )
                ],
            )
        )
    def create_resources(self):
        # Order matters: the role must exist before the function references it.
        self._create_lambda_role()
        self._create_resources_update_asg()
    def create_template(self):
        self.create_resources()
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# One tiny executable per MSVC /arch setting, verifying that gyp maps each
# EnableEnhancedInstructionSet value to the right compiler flag.
{
  'targets': [
    {
      'target_name': 'sse_extensions',
      'type': 'executable',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'EnableEnhancedInstructionSet': '1', # StreamingSIMDExtensions
        }
      },
      'sources': ['enable-enhanced-instruction-set.cc'],
    },
    {
      'target_name': 'sse2_extensions',
      'type': 'executable',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'EnableEnhancedInstructionSet': '2', # StreamingSIMDExtensions2
        }
      },
      'sources': ['enable-enhanced-instruction-set.cc'],
    },
  ],
  'conditions': [
    # AVX requires VS2012 or later (lexicographic year comparison).
    ['MSVS_VERSION[0:4]>"2010"', {
      'targets': [
        {
          'target_name': 'avx_extensions',
          'type': 'executable',
          'msvs_settings': {
            'VCCLCompilerTool': {
              'EnableEnhancedInstructionSet': '3', # AdvancedVectorExtensions
            }
          },
          'sources': ['enable-enhanced-instruction-set.cc'],
        },
        {
          'target_name': 'no_extensions',
          'type': 'executable',
          'msvs_settings': {
            'VCCLCompilerTool': {
              'EnableEnhancedInstructionSet': '4', # NoExtensions
            }
          },
          'sources': ['enable-enhanced-instruction-set.cc'],
        },
      ],
    }],
    # AVX2 requires VS2013 or later.
    ['MSVS_VERSION[0:4]>="2013"', {
      'targets': [
        {
          'target_name': 'avx2_extensions',
          'type': 'executable',
          'msvs_settings': {
            'VCCLCompilerTool': {
              'EnableEnhancedInstructionSet': '5', # AdvancedVectorExtensions2
            }
          },
          'sources': ['enable-enhanced-instruction-set.cc'],
        },
      ],
    }],
  ],
}
|
# -*- coding: utf-8 -*-
'''
@author : smh2208
@software: PyCharm
@file : adminx.py
@time : 2018/7/21 17:35
@desc :
'''
import xadmin
from .models import EmailVerifyRecord,Banner
# xadmin option classes inherit from object, not admin.ModelAdmin
class EmailVerifyRecordAdmin(object):
    # Columns shown in the change list, and searchable fields.
    list_display = ['code', 'email', 'send_type', 'send_time']
    search_fields = ['code','email','send_type']
class BannerAdmin(object):
    list_display = ['title','image','url','index','add_time']
    search_fields = ['title','image','url','index']
    list_filter = ['title','image','url','index','add_time']
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
from xadmin import views,site
class BaseSeting(object):
    # Enable theme switching and the online Bootswatch theme catalog.
    enable_themes = True
    use_bootswatch = True
site.register(views.BaseAdminView,BaseSeting)
# This global-settings block may live in any app's adminx.py
class GlobalSettings(object):
    site_title = " Mooc后台管理站"
    site_footer = "Mooconline"
    # Collapse the left menu into an accordion
    menu_style = "accordion"
# Register the header/footer configuration:
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
default_app_config = 'colossus.apps.lists.apps.MailingConfig'  # legacy Django (<3.2) default AppConfig hook
|
#!/usr/bin/python3
"""Square module"""
class Square:
    """Represents a square."""

    def __init__(self, size=0):
        """Initialize the square.

        Args:
            size (int): Size to create the square, defaults to 0.

        Attributes:
            __size (int): Private, size of the square.
        """
        self.size = size

    @property
    def size(self):
        """Size of the square; validated on assignment."""
        return self.__size

    @size.setter
    def size(self, value):
        """Verify that size is an integer and not negative."""
        # type(...) is not int (rather than isinstance) deliberately rejects bool.
        if type(value) is not int:
            raise TypeError("size must be an integer")
        if value < 0:
            raise ValueError("size must be >= 0")
        self.__size = value

    def area(self):
        """Return the square's area (size squared)."""
        return self.__size ** 2

    def my_print(self):
        """Print the square to stdout as rows of '#' (blank line when size is 0)."""
        if not self.size:
            print()
            return
        for _ in range(self.size):
            print("#" * self.size)
|
#!/usr/bin/python3
# Driver script for the 101-lazy_matrix_mul exercise (NumPy-backed matmul).
lazy_matrix_mul = __import__('101-lazy_matrix_mul').lazy_matrix_mul
print(lazy_matrix_mul([[1, 2], [3, 4]], [[1, 2], [3, 4]]))
print(lazy_matrix_mul([[1, 2, 3], [3, 4, 5]], [[1, 2], [3, 4], [5, 6]]))
print(lazy_matrix_mul([[1, 2]], [[3, 4], [5, 6]]))
print(lazy_matrix_mul([[True, 2]], [[3, 4], [5, 9]]))
|
# NOTE(review): implicit-relative import — Python 2 style; on Python 3 this
# would need `from .models import ...`. Verify the target interpreter.
from models import About_us, History, Facts, QnA
import xadmin
xadmin.autodiscover()
# Register the content models with default xadmin options.
xadmin.site.register(About_us)
xadmin.site.register(History)
xadmin.site.register(Facts)
xadmin.site.register(QnA)
|
from words import sort_words_case_insensitively
def test_sort_words_case_insensitively():
    # Case-insensitive alphabetical order; digit-leading words sort last.
    words = ("It's almost Holidays and PyBites wishes You a "
             "Merry Christmas and a Happy 2019").split()
    actual = sort_words_case_insensitively(words)
    expected = ['a', 'a', 'almost', 'and', 'and', 'Christmas',
                'Happy', 'Holidays', "It's", 'Merry', 'PyBites',
                'wishes', 'You', '2019']
    assert actual == expected
def test_sort_words_case_insensitively_another_phrase():
    # Multiple digit-leading words keep their relative (sorted) order at the end.
    words = ("Andrew Carnegie's 64-room chateau at 2 East 91st "
             "Street was converted into the Cooper-Hewitt National "
             "Design Museum of the Smithsonian Institution "
             "in the 1970's").split()
    actual = sort_words_case_insensitively(words)
    expected = ['Andrew', 'at', "Carnegie's", 'chateau', 'converted',
                'Cooper-Hewitt', 'Design', 'East', 'in', 'Institution',
                'into', 'Museum', 'National', 'of', 'Smithsonian',
                'Street', 'the', 'the', 'the', 'was', "1970's", '2',
                '64-room', '91st']
    assert actual == expected
def test_digit_inside_word_does_not_matter():
    """We only care about the first char being a number"""
    words = ("It was the twenty9th of October when it was questioned"
             "the meaning of nuMbers and weather hiding a number Inside"
             "tex56t should be treated as a word or another number").split()
    actual = sort_words_case_insensitively(words)
    expected = ['a', 'a', 'and', 'another', 'as', 'be', 'hiding',
                'Insidetex56t', 'It', 'it', 'meaning', 'number', 'number',
                'nuMbers', 'October', 'of', 'of', 'or', 'questionedthe',
                'should', 'the', 'treated', 'twenty9th', 'was',
                'was', 'weather', 'when', 'word']
    assert actual == expected
def test_words_with_mixed_chars_and_digits():
    # Mixed alphanumerics: classification depends only on the first character.
    words = ("Let's see how4 this 1sorts, hope it works 4 this "
             "B1te 22 55abc abc55").split()
    actual = sort_words_case_insensitively(words)
    expected = ['abc55', 'B1te', 'hope', 'how4', 'it', "Let's", 'see',
                'this', 'this', 'works', '1sorts,', '22', '4', '55abc']
    assert actual == expected
def get_array(input_array):
    """Sort the words of *input_array* by their last character, stably.

    Implementation: reverse each word, sort the reversed words by their first
    character (the original last character), then reverse them back.

    Args:
        input_array: a space-separated string of words.

    Returns:
        list[str]: the words ordered by final character. (Bug fix: the
        original computed this list, printed it, and returned None.)
    """
    reversed_words = [word[::-1] for word in input_array.split(' ')]
    # key=w[:1] instead of w[0] so an empty token (e.g. from a double space)
    # cannot raise IndexError; empty strings simply sort first.
    reversed_words.sort(key=lambda w: w[:1])
    return [word[::-1] for word in reversed_words]


get_array("massage yes massage yes massage")
|
import datetime
import json
import requests
from rest_framework.authtoken.models import Token
from .constants import SERVICE_CHOICES
from .models import Editor, Category, Author, Book
from .constants import local_api_service, google_api_service, get_google_book, get_oreilly_book, oreilly_api_service, local_save_book
def API_request(search, option, auth=None):
    """Search for books via the local API, Google Books, or O'Reilly.

    Args:
        search: the search term / query string fragment.
        option: "local", SERVICE_CHOICES[0][0] (google) or SERVICE_CHOICES[1][0] (oreilly).
        auth: local-API token, used only when option == "local".

    Returns:
        The decoded JSON payload, annotated with a 'service' key and per-item
        'save_link' entries. NOTE(review): an unrecognized *option* falls
        through and returns None implicitly — verify callers handle that.
    """
    if option == "local":
        url = local_api_service + search
        headers = {
            'Authorization': 'Token {}'.format(auth)
        }
        r = requests.request("GET", url, headers=headers, data={})
        data = r.json()
        # Local API returns a list; tag the first element with the service name.
        if data:
            data[0]['service'] = option
        return data
    elif option == SERVICE_CHOICES[0][0]:
        service = google_api_service + search + '&maxResults=1'
        r = requests.get(service)
        data = r.json()
        data['service'] = option
        # Google items are keyed by 'id'; save links use the '_g' suffix.
        key = 'id'
        method = local_save_book + '_g='
        data['items'] = add_save_method(data['items'], method, key)
        return data
    elif option == SERVICE_CHOICES[1][0]:
        service = oreilly_api_service + search
        r = requests.get(service)
        data = r.json()
        data['service'] = option
        # O'Reilly results are keyed by 'archive_id'; save links use '_o'.
        key = 'archive_id'
        method = local_save_book + '_o='
        data['results'] = add_save_method(data['results'], method, key)
        return data
def get_book_by_id(id, service):
    """Fetch a single book record from the given external service.

    Args:
        id: the service-specific book identifier.
        service: SERVICE_CHOICES key selecting Google Books or O'Reilly.

    Returns:
        dict: the decoded JSON payload for the book.
    """
    # Build the request URL in a separate variable instead of reassigning the
    # `service` parameter (the original shadowed it); the debug print of the
    # URL was removed.
    if service == SERVICE_CHOICES[0][0]:
        url = get_google_book + id
    elif service == SERVICE_CHOICES[1][0]:
        url = get_oreilly_book + id
    else:
        # Preserve legacy fallback: an unknown value is used as the URL itself.
        url = service
    r = requests.get(url)
    return r.json()
def add_save_method(books, method, key):
    """Annotate each book dict with a 'save_link' and return them as a list.

    Args:
        books: iterable of book dicts (mutated in place).
        method: URL prefix for the save endpoint.
        key: dict key whose value identifies the book.

    Returns:
        list[dict]: the same dict objects, each with 'save_link' set.
    """
    annotated = []
    for entry in books:
        entry['save_link'] = method + entry[key]
        annotated.append(entry)
    return annotated
def save_book(data, option):
    """Create and persist a Book (with editor, authors, categories) from API data.

    Args:
        data: raw JSON payload from the Google Books or O'Reilly API.
        option: SERVICE_CHOICES key identifying which payload shape *data* has.

    Returns:
        Book: the newly created model instance.

    Raises:
        Exception: if a book with the same title already exists.
    """
    if option == SERVICE_CHOICES[0][0]:
        # Google Books payload; several fields are optional. The bare
        # `except:` clauses were narrowed to KeyError so real errors (e.g. a
        # malformed payload) are no longer swallowed.
        info = data["volumeInfo"]
        title = info["title"]
        try:
            subtitle = info["subtitle"]
        except KeyError:
            subtitle = title
        try:
            description = info["description"]
        except KeyError:
            description = "Not found"
        try:
            image = info["imageLinks"]["thumbnail"]
        except KeyError:
            image = None
        date_str = info["publishedDate"]
        editor_name = info["publisher"]
        authors_name = info["authors"]
        try:
            categories_name = info["categories"]
        except KeyError:
            categories_name = ["No registrado"]
    elif option == SERVICE_CHOICES[1][0]:
        # O'Reilly payload: all fields required.
        title = data["title"]
        subtitle = data["title"]
        description = data["description"]
        image = data["cover"]
        date_str = data["issued"]
        editor_name = data["publishers"][0]["name"]
        authors_name = [author['name'] for author in data["authors"]]
        categories_name = [topic['name'] for topic in data["topics"]]
    # Refuse duplicate titles.
    if Book.objects.filter(title=title):
        raise Exception("Title already exists")

    def _get_or_create(model, name):
        # Reuse an existing row by name, or create one (mirrors the original
        # filter-then-create logic for Editor/Author/Category).
        found = model.objects.filter(name=name).first()
        return found if found is not None else model.objects.create(name=name)

    editor = _get_or_create(Editor, editor_name)
    authors = [_get_or_create(Author, name) for name in authors_name]
    categories = [_get_or_create(Category, name) for name in categories_name]
    book = Book.objects.create(
        title=title,
        subtitle=subtitle,
        release_date=convert_str_to_date(date_str),
        image=image,
        editor=editor,
        description=description
    )
    book.authors.set(authors)
    book.categories.set(categories)
    return book
def convert_str_to_date(date_str):
    """Normalize a 'YYYY', 'YYYY-MM' or 'YYYY-MM-DD' string to 'YYYY-MM-DD'.

    Missing month/day components are padded with '01'. The padded string
    is validated against the %Y-%m-%d format and returned.

    :raises ValueError: if the padded string is not a valid date.
    """
    parts = len(date_str.split('-'))
    if parts == 1:
        date_str = date_str + '-01-01'
    elif parts == 2:
        date_str = date_str + '-01'
    # Validate only; callers expect the normalized string back. The
    # original also printed the parsed date — leftover debug output.
    datetime.datetime.strptime(date_str, '%Y-%m-%d')
    return date_str
|
# -*- coding: utf-8 -*-
# title(): capitalizes the first letter of every word
# capitalize(): capitalizes only the first character of the string
"""
String-case method cheat sheet:
upper(): return a copy of the string converted to upper case
lower(): return a copy of the string converted to lower case
capitalize(): return a copy with only the first character upper-cased
title(): return a copy with the first letter of each word upper-cased
isupper(): test whether the whole string is upper case
islower(): test whether the whole string is lower case
"""
# Read a line from stdin and demonstrate two of the methods above.
a=input()
print(a.upper())
print(a.capitalize())
import mglearn
import numpy as np
import pandas as pd
import sklearn
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib
import mglearn
from sklearn.datasets import load_boston
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# NOTE(review): load_boston was deprecated and later removed from
# scikit-learn (>= 1.2) — confirm the pinned sklearn version still ships it.
boston = load_boston()
print("데이터의 형태: {}".format(boston.data.shape))
# Inspect the dataset structure
print("structure of data :{}".format(boston.keys()))
print("structure of data :{}".format(boston['DESCR']))
# Load the extended Boston dataset (with interaction features)
X,y = mglearn.datasets.load_extended_boston()
print("X.shape :{}".format(X.shape))
# Nearest-neighbor classification illustration
mglearn.plots.plot_knn_classification(n_neighbors=1)
# Evaluate performance on a held-out test set
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=66)
training_accuracy = []
test_accuracy = []
# Try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)
for n_neighbors in neighbors_settings:
    # Build the model
    clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X_train, y_train)
    # Record training-set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # Record generalization (test-set) accuracy
    test_accuracy.append(clf.score(X_test, y_test))
plt.plot(neighbors_settings, training_accuracy, label="훈련 정확도")
plt.plot(neighbors_settings, test_accuracy, label="테스트 정확도")
plt.ylabel("정확도")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
|
# Package entry point: re-export the public TTML -> SRT conversion API.
from .ttmltosrt import convert_file, srt_generator

__version__ = '1.3.0'
__all__ = ['convert_file', 'srt_generator']
|
# see http://www.unicode.org/reports/tr15/#Canon_Compat_Equivalence
import unicodedata
def nfkc(word):
    """Return *word* normalized to Unicode NFKC, wrapped in a one-element list."""
    normalized = unicodedata.normalize("NFKC", word)
    return [normalized]
|
from django.db import migrations, models
"""
Hi! I made this because I can't stop making the same mistake: Never use a third-party ID as foreign
key, because every change will be painful. I am fixing this mistake here: We're going to walk over
all tables that reference osmcal_user.osm_id and replace this with a serial id.
This involves a little bit of fiddling, as you might see.
"""
# (table, fk column, fk constraint name) for every table referencing
# osmcal_user.osm_id.
user_id_dependent_tables = [
    ("osmcal_eventlog", "created_by_id", "osmcal_eventlog_created_by_id_89c62fed_fk_osmcal_user_osm_id"),
    ("osmcal_eventparticipation", "user_id", "osmcal_eventparticip_user_id_8a2dfe0f_fk_osmcal_us"),
    ("osmcal_participationanswer", "user_id", "osmcal_participation_user_id_93228060_fk_osmcal_us"),
    ("osmcal_user_groups", "user_id", "osmcal_user_groups_user_id_c9d0a3d1_fk_osmcal_user_osm_id"),
    ("osmcal_user_user_permissions", "user_id", "osmcal_user_user_per_user_id_1ecd1641_fk_osmcal_us"),
    ("django_admin_log", "user_id", "django_admin_log_user_id_c564eba6_fk_osmcal_user_osm_id"),
]


def conversion_sql():
    """Build the SQL rewriting every osm_id foreign key to the new serial id.

    For each dependent table: drop the old FK constraint, translate the
    stored osm_id values into the new osmcal_user.id values, then
    re-create the constraint against osmcal_user(id).
    """
    statement = """
    ALTER TABLE {0} DROP CONSTRAINT {2};
    UPDATE {0} SET {1} = (SELECT id FROM osmcal_user WHERE osm_id = {0}.{1});
    ALTER TABLE {0} ADD CONSTRAINT {2} FOREIGN KEY ({1}) REFERENCES osmcal_user (id);
    """
    return "".join(statement.format(*entry) for entry in user_id_dependent_tables)
class Migration(migrations.Migration):
    """Swap osmcal_user's primary key from osm_id to a new serial `id`.

    The database side runs raw SQL: add the serial column, rewrite every
    dependent FK via conversion_sql(), then move the primary key. The
    state side tells Django about the new AutoField without re-running
    any DDL.
    """

    dependencies = [
        ('osmcal', '0022_event_cancelled'),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL([
                    'ALTER TABLE osmcal_user ADD column id serial UNIQUE;',
                    conversion_sql(),
                    'ALTER TABLE osmcal_user DROP CONSTRAINT osmcal_user_pkey;',
                    'ALTER TABLE osmcal_user ADD PRIMARY KEY (id);',
                ])
            ],
            state_operations=[
                migrations.AddField(
                    model_name='user',
                    name='id',
                    field=models.AutoField(primary_key=True, serialize=False),
                    preserve_default=False,
                ),
                migrations.AlterField(
                    model_name='user',
                    name='osm_id',
                    # osm_id stays as a plain integer column, no longer the PK.
                    field=models.IntegerField(),
                ),
            ]
        )
    ]
|
#ENG II 26/04/21
def area_circuferencia(raio):
    """Return the area of a circle with radius *raio* (pi approximated as 3.14)."""
    raio_ao_quadrado = raio * raio
    return 3.14 * raio_ao_quadrado
def perimetro_circuferencia(raio):
    """Return the perimeter (circumference) of a circle of radius *raio* (pi = 3.14)."""
    diametro = 2 * raio
    return 3.14 * diametro
def area_retangulo(base, altura):
    """Return the area of a rectangle with sides *base* and *altura*.

    Bug fix: the original took no parameters and returned the name
    `area`, which is undefined inside the function (NameError, or a
    stale global at best). It now computes the area from its arguments.
    """
    return base * altura
def perimetro_retangulo(base, altura):
    """Return the perimeter of a rectangle with sides *base* and *altura*.

    Bug fix: the original took no parameters and returned the undefined
    name `perimetro`. It now computes the perimeter from its arguments.
    """
    return 2 * (base + altura)
# Interactive menu loop: repeats until the user chooses 0 (exit).
# Bug fix: the menu advertised options 3 and 4 (rectangle area and
# perimeter) but the loop never handled them — they are implemented now.
opcao = -1
while opcao != 0:
    print('Escolha a opção desejada')
    print()
    print('1 -cálculo da área da circuferencia ')
    print('2 -cálculo do perimetro da circuferencia')
    print('3 -cálculo da área do retangulo')
    print('4 -cálculo do perimetro do retangulo')
    print('0 - Sair')
    print()
    opcao = int(input('Entre com o número da opção desejada: '))
    if (opcao == 1):
        raio = float(input('Entre com o valor do raio, para obter a área: '))
        area = 3.14 * (raio * raio)
        print("A área da circuferencia: {:.2f}".format(area))
    elif (opcao == 2):
        raio = float(input('Entre com o valor do raio, para obter o perimetro: '))
        perimetro = 2 * 3.14 * raio
        print("O perimetro da circuferencia é: {:.2f}".format(perimetro))
    elif (opcao == 3):
        base = float(input('Entre com a base do retangulo: '))
        altura = float(input('Entre com a altura do retangulo: '))
        area = base * altura
        print("A área do retangulo: {:.2f}".format(area))
    elif (opcao == 4):
        base = float(input('Entre com a base do retangulo: '))
        altura = float(input('Entre com a altura do retangulo: '))
        perimetro = 2 * (base + altura)
        print("O perimetro do retangulo é: {:.2f}".format(perimetro))
|
# -*- coding: utf-8 -*-
__author__ = 'lish'
import time,datetime
import urllib2,cookielib,socket
import urllib,random
import re,json,os
import sys,time
import requests,MySQLdb
import crawlLWS as lws
import dealLWSdb as lwsdb
from multiprocessing.dummy import Pool as ThreadPool
import sys
reload(sys)
sys.setdefaultencoding('utf8')
requests.packages.urllib3.disable_warnings()
base_url='http://s.haohuojun.com/'
def linkSQL(host,user,passwd,db):
    """Open a MySQL connection and publish it through module globals.

    NOTE(review): every other helper in this module reads the `cursor`
    and `conn` globals set here, so this must be called first.
    """
    global cursor,conn
    conn=MySQLdb.connect(host=host,user=user,passwd=passwd,charset="utf8",db=db)
    cursor = conn.cursor()
    return conn
def checkGuideids(guideids):
    """Return the subset of *guideids* already stored in ec_con.con_guide,
    refreshing each stored guide from the live site as a side effect.

    NOTE(review): SQL is built by string concatenation throughout this
    module — safe only while ids come from trusted crawler output.
    """
    ssql=str(tuple(guideids)).replace(',)',')')
    sql="select guide_id from ec_con.con_guide where guide_id in "+str(ssql)
    n = cursor.execute(sql)
    checkguideids=[]
    for row in cursor.fetchall():
        checkguideid=str(row[0])
        # print checkguideid
        checkguideids.append(checkguideid)
        # print checkguideid
        # Fetch the stored (old) guide info
        check_sql='select guide_title,guide_brief,guide_short_title from ec_con.con_guide where guide_id ='+str(checkguideid)
        n = cursor.execute(check_sql)
        old_infos=list(cursor.fetchall()[0])
        # print old_infos,'???'
        guideinfos=lws.AnalyzeGuides(checkguideid,True)
        # print guideinfos,'????????????ß'
        # Fetch the fresh guide info and compare it with the stored copy.
        # Field layout of new_infos:
        # comments_count,guide_id,liked,likes_count,realcreated_at,share_msg,short_title,status,template,title,updated_at,guide_cover_url,guide_html_content
        if len(guideinfos)==2 and old_infos !=[]:
            new_infos=guideinfos[1]
            # print 'new_infos'
            new_checkinfos=[new_infos[9],new_infos[5],new_infos[6]]
            if old_infos!=new_checkinfos:
                # Title/brief/short-title changed: update the guide record
                new_checkinfos=new_checkinfos+[checkguideid]
                renew_sql='update ec_con.con_guide set guide_title="%s",guide_brief="%s",guide_short_title="%s",modify_time=now() where guide_id =%s ;' % tuple(new_checkinfos)
                # print renew_sql
                n = cursor.execute(renew_sql)
                conn.commit()
                # Push a guide-update notification
                os.system("./ec_message/message -t 2 --guide "+str(checkguideid))
            else:
                # Unchanged content: only refresh create/modify timestamps
                realcreated_at=new_infos[4]
                renew_sql="update ec_con.con_guide set create_time=from_unixtime("+str(realcreated_at)+",'%Y-%m-%d %H:%i:%s'),modify_time=now() where guide_id ="+str(checkguideid)
                # print renew_sql
                n = cursor.execute(renew_sql)
                conn.commit()
    return checkguideids
def checkGoodsids(goodids):
    """Return the subset of *goodids* already stored in ec_con.con_goods,
    updating each stored price from the live site as a side effect.

    A price-drop additionally triggers a push notification; a price rise
    is updated silently.
    """
    # print goodids
    ssql=str(tuple(goodids)).replace(',)',')')
    sql="select goods_id from ec_con.con_goods where goods_id in "+str(ssql)
    # print sql
    n = cursor.execute(sql)
    checkgoodsids=[]
    for row in cursor.fetchall():
        checkgoodsid=str(row[0])
        checkgoodsids.append(checkgoodsid)
        # Fetch the stored (old) price of the existing goods item
        check_sql='select goods_price from ec_con.con_goods where goods_id ='+str(checkgoodsid)
        n = cursor.execute(check_sql)
        oldprice=int(cursor.fetchall()[0][0])
        # Fetch the fresh price from the site (stored prices are in cents)
        # print checkgoodsid
        goodsinfos=lws.AnalyzeGoods(checkgoodsid)
        # print goodsinfos
        newprice=int(float(goodsinfos[11])*100)
        # Compare old and new price to decide whether an update is needed
        if newprice<oldprice:
            # Price dropped: update the record
            renew_sql="update ec_con.con_goods set goods_price="+str(newprice)+" where goods_id ="+str(checkgoodsid)
            n = cursor.execute(renew_sql)
            conn.commit()
            # Push a price-drop notification
            os.system("./ec_message/message -t 1 --goods "+str(checkgoodsid)+" --price "+str(newprice)+" --oldPrice "+str(oldprice))
        elif newprice>oldprice:
            # Price rose: update silently, no notification
            renew_sql="update ec_con.con_goods set goods_price="+str(newprice)+" where goods_id ="+str(checkgoodsid)
            n = cursor.execute(renew_sql)
            conn.commit()
    return checkgoodsids
class InsertCCC(object):
    """Maintains the ec_con.con_category_content mapping table, which
    assigns content items (goods/guides/topics) to page blocks."""

    def PopPage(self,category_id,content_type,content_ids):
        """
        Refresh the goods listed under the 'popular' block:
        drop offline rows, then append any new content_ids at the first
        free category_location slots.
        """
        # Delete rows whose content has gone offline (status=0)
        dec_sql="delete from ec_con.con_category_content where category_id="+str(category_id)+' and content_type='+str(content_type)+' and status=0'
        n = cursor.execute(dec_sql)
        conn.commit()
        # Collect the content ids that are already registered
        isExistCids_sql='select content_id from ec_con.con_category_content where category_id='+str(category_id)+' and content_type=1'
        n = cursor.execute(isExistCids_sql)
        isExistCids=[int(row[0]) for row in cursor.fetchall()]
        # Collect the category_location slots already taken for this
        # category_id/content_type pair
        contents=[]
        gainRanks_sql='select DISTINCT category_location from ec_con.con_category_content where category_id='+str(category_id)+' and content_type='+str(content_type)
        n = cursor.execute(gainRanks_sql)
        locationRanks=[int(row[0]) for row in cursor.fetchall()]
        # Build the rows to insert as a list of tuples, assigning each
        # new content id the lowest free location slot
        for content_id in list(set(content_ids)):
            if int(content_id) not in isExistCids:
                locationI=0
                while locationI<10000:
                    locationI+=1
                    if locationI not in locationRanks:
                        content_id=str(int(content_id))
                        contents+=[tuple([category_id,content_id,content_type,locationI])]
                        locationRanks.append(locationI)
                        # Found a slot: force the loop to terminate
                        locationI=10000
        # Insert the new rows
        insert_sql="INSERT INTO ec_con.con_category_content (category_id,content_id,content_type,category_location,status) values (%s,%s,%s,%s,1) "
        n = cursor.executemany(insert_sql,contents)
        conn.commit()

    def ClassPage(self,categoryid,content_type,status=0,Ndays=1):
        """
        Refresh the guides/topics under a 'discover' page block.

        categoryid: category_id in con_category (and the matching rows in
            con_category_content); identifies a page block.
        Ndays: push data created between N days ago and now into the block.
        status: default status for inserted rows; 1 = online, 0 = offline.
        """
        # Today=time.strftime('%Y%m%d',time.localtime())
        Today=datetime.date.today()
        NdaysAgo=Today - datetime.timedelta(days=Ndays)
        # print NdaysAgo
        if categoryid==102121244:
            # Block 102121244: rebuild from scratch with recent guides
            locationI=0
            del_sql= 'delete from ec_con.con_category_content where category_id='+str(categoryid)+' and content_type='+str(content_type)
            n = cursor.execute(del_sql)
            conn.commit()
            Goodsids_sql="select guide_id from ec_con.con_guide where create_time >"+str(NdaysAgo).replace('-','')+" order by create_time "
            n = cursor.execute(Goodsids_sql)
        elif categoryid==102121250:
            # Block 102121250: the "what to buy" module on the discover page;
            # append topics not yet linked, continuing after the max location
            CCClocations_sql='SELECT max(category_location) from ec_con.con_category_content where content_type='+str(content_type)
            n = cursor.execute(CCClocations_sql)
            locationI=int(cursor.fetchall()[0][0])
            TCCCids_sql="""
            select topic_id from ec_con.con_topic a LEFT join
            (SELECT * from ec_con.con_category_content where content_type=3)b on a.topic_id=b.content_id
            where b.content_id is null and a.create_time >"""+str(NdaysAgo).replace('-','')
            n = cursor.execute(TCCCids_sql)
        CCCinfos=[]
        for row in cursor.fetchall():
            locationI+=1
            CCCinfos.append(tuple([categoryid,int(row[0]),content_type,locationI,status]))
        if CCCinfos!=[]:
            insert_sql="INSERT INTO ec_con.con_category_content (category_id,content_id,content_type,category_location,status) values (%s,%s,%s,%s,%s) "
            n = cursor.executemany(insert_sql,CCCinfos)
            conn.commit()
def crawlGoods(id):
    """Fetch and parse a single goods page by id (worker for the
    commented-out thread pool in main())."""
    lws.AnalyzeGoods(id)
def main():
    """Crawl guide/goods listings, sync them into the DB via the
    check* helpers, then parse any ids not yet stored."""
    lws.clearInfosFile()
    iccc=InsertCCC()
    goodsAllIds=[]
    # Guides from block 3 of the "selection" page
    Selection= lws.crawlSelectionGuides()
    selectionguidesblock3guideids=Selection.dealBlock3(1)
    # Guides from block 1 of the "class" (category) page
    Class=lws.crawlClassGuides()
    classguideblock1guideids=Class.dealBlock1(1)
    # Guides from block 2 of the "class" (category) page
    classguideblock2guideids=Class.dealBlock2(1)
    print selectionguidesblock3guideids,'?'
    print classguideblock1guideids,'??'
    print classguideblock2guideids,'???'
    guideAllIds=selectionguidesblock3guideids+classguideblock1guideids+classguideblock2guideids
    # Guides already in the DB are refreshed in place; the rest are parsed
    # fresh, which also yields the goods ids they contain
    checkGuideAllIds=checkGuideids(guideAllIds)
    updateGuideAllIds=list(set(guideAllIds) - set(checkGuideAllIds))
    for updateGuideAllId in updateGuideAllIds:
        goodsAllIds+=lws.AnalyzeGuides(updateGuideAllId)
    # Refresh the "popular" page goods
    popularGoodsids=lws.crawlPopularGoodss(1)
    goodsAllIds=goodsAllIds+popularGoodsids
    print goodsAllIds
    goodsAllIds=list(set(goodsAllIds))
    if goodsAllIds!=[]:
        checkGoodsAllIds=checkGoodsids(goodsAllIds)
        updategoodsAllIds=list(set(goodsAllIds)-set(checkGoodsAllIds))
        for updategoodsAllId in updategoodsAllIds:
            lws.AnalyzeGoods(updategoodsAllId)
        # pool = ThreadPool(5)
        # results = pool.map(crawlGoods,updategoodsAllIds)
        # pool.close()
        # pool.join()
    # # Persist the crawled data
    # lwsdb.main()
    # # Generate guide HTML and refresh the CDN
    # lws.creatGuidesHtml(guideAllIds)
    # lws.release_cdn(base_url+'guides/html',0)
    # lws.release_cdn(base_url+'goods/html',0)
    # # Block 102121235: popular
    # iccc.PopPage(102121235,1,popularGoodsids)
    # # Block 102121250: discover - guides
    # iccc.ClassPage(102121250,3,1)
    # # Block 102121244: discover - topics
    # iccc.ClassPage(102121244,2,1,7)
if __name__ == '__main__':
    # NOTE(review): a commented-out connection-setup block that lived here
    # contained hard-coded production database credentials; it has been
    # removed from the comments. Connection setup is expected to come
    # from linkSQL() — confirm how conn/cursor are initialized before
    # running this script standalone.
    main()
|
# Generated by Django 2.2.7 on 2020-01-01 08:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two fields to the User model: an `initial` first-use flag
    (default True) and an optional `push_token` for push notifications."""

    dependencies = [
        ('users', '0009_auto_20191212_1904'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='initial',
            field=models.BooleanField(default=True, verbose_name='첫 사용 여부'),
        ),
        migrations.AddField(
            model_name='user',
            name='push_token',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='푸쉬 토큰'),
        ),
    ]
|
import tkinter as tk
from text import get_text
from tqdm import tqdm
from tag_LFM import RLTMF
class mix_re():
    """Tkinter window for hybrid ("mix") recommendations: loads ratings
    data and an RLTMF model, then serves top-N lists per user.

    NOTE(review): the code uses tk.messagebox but only `import tkinter
    as tk` is visible — confirm tkinter.messagebox is imported somewhere,
    otherwise these calls raise AttributeError.
    """

    def __init__(self, window):
        """Build the recommendation window as a Toplevel child of *window*."""
        self.data = []          # parsed (user, item, rating) triples
        self.users = set()      # user ids seen in the data (sorted later)
        self.items = set()      # item ids seen in the data (sorted later)
        self.flag = True
        self.read_data('ratings.dat')
        self.load_model()
        self.mix_recommend = tk.Toplevel(window, bg='pink')
        self.mix_recommend.geometry('300x300')
        self.mix_recommend.title('混合推荐')
        # Button listing all known users
        show_user = tk.Button(self.mix_recommend, bg='Light blue', text='显示所有用户',
                              command=self.show_users)
        show_user.place(x=100, y=20)
        # Entry for the recommendation list length (default 10)
        tk.Label(self.mix_recommend, bg='Light blue', text='推荐长度:').place(x=20, y=60)
        int_n = tk.IntVar()
        int_n.set(10)
        self.recommend_n = tk.Entry(self.mix_recommend, textvariable=int_n)
        self.recommend_n.place(x=80, y=60)
        # Entry for the target user id
        tk.Label(self.mix_recommend, bg='Light blue', text='用户名:').place(x=20, y=100)
        var_user_name = tk.StringVar()
        self.entry_user_name = tk.Entry(self.mix_recommend, textvariable=var_user_name)
        self.entry_user_name.place(x=80, y=100)
        to_rec = tk.Button(self.mix_recommend, text='进行推荐', command=self.push_top_N)
        to_rec.place(x=80, y=150)

    def read_data(self, filename):
        """Read a ratings file ('::'-separated for .dat, ',' otherwise),
        skipping the first line, and fill self.data/users/items."""
        # with open(filename) as f:
        #     pstr = f.read()
        #     self.test = eval(pstr)
        with open(filename) as f:
            token = ','
            if '.dat' in filename:
                token = '::'
            lines = f.readlines()[1:]
            pbar = tqdm(total=len(lines))
            for line in lines:
                fields = line.strip().split(token)
                # print(fields)
                self.users.add(fields[0])
                self.items.add(fields[1])
                # Keep only (user, item, rating); drop e.g. timestamps
                self.data.append(fields[:3])
                pbar.update(1)
            pbar.close()
        self.users = sorted(self.users)
        self.items = sorted(self.items)
        tk.messagebox.showinfo('提示', message='数据读取成功')

    def load_model(self):
        """Load the pretrained RLTMF recommendation model."""
        self.model = RLTMF(10, 10)
        self.model.ReadModel(setTrain=True)
        # self.model.setEvalPara(10)
        tk.messagebox.showinfo('提示', message='模型加载完毕')

    def show_users(self):
        """Open a window listing all user ids in a paginated text grid."""
        self.all_user = tk.Toplevel(self.mix_recommend, bg='pink')
        self.all_user.geometry('600x600')
        self.all_user.title('所有用户')
        self.text = get_text(self.all_user, 200, 100, (200, 200))
        self.text.insert('insert', '                  用户列表\n')
        self.text.update()
        self.text.pack()
        num_users = len(self.users)
        # NOTE(review): the loop steps by 10 but each row slices only 8
        # users, so 2 users per row are silently skipped — looks like a
        # bug; confirm the intended row width.
        for index in range(0, num_users, 10):
            self.text.insert('insert', ' '.join(['%6s' % user for user in self.users[index:index+8]])+'\n')
            self.text.update()

    def push_top_N(self):
        """Validate the form inputs and display the model's top-N list
        of (item, predicted rating) for the requested user."""
        user = self.entry_user_name.get()
        top_n = self.recommend_n.get()
        try:
            top_n = int(top_n)
        except BaseException:
            tk.messagebox.showinfo('错误', message='输入列表长度必须是数字格式!')
            return
        if top_n > 100:
            tk.messagebox.showinfo('错误', message='推荐列表长度设置过大!')
            return
        self.model.setEvalPara(int(top_n))
        # if not self.flag:
        #     self.text.pack_forget()
        rec_list = self.model.TopN(user, choice="RATING")
        if rec_list is None:
            tk.messagebox.showinfo('错误', message='系统中没有该用户id,请查看【所有用户】!')
            return
        push_win = tk.Toplevel(self.mix_recommend, bg='pink')
        push_win.geometry('200x400')
        push_win.title('所有用户')
        text = get_text(push_win, 200, 100, (200, 400))
        text.insert('insert', '%s %s %s\n' % ('行号', '物品id', '预测评分'))
        text.insert('insert', '\n'.join(['%3d %7s %.3f'%(i, item, rating) for i, (item, rating) in enumerate(rec_list)]))
import numpy as np
import os,sys
from sklearn import linear_model
from sklearn import neighbors
from sklearn import svm
from sklearn import preprocessing
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV
import tensorflow as tf
from tensorflow import keras
from helpers import *
from plots import *
import postprocessing
from models import *
from skimage.filters import gaussian
###########################################
# Initialize parameters :
###########################################
# Number of training images
n = 100
# Seed TensorFlow's RNG for reproducible training
seed = 0
tf.random.set_seed(seed)
patch_size = 16
aggregate_threshold = 0.3
# Fraction of road pixels above which a patch is labeled foreground
foreground_threshold = 0.25
# Extraction function (presumably from helpers — star-imported above)
extraction_func = extract_features_6d
preproc = preprocessing.StandardScaler()
# Using image post-processing
image_proc = True
###########################################
# Data extraction and preprocessing :
###########################################
# Load a set of images
imgs, gt_imgs = load_training_images(n)
# Apply a gaussian blur (sigma=2) to each input image :
for i in range(len(imgs)) :
    imgs[i] = gaussian(imgs[i], sigma = 2, multichannel = True)
# Extract patches from all images
img_patches = get_patches(imgs, patch_size)
gt_patches = get_patches(gt_imgs, patch_size)
# Get features for each image patch
X = get_features_from_patches(img_patches, extraction_func)
Y = get_labels_from_patches(gt_patches, foreground_threshold)
# Standardization : fit the scaler on the training features, then transform
if preproc is not None:
    preproc = preproc.fit(X)
    X = preproc.transform(X)
###########################################
# Select the model :
###########################################
# Uncomment the model that you want to use
# model = knn(X, Y, seed)
model = neural_net(X, Y)
###########################################
# Submission
###########################################
create_submission(model, extraction_func, patch_size, preproc, aggregate_threshold, image_proc)
|
#!/usr/bin/env python
import sys
import requests
import json
import logging
import argparse
import datetime
from boto.utils import get_instance_identity
from lockfile import FileLock
# Refuse to run on interpreters older than 2.6.
if sys.version_info < (2, 6):
    if __name__ == "__main__":
        sys.exit("Error: we need python >= 2.6.")
    else:
        # Imported as a module: raise instead of killing the host process.
        raise Exception("we need python >= 2.6")

# Global variables
# Local Elasticsearch HTTP endpoint used by every API call below.
ES_LOCAL_URL = 'http://127.0.0.1:9200'
# Requests timeout in seconds
REQUESTS_TIMEOUT = 30
def es_leadership_check():
    '''Return True when the local ES node is the elected cluster master.

    Lets a job scheduled on every node run on exactly one of them.
    Raises on API or parsing failures (after logging them).
    '''
    # Get info through API
    es_state_master_url = ES_LOCAL_URL + '/_cluster/state/master_node'
    es_state_local_url = ES_LOCAL_URL + '/_nodes/_local/nodes'
    try:
        master_state = requests.get(es_state_master_url,
                                    timeout=REQUESTS_TIMEOUT).json()
        local_state = requests.get(es_state_local_url,
                                   timeout=REQUESTS_TIMEOUT).json()
    except Exception:
        logging.exception("Failure getting ES status information through API")
        raise
    # Do research if we're master node
    try:
        # next(iter(...)) works on both Python 2 and 3; the original
        # dict.keys()[0] breaks on Python 3 where keys() is a view.
        local_node_name = next(iter(local_state['nodes']))
        master_node_name = master_state['master_node']
    except Exception:
        logging.exception("Failure parsing node data")
        raise
    # Finally decide if we passed
    if local_node_name == master_node_name:
        logging.debug("We're master node, ID %s matches", master_node_name)
        return True
    logging.debug("We're NOT master node, master ID is %s", master_node_name)
    return False
def create_repository(args):
    '''Create (or update) an S3-backed snapshot repository in ES.

    The AWS region is read from the local EC2 instance metadata.
    Returns a status string; raises on metadata or API failure.
    '''
    create_repository_url = '/'.join([ES_LOCAL_URL, '_snapshot',
                                      args.repository])
    # Get the region from the instance
    try:
        instance_metadata = get_instance_identity()
        instance_region = instance_metadata['document']['region']
    except Exception:
        logging.exception("Failure getting EC2 instance data")
        raise
    # Repository data
    create_repository_data = {
        "type": "s3",
        "settings": {
            "bucket": args.s3_bucket,
            "region": instance_region,
            "base_path": args.s3_path
        }
    }
    try:
        headers = {'content-type': 'application/json'}
        create_repository_request = requests.put(create_repository_url,
                                                 data=json.dumps(create_repository_data),
                                                 headers=headers,
                                                 timeout=REQUESTS_TIMEOUT)
        create_repository_request.raise_for_status()
    except Exception:
        logging.exception("Failure creating repository")
        raise
    return "Created or updated repository: %s" % args.repository
def delete_repository(args):
    '''Delete the named snapshot repository from ES.

    Returns a status string; raises on API failure. (The stray
    "Get the region from the instance" comment copy-pasted from
    create_repository has been removed.)
    '''
    delete_repository_url = '/'.join([ES_LOCAL_URL, '_snapshot',
                                      args.repository])
    try:
        delete_repository_request = requests.delete(delete_repository_url,
                                                    timeout=REQUESTS_TIMEOUT)
        delete_repository_request.raise_for_status()
    except Exception:
        logging.exception("Failure deleting repository")
        raise
    return "Deleted repository: %s" % args.repository
def list_snapshots(args):
    '''CLI wrapper: pretty-print every snapshot in args.repository.

    Returns the JSON-formatted listing, or None when the repository
    holds no snapshots.'''
    snapshots = list_es_snapshots(args.repository)
    if not snapshots:
        return None
    # Pretty print
    return json.dumps(snapshots,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
def list_es_snapshots(repository):
    '''Return the list of snapshots stored in *repository*.

    Raises on API failure (after logging). Returns None if the API
    response is falsy.
    '''
    # Get info through API
    repository_info_url = '/'.join([ES_LOCAL_URL, '_snapshot',
                                    repository, '_all'])
    try:
        snapshots_list = requests.get(repository_info_url,
                                      timeout=REQUESTS_TIMEOUT)
        snapshots_list.raise_for_status()
    except Exception:
        logging.exception("Failure getting ES status information through API")
        raise
    if snapshots_list:
        return snapshots_list.json()['snapshots']
def list_repositories(args):
    '''Return a pretty-printed JSON listing of registered repositories,
    or False when none are registered. Raises on API failure.'''
    # Get info through API
    repository_info_url = '/'.join([ES_LOCAL_URL, '_snapshot'])
    try:
        repositories_info = requests.get(repository_info_url,
                                         timeout=REQUESTS_TIMEOUT)
        repositories_info.raise_for_status()
    except Exception:
        logging.exception("Failure getting ES status information through API")
        raise
    # Print data
    if repositories_info.json():
        repositories_list = json.dumps(repositories_info.json(),
                                       sort_keys=True,
                                       indent=4,
                                       separators=(',', ': '))
    else:
        repositories_list = False
    return repositories_list
def create_snapshot(args):
    '''Trigger an ES snapshot into args.repository.

    Returns a status string, or False when --check-leadership was
    requested and this node is not the elected master. A missing
    --snapshot-name is auto-generated as "<repository>.<timestamp>".
    '''
    # Check if we're the leader to do this job
    if args.check_leadership:
        if not es_leadership_check():
            # logging.warn is deprecated; message had a missing space at
            # the string-concatenation boundary.
            logging.warning("Our instance isn't suitable"
                            " to make snapshots in the cluster")
            return False
    # If not defined snapshot name,
    # then default snapshot naming uses repository name plus date-time
    if not args.snapshot_name:
        snapshot_timestamp = datetime.datetime.today().strftime('%Y-%m-%d_%H:%M:%S')
        snapshot_name = ".".join([args.repository, snapshot_timestamp])
        logging.debug("Using auto created snapshot name %s", snapshot_name)
    else:
        snapshot_name = args.snapshot_name
    snapshot_url = "/".join([ES_LOCAL_URL, '_snapshot', args.repository,
                             snapshot_name])
    # Trigger snapshot
    try:
        trigger_snapshot = requests.put(snapshot_url,
                                        timeout=REQUESTS_TIMEOUT)
        trigger_snapshot.raise_for_status()
    except Exception:
        logging.exception("Failure triggering snapshot through API")
        raise
    return 'Triggered snapshot with name: %s' % (snapshot_name)
def restore_snapshot(args):
    '''Trigger snapshot restore to ES. Note - existing index should be closed before.

    Returns a status string, or False when --check-leadership was
    requested and this node is not the elected master.
    '''
    # Check if we're the leader to do this job
    if args.check_leadership:
        if not es_leadership_check():
            logging.warning("Our instance isn't suitable"
                            " to make snapshots in the cluster")
            return False
    restore_url = "/".join([ES_LOCAL_URL, '_snapshot', args.repository,
                            args.snapshot_name, '_restore']) + '?wait_for_completion=true'
    # Restore
    try:
        logging.info("Starting restore of snapshot data from repo. "
                     "Note: this is the long process, the script will exit once it finished")
        # No timeout on purpose: wait_for_completion can block for a long
        # time. Local renamed from `restore_snapshot`, which shadowed this
        # function's own name.
        restore_response = requests.post(restore_url)
        restore_response.raise_for_status()
    except Exception:
        logging.exception("Failure triggering snapshot restore through API")
        raise
    return 'Finished snapshot restore with name: %s' % (args.snapshot_name)
def delete_snapshot(args):
    '''Wrapper around delete_es_snapshot that handles CLI args.

    Returns False when --check-leadership was requested and this node
    is not the elected master.'''
    # Check if we're the leader to do this job
    if args.check_leadership:
        if not es_leadership_check():
            logging.warning("Our instance isn't suitable"
                            " to make snapshots in the cluster")
            return False
    return delete_es_snapshot(args.repository, args.snapshot_name)
def delete_es_snapshot(repository, snapshot_name):
    '''Delete *snapshot_name* from *repository*, waiting for completion.

    Returns a status string; raises on API failure (after logging).'''
    snapshot_delete_url = "/".join([ES_LOCAL_URL, '_snapshot', repository,
                                    snapshot_name]) + '?wait_for_completion=true'
    # Trigger snapshot deletion and wait for completion.
    try:
        trigger_snapshot_deletion = requests.delete(snapshot_delete_url)
        trigger_snapshot_deletion.raise_for_status()
    except Exception:
        logging.exception("Failure deleting snapshot through API")
        raise
    return 'Deleted snapshot with name: %s' % (snapshot_name)
def cleanup_snapshots(args):
    '''Delete snapshots older than args.retention days.

    Bug fix: when the repository held no snapshots, the original set
    stale_snapshots = None and then iterated it in the return
    statement, raising TypeError. An empty list is used instead.
    '''
    # Check if we're the leader to do this job
    if args.check_leadership:
        if not es_leadership_check():
            logging.warning("Our instance isn't suitable"
                            " to make snapshots in the cluster")
            return False
    # Get the list of available snapshots
    snapshots = list_es_snapshots(args.repository)
    logging.debug("Snapshots list: %s", snapshots)
    # Delete stale snapshots older than retention date
    stale_snapshots = []
    if snapshots:
        # Retention date for older snapshots
        retention_date = datetime.datetime.today() - datetime.timedelta(days=args.retention)
        logging.debug("Retention date: %s", retention_date)
        stale_snapshots = [snapshot for snapshot in snapshots
                           if datetime.datetime.strptime(snapshot['start_time'],
                                                         '%Y-%m-%dT%H:%M:%S.%fZ') < retention_date]
        logging.info("Stale snapshots that are older "
                     "than retention date %s: %s",
                     retention_date, stale_snapshots)
        for snapshot in stale_snapshots:
            try:
                delete_es_snapshot(args.repository, snapshot['snapshot'])
            except Exception:
                logging.exception("Failure deleting snapshot %s through API",
                                  snapshot['snapshot'])
                raise
            logging.info("Deleted snapshot: %s", snapshot['snapshot'])
    return "Deleted stale snapshots: %s" % ([snapshot['snapshot']
                                             for snapshot in stale_snapshots])
def argument_parser():
    """Build the CLI and parse sys.argv.

    Each subcommand binds its handler via the `script_action` default,
    which main() then invokes with the parsed args.

    NOTE(review): on Python 3, running with no subcommand leaves
    `script_action` unset and main() crashes with AttributeError —
    consider making the subparsers required; confirm the supported
    interpreter versions first (they were required by default on 2.7).
    """
    # Parse all arguments
    epilog = "EXAMPLE: %(prog)s create_snapshot --repository elasticsearch-dev"
    description = "Manage backup of ES cluster indices to S3 and restore"
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    subparsers = parser.add_subparsers(help='valid subcommands')
    # Here goes a list of subcommands, that call related functions
    parser_create_snapshot = subparsers.add_parser('create_snapshot',
        help='Trigger ES to create snapshot')
    parser_create_snapshot.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Registered in ES cluster repository for snapshots")
    parser_create_snapshot.add_argument("--snapshot-name", "-s",
        type=str,
        required=False,
        help="Custome name to make snapshot")
    parser_create_snapshot.add_argument("--check-leadership",
        action='store_true',
        required=False,
        help="Checks if we're allowed to do the job with multiple nodes available")
    parser_create_snapshot.set_defaults(script_action=create_snapshot)
    parser_restore_snapshot = subparsers.add_parser('restore_snapshot',
        help='Restore index to instance/cluster from repository snapshot in S3')
    parser_restore_snapshot.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Registered in ES cluster repository for snapshots")
    parser_restore_snapshot.add_argument("--snapshot-name", "-s",
        type=str,
        required=True,
        help="Snapshot name to restore")
    parser_restore_snapshot.add_argument("--check-leadership",
        action='store_true',
        required=False,
        help="Checks if we're allowed to do the job with multiple nodes available")
    parser_restore_snapshot.set_defaults(script_action=restore_snapshot)
    parser_list_repositories = subparsers.add_parser('list_repositories',
        help='List available repositories')
    parser_list_repositories.set_defaults(script_action=list_repositories)
    parser_list_snapshots = subparsers.add_parser('list_snapshots',
        help='List available snapshots')
    parser_list_snapshots.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Registered in ES cluster repository for snapshots")
    parser_list_snapshots.set_defaults(script_action=list_snapshots)
    parser_create_repository = subparsers.add_parser('create_repository',
        help='Initial create of repository')
    parser_create_repository.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Repository name for snapshots")
    parser_create_repository.add_argument("--s3-bucket",
        type=str,
        required=True,
        help="Created S3 BUCKET_NAME")
    parser_create_repository.add_argument("--s3-path",
        type=str,
        default="/",
        help="Path within S3 BUCKET_NAME if any, e.g. ROLE/ENV")
    parser_create_repository.set_defaults(script_action=create_repository)
    parser_delete_repository = subparsers.add_parser('delete_repository',
        help='Initial delete of repository')
    parser_delete_repository.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Repository name for snapshots")
    parser_delete_repository.set_defaults(script_action=delete_repository)
    parser_cleanup_snapshots = subparsers.add_parser('cleanup_snapshots',
        help='Cleanup old snapshots with retention period')
    parser_cleanup_snapshots.add_argument("--check-leadership",
        action='store_true',
        required=False,
        help="Checks if we're allowed to do the job with multiple nodes available")
    parser_cleanup_snapshots.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Registered in ES cluster repository for snapshots")
    parser_cleanup_snapshots.add_argument("--retention",
        type=int, default=30,
        help="Delete snapshots older than specified"
        "retention days period")
    parser_cleanup_snapshots.set_defaults(script_action=cleanup_snapshots)
    parser_delete_snapshot = subparsers.add_parser('delete_snapshot',
        help='Delete specified snapshot')
    parser_delete_snapshot.add_argument("--repository", "-r",
        type=str,
        required=True,
        help="Registered in ES cluster repository for snapshots")
    parser_delete_snapshot.add_argument("--snapshot-name", "-s",
        type=str,
        required=False,
        help="Snapshot name to delete")
    parser_delete_snapshot.add_argument("--check-leadership",
        action='store_true',
        required=False,
        help="Checks if we're allowed to do the job with multiple nodes available")
    parser_delete_snapshot.set_defaults(script_action=delete_snapshot)
    # Global option shared by every subcommand
    parser.add_argument("--loglevel",
        type=str, default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING',
                 'ERROR', 'CRITICAL',
                 'debug', 'info', 'warning',
                 'error', 'critical'],
        help="set output verbosity level")
    # Parse all arguments
    args = parser.parse_args()
    return args
def main():
    '''Entry point: parse args, configure logging, run the subcommand.

    Prints the handler's return value when truthy; on failure prints
    the error and exits with status -1.
    '''
    args = argument_parser()
    logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s',
                        level=getattr(logging, args.loglevel.upper(), None))
    # Use function accordingly to action specified
    try:
        output = args.script_action(args)
        if output:
            print(output)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; those now propagate normally.
        print("ERROR: failure running with script action")
        print("ERROR:", sys.exc_info())
        sys.exit(-1)
if __name__ == '__main__':
    # Initialise locking: serialize runs so two scheduled invocations
    # never operate on the cluster at the same time.
    lockfile = FileLock("/var/lock/elasticsearch-backup.lock")
    if lockfile.is_locked():
        print("ERROR: /var/lock/elasticsearch-backup.lock is already locked,"
              "probably we're already running")
        sys.exit(1)
    else:
        # Hold the lock for the whole run; released on exit.
        with lockfile:
            main()
|
# -*- coding: utf-8 -*-
# Arithmetic operations demo
# subtraction
print(11-2)
# result: 9
# addition
print(5+2)
# result: 7
# division (true division always yields a float)
print(10/2)
# result: 5.0
# multiplication
print(2*3)
# result: 6
# floor division: keep only the integer part of the quotient
print(10//9) # 1.111... — Python keeps only the integer part, i.e. 1
# result: 1
# exponentiation
print(2**5) # two to the fifth power, i.e. 2x2x2x2x2 = 32
# result: 32
input("Pressione qualquer tecla para continuar")
def solution(sizes):
    """Return the smallest wallet area that fits every card in ``sizes``.

    Each card may be rotated, so each (w, h) pair is normalised to
    (short side, long side); the wallet must be at least as wide as the
    largest short side and as long as the largest long side.
    """
    max_short = 0
    max_long = 0
    for a, b in sizes:
        short_side, long_side = (a, b) if a <= b else (b, a)
        if short_side > max_short:
            max_short = short_side
        if long_side > max_long:
            max_long = long_side
    return max_short * max_long
#!/usr/bin/env python3
#test_makeBigWig.py
#*
#* --------------------------------------------------------------------------
#* Licensed under MIT (https://git.biohpc.swmed.edu/gudmap_rbk/rna-seq/-/blob/14a1c222e53f59391d96a2a2e1fd4995474c0d15/LICENSE)
#* --------------------------------------------------------------------------
#*
import pytest
import pandas as pd
import os
import utils
# Directory holding the pipeline outputs under test: two levels up from
# this test file.
test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
    '/../../'
@pytest.mark.makeBigWig
def test_makeBigWig():
    """The bigWig file for sample Q-Y5F6_1M.se must exist in the output dir."""
    bigwig_path = os.path.join(test_output_path, 'Q-Y5F6_1M.se.bw')
    assert os.path.exists(bigwig_path)
|
from datetime import datetime, timedelta

# Timestamp of exactly one day ago.
dfr = datetime.now() - timedelta(days=1)
print(dfr)
# The present moment is always later than a point in the past, so this
# comparison always holds and True is printed.
if datetime.now() > dfr:
    print(True)
|
class Solution(object):
    def twoSum(self, numbers, target):
        """
        Hash-map single pass: remember each value's 1-based position and
        return as soon as the current value's complement has been seen.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        seen = {}
        for idx, value in enumerate(numbers):
            complement = target - value
            if complement in seen:
                return [seen[complement], idx + 1]
            seen[value] = idx + 1
class Solution(object):
    def twoSum(self, numbers, target):
        """
        Two-pointer scan over the sorted input: move the left pointer up
        when the sum is too small, the right pointer down when too large.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            total = numbers[lo] + numbers[hi]
            if total < target:
                lo += 1
            elif total > target:
                hi -= 1
            else:
                return [lo + 1, hi + 1]
|
import numpy as np
import time
import random
from find_ball import FindBall
from KF import KF
if __name__ == "__main__":
    # Benchmark loop: query FindBall as fast as possible and report the
    # average frame rate when interrupted with Ctrl-C.
    fb = FindBall()
    # kf = KF(mu0 = np.zeros((6,1)), sigma0 = 0.1 * np.eye(6),
    #        C = np.hstack((np.eye(3), np.zeros((3, 3)))), Q = 0.1 * np.eye(6),
    #        R = 0.1 * np.eye(6), g = -9.8, delta_t = 0.1)
    # kf.startKF()
    tot_frames = 0
    time_old = time.time()
    while True:
        try:
            # NOTE(review): assumes find_ball() returns (radius, x, y, z) —
            # confirm against FindBall's implementation.
            r, x, y, z = fb.find_ball()
            # r, x, y, z = np.random.rand(4)
            # if r < 0.10:
            #     print("update! [{}, {}, {}], r = ".format(x, z, -y, r))
            #     kf.update(np.array([x, z, -y]))
            # time.sleep(0.025)
            tot_frames += 1
            print('done')
        except Exception as ex:
            # Best effort: a frame with no detectable ball is not fatal.
            print('no ball')
            pass
        except KeyboardInterrupt:
            # KeyboardInterrupt derives from BaseException, so the Exception
            # handler above does not swallow it; Ctrl-C ends the loop here.
            break
    time_now = time.time()
    # Average frames per second over the whole run.
    print(tot_frames / (time_now - time_old))
|
# Generated by Django 2.0.5 on 2018-06-03 15:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds an optional ``unit`` foreign key to the ``map`` model."""

    dependencies = [
        ('calculation', '0010_auto_20180603_1526'),
    ]

    operations = [
        migrations.AddField(
            model_name='map',
            name='unit',
            # PROTECT: a Unit referenced by any map cannot be deleted.
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='calculation.Unit', verbose_name='ед. изм'),
        ),
    ]
|
import dash_bootstrap_components as dbc

# A horizontal group of three Bootstrap-styled buttons, each with a
# different contextual colour.
button_group = dbc.ButtonGroup(
    [
        dbc.Button("Left", color="danger"),
        dbc.Button("Middle", color="warning"),
        dbc.Button("Right", color="success"),
    ]
)
|
'''
Facial analysis
2. face ++
a. locate the user by face groupping
b. identify user has partner, has child or not
1). parse all image captions to capture keyword
i). #mygirdfriend, #myboyfriend, #myhusband, #mywife, #mychildreb etc.
2). person repeatedly appear in the user's timeline => has partener
3). child repeatedly appear in the user's timeline => has child
c. get the average smile index
'''
import logging
import time
import json
from os import listdir
from facepp import API
from facepp import File
from pprint import pformat
# Module-level state shared by all functions below; the first four are
# populated by init() before anything else is called.
api = ''                    # Face++ API client (set in init)
BATCH_ID = ''               # identifier of the batch being processed (set in init)
output = ''                 # file handle for per-user analysis results (set in init)
output_external_usage = ''  # file handle for raw face detections (set in init)
MAX_FACESET = 5             # max number of grouping sessions in flight at once
DELAY = 5                   # seconds to sleep between session polls
FILE_THRESHOLD = 30         # minimum number of pictures required per timeline
FACE_THRESHOLD = 5          # minimum number of detected faces to run grouping
SESSION_INQUEUE = []        # ids of grouping sessions still pending
SESSION_FACESET_MAP = {}    # session id -> [faceset id, username]
FACE_POST_TIME_MAP = {}     # face id -> unix timestamp of the post it came from
REMOVEABLE_FACESETS = []    # faceset ids queued for deletion
def init(batch_id, key, secret):
    """Initialise module globals: the Face++ client, logging and output files.

    Must be called once before any other function in this module.
    """
    global api, BATCH_ID, output, output_external_usage
    api = API(key, secret)
    BATCH_ID = batch_id
    # Per-batch log file.
    log_path = 'logs/0/cat_log/log_batch_' + BATCH_ID + '.log'
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, filename=log_path)
    # Per-batch result files, opened in append mode.
    output = open('output/0/cat/output_batch_' + BATCH_ID + '.txt', 'a+')
    output_external_usage = open('output/0/cat/output_external_usage_batch_' + BATCH_ID + '.txt', 'a+')
def delete_faceset(faceset_id):
    """Delete a faceset, retrying until the API reports success.

    After deleting ``faceset_id`` (and removing it from
    REMOVEABLE_FACESETS), drains any other facesets queued for removal.

    Args:
        faceset_id: id of the faceset to delete; must be present in
            REMOVEABLE_FACESETS.

    Returns:
        True once the faceset (and the rest of the queue) is deleted.
    """
    global REMOVEABLE_FACESETS
    # make sure the faceset is deleted
    del_resp = api.faceset.delete(faceset_id=faceset_id)
    while not del_resp['success']:
        logging.info ('BATCH_' + BATCH_ID + ':faceset:' + faceset_id + ' delete failed, retry... ')
        del_resp = api.faceset.delete(faceset_id=faceset_id)
        time.sleep(1)
    # The loop above only exits on success, so the old ``if`` re-check was
    # redundant.
    REMOVEABLE_FACESETS.remove(faceset_id)
    # BUG FIX: the original iterated over REMOVEABLE_FACESETS while the
    # recursive calls removed items from it, skipping entries
    # unpredictably.  Drain the queue head-first instead: each recursive
    # call removes its own id, so this terminates once the queue is empty.
    while REMOVEABLE_FACESETS:
        delete_faceset(REMOVEABLE_FACESETS[0])
    return True
def process_faces_in_timeline(username):
    """Detect and group the faces found in one user's picture timeline.

    Reads pictures from the local dog_data directory, detects faces via
    the Face++ API in batches of 10 files, adds them to a per-user
    faceset and starts an asynchronous grouping session.  When
    MAX_FACESET sessions are already pending, finished sessions are
    harvested first (results written to ``output``).

    Args:
        username: directory name of the user timeline to process.

    Returns:
        The grouping session id on success, or None when the timeline is
        skipped (too few pictures/faces) or the grouping request failed.
    """
    global MAX_FACESET, DELAY, FILE_THRESHOLD, FACE_THRESHOLD, \
        FACE_THRESHOLD, SESSION_INQUEUE, SESSION_FACESET_MAP, FACE_POST_TIME_MAP, REMOVEABLE_FACESETS
    print ('current session in queue:' + str(len(SESSION_INQUEUE)))
    logging.info ('BATCH_' + BATCH_ID + ':process timeline for user: ' + username + '...')
    '''
    1. Detect faces in the 3 pictures and find out their positions and
    attributes
    2. Create persons using the face_id
    3. Create a new group and add those persons in it
    4. Train the model
    5. asyncronously wait for training to complete
    Args:
        filename queue that represents a user's timeline
    Returns:
        groupping session_id
    '''
    # If the session limit is reached, wait until a vacancy appears.
    while len(SESSION_INQUEUE) == MAX_FACESET:
        # NOTE(review): SESSION_INQUEUE is mutated inside this loop while it
        # is being iterated; some sessions may be skipped within a pass.
        for session in SESSION_INQUEUE:
            try:
                rst = api.info.get_session(session_id=session)
            except:
                # Session retrieve failed: delete the corresponding faceset
                # and drop the session from the queue.
                logging.info ('BATCH_' + BATCH_ID + ':retrieve session:'+ session +' failed')
                faceset_id = SESSION_FACESET_MAP[session][0]
                REMOVEABLE_FACESETS.append(faceset_id)
                if delete_faceset(faceset_id):
                    logging.info ('BATCH_' + BATCH_ID + ':faceset:'+ faceset_id +' delete succeed')
                # INQUEUE -= 1
                SESSION_INQUEUE.remove(session)
                del SESSION_FACESET_MAP[session]
            # NOTE(review): when the lookup above failed, ``rst`` is unbound
            # (NameError on the first failure) or stale from a previous
            # iteration, and SESSION_FACESET_MAP[session] was already
            # deleted — confirm whether the except branch should `continue`.
            if rst['status'] != 'INQUEUE':
                # Either succeeded or failed — the session is finished.
                faceset_id = SESSION_FACESET_MAP[session][0]
                prev_username = SESSION_FACESET_MAP[session][1]
                REMOVEABLE_FACESETS.append(faceset_id)
                # Delete the used faceset before moving to the next faceset.
                if delete_faceset(faceset_id):
                    logging.info ('BATCH_' + BATCH_ID + ':faceset:'+ faceset_id +' delete succeed')
                if session in SESSION_INQUEUE:
                    SESSION_INQUEUE.remove(session)
                del SESSION_FACESET_MAP[session]
                # If the grouping succeeded, process its result.
                if rst['result']:
                    logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + ' groupping result is ready for processing')
                    USER = process_groupping_result(prev_username, rst['result'])
                    if USER:
                        logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + ' writing to file' )
                        output.write(USER + '\n')
                        logging.info ('BATCH_' + BATCH_ID + ':processing user: ' + prev_username + ' all completed.')
                    else:
                        logging.info ('BATCH_' + BATCH_ID + ':processing user: ' + prev_username + ' no groupped faces')
                else:
                    logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + ' groupping failed')
        # If no session completed, sleep before the next polling iteration.
        if len(SESSION_INQUEUE) == MAX_FACESET:
            logging.info ('BATCH_' + BATCH_ID + ': faceset limit reached, sleep for ' + str(DELAY) + ' s...')
            time.sleep(DELAY)
    dir_prefix = '/public/lchi3/Pet/dog_data/' + username + '/pics'
    file_queue = listdir(dir_prefix)
    # Skip sparse timelines entirely.
    if len(file_queue) < FILE_THRESHOLD:
        logging.info ('BATCH_' + BATCH_ID + ':less than 30 pics, pass')
        return
    # 1. Create a faceset for the user timeline.
    faceset_id = api.faceset.create(name=username)['faceset_id']
    # 2. Detect faces in each image, collecting all face ids across the
    # timeline; faces are pushed to the faceset in batches of 10 files.
    face_id_str = ""
    total_file_counter = 0
    total_face_counter = 0
    file_counter = 0
    batch = 0
    start_time = time.time()
    for file in file_queue:
        total_file_counter += 1
        file_counter += 1
        logging.info ('BATCH_' + BATCH_ID + ':detecting faces for file. ' + str(total_file_counter))
        try:
            faces = api.detection.detect(img=File(dir_prefix + '/' + file))['face']
            if len(faces) > 0:
                logging.info ('BATCH_' + BATCH_ID + ':detecting faces for file. ' + str(total_file_counter) + ' succeed')
                # Detection succeeded: record the raw response for external use.
                # NOTE(review): assumes file names look like
                # ``<unixtime>_<uid>_<pid>...`` — confirm with the scraper.
                unixtime = file.split('_')[0]
                uid = file.split('_')[1]
                pid = file.split('_')[2]
                external_face_json = {'uid_pid':uid + '_' + pid, 'resp': faces, 'timestamp':unixtime}
                output_external_usage.write(json.dumps(external_face_json) + '\n')
            else:
                logging.info ('BATCH_' + BATCH_ID + ':detecting faces for file. ' + str(total_file_counter) + ' no face found')
        except:
            logging.info ('BATCH_' + BATCH_ID + ':detecting faces for file. ' + str(total_file_counter) + ' failed')
            continue
        total_face_counter += len(faces)
        for face in faces:
            face_id = face['face_id']
            # File name format: unixtime-url.jpg — remember when each face
            # was posted so grouping results can be timestamped later.
            timestamp = file.split('_')[0]
            FACE_POST_TIME_MAP[face_id] = timestamp
            face_id_str += (face_id + ',')
        # A full 10-file batch with no faces at all: just reset the counter.
        if file_counter == 10 and face_id_str == '':
            batch += 1
            logging.info ('BATCH_' + BATCH_ID + ':no face in the whole batch. ' + str(batch) + ', pass')
            file_counter = 0
        # A full 10-file batch with faces: push them to the faceset.
        if file_counter == 10 and face_id_str != '':
            batch += 1
            # Trim the last comma.
            face_id_str = face_id_str[:-1]
            # Add the collected faces to the faceset.
            logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch))
            try:
                resp = api.faceset.add_face(face_id=face_id_str, faceset_id=faceset_id)
                if not resp['success']:
                    logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch) + 'failed:')
                    logging.info(resp)
                else:
                    logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch) + 'succeed')
            except:
                logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch) + 'failed:')
                logging.info (resp)
                face_id_str = ""
                file_counter = 0
                continue
            face_id_str = ""
            file_counter = 0
    # Flush the final, partial batch of faces (fewer than 10 files).
    if file_counter != 0 and face_id_str != '':
        face_id_str = face_id_str[:-1]
        try:
            resp = api.faceset.add_face(face_id=face_id_str, faceset_id=faceset_id)
            if not resp['success']:
                logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch) + 'failed')
            else:
                logging.info ('BATCH_' + BATCH_ID + ':adding face batch. ' + str(batch) + 'succeed')
        except:
            logging.info ('BATCH_' + BATCH_ID + ':adding last batch of faces failed')
    face_id_str = ""
    file_counter = 0
    logging.info ( 'detecting faces completed' )
    logging.info ( 'detecting elapsed time: ' + str(time.time() - start_time) + ' s' )
    if total_face_counter > FACE_THRESHOLD:
        # Start grouping asynchronously; the session is polled later.
        logging.info ('BATCH_' + BATCH_ID + ':sending groupping resquest ...')
        try:
            resp = api.grouping.grouping(faceset_id=faceset_id)
            if resp['session_id']:
                session_id = resp['session_id']
                logging.info ('BATCH_' + BATCH_ID + ':groupping resquest for user:' + username +' sent successfully.')
                # INQUEUE += 1
                SESSION_INQUEUE.append(session_id)
                # When the session completes, delete the corresponding faceset.
                SESSION_FACESET_MAP[session_id] = [faceset_id, username]
                logging.info ('BATCH_' + BATCH_ID + ':session_id: ' + session_id)
                return session_id
            return
        except:
            # Grouping request failed: remove the faceset.
            REMOVEABLE_FACESETS.append(faceset_id)
            delete_faceset(faceset_id)
            logging.info ('BATCH_' + BATCH_ID + ':groupping resquest for user:' + username +' sent failed.')
            return
    else:
        # Too few faces for a meaningful grouping: remove the faceset.
        REMOVEABLE_FACESETS.append(faceset_id)
        delete_faceset(faceset_id)
        logging.info ('BATCH_' + BATCH_ID + ':too few faces to group, pass')
        return
# def process_timeline_captions(captions):
# '''
# process all captions of a user timeline to see
# if there is any repeating keywords
# Args:
# all captions of a timeline
# Returns:
# 0 -> partner!
# 1 -> child!
# 2 -> child and partner!
# '''
def process_signle_person(faces, isUser):
    '''
    Fetch full face records for one grouped person and summarise them.

    Face ids are fetched from the API in batches of 10.

    Args:
        faces: list of face dicts (each with a 'face_id') for one person.
        isUser: True when this person is the timeline owner.

    Returns:
        (attribute, average smile value) when isUser is True, otherwise
        (attribute, list of post timestamps where this person appeared).
    '''
    face_ids = ""
    # Accumulate face ids and fetch their full records in batches of 10.
    face_count = 0
    face_list = []
    for face in faces:
        face_count += 1
        face_ids += ( face['face_id'] + ",")
        if face_count == 10:
            # Trim the trailing comma.
            face_ids = face_ids[:-1]
            # Fetch this batch of face records.
            try:
                for face in api.info.get_face(face_id=face_ids)['face_info']:
                    face_list.append(face)
            except:
                logging.info ('BATCH_' + BATCH_ID + ':get face failed')
            # Reset the batch accumulator.
            face_ids = ""
            face_count = 0
    # Fetch the final, partial batch (fewer than 10 faces).
    if face_ids != "":
        # Trim the trailing comma.
        face_ids = face_ids[:-1]
        try:
            for face in api.info.get_face(face_id=face_ids)['face_info']:
                face_list.append(face)
        except:
            logging.info ('BATCH_' + BATCH_ID + ':get face failed')
    if isUser:
        # NOTE(review): raises IndexError/ZeroDivisionError if every batch
        # fetch failed and face_list is empty — confirm callers guard this.
        attribute = face_list[0]['attribute']
        smile = 0
        for face in face_list:
            smile += float(face['attribute']['smiling']['value'])
        smile = smile / len(face_list)
        return attribute, smile
    else:
        # Timestamps of the posts in which this person appeared.
        time_appeared = []
        for face in face_list:
            time_appeared.append(FACE_POST_TIME_MAP[face['face_id']])
        # Re-fetch the first face to use as this person's representative
        # attribute record.
        face_id = faces[0]['face_id']
        face_list = api.info.get_face(face_id=face_id)['face_info']
        attribute = face_list[0]['attribute']
        return attribute, time_appeared
def process_groupping_result(username, rst):
    """Turn one grouping result into a JSON summary of the user.

    The largest face group is assumed to be the timeline owner; every
    other group becomes an entry under ``others``.

    Args:
        username: owner of the timeline the result belongs to.
        rst: grouping result dict containing a 'group' list.

    Returns:
        JSON string describing the user, or None when no faces grouped.
    """
    logging.info ('BATCH_' + BATCH_ID + ':processing groupping result for user:' + username)
    if len(rst['group']) == 0:
        logging.info ('BATCH_' + BATCH_ID + ':no grouped faces for user:' + username)
        return None
    '''
    given the groupping result, get the user's attributes
    see if the user has a partner or not
    see if the user has child or not
    Args:
        returned groupping result
    Returns:
        {}
    '''
    # Sort the group list by group size; the largest group is taken as the
    # timeline owner.
    groups = sorted(rst['group'], key=len)
    user = groups.pop()
    user_face_ids = ""
    ## only consider groupped faces
    # USER: representative attributes plus average smile value.
    attribute, smile = process_signle_person(user, True)
    USER = {'username': username, 'attribute': attribute, 'ave_smile': smile}
    # OTHERS: every remaining group with the timestamps of its appearances.
    USER['others'] = []
    for other in groups:
        attribute, time_appeared = process_signle_person(other, False)
        USER['others'].append({'attribute':attribute, 'times': time_appeared})
    # Return in JSON format.
    return json.dumps(USER)
def process_tail_sessions():
    """Poll and drain every grouping session still pending.

    Called after the last timeline has been submitted: keeps polling the
    API until SESSION_INQUEUE is empty, writing each finished session's
    result to ``output`` and deleting its faceset.
    """
    logging.info ('BATCH_' + BATCH_ID + ':process tail sessions')
    global MAX_FACESET, DELAY, FILE_THRESHOLD, FACE_THRESHOLD, \
        FACE_THRESHOLD, SESSION_INQUEUE, SESSION_FACESET_MAP, FACE_POST_TIME_MAP, REMOVEABLE_FACESETS
    while len(SESSION_INQUEUE) > 0:
        temp_len = len(SESSION_INQUEUE)
        # NOTE(review): SESSION_INQUEUE is mutated while being iterated, so
        # a finished session may be picked up only on a later pass.
        for session in SESSION_INQUEUE:
            rst = api.info.get_session(session_id=session)
            if rst['status'] != 'INQUEUE':
                # Either succeeded or failed — the session is finished.
                faceset_id = SESSION_FACESET_MAP[session][0]
                REMOVEABLE_FACESETS.append(faceset_id)
                prev_username = SESSION_FACESET_MAP[session][1]
                # Delete the used faceset before moving to the next faceset.
                if session in SESSION_INQUEUE:
                    SESSION_INQUEUE.remove(session)
                del SESSION_FACESET_MAP[session]
                delete_faceset(faceset_id)
                # INQUEUE -= 1
                # If the grouping succeeded, process its result.
                if rst['result']:
                    logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + ' groupping result is ready for processing')
                    USER = process_groupping_result(prev_username, rst['result'])
                    if USER:
                        logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + 'writing to file' )
                        output.write(USER + '\n')
                        logging.info ('BATCH_' + BATCH_ID + ':processing user: ' + prev_username + ' all completed.')
                    else:
                        logging.info ('BATCH_' + BATCH_ID + ':processing user: ' + prev_username + ' no groupped faces')
                # # USER = process_groupping_result(prev_username, rst['result'])
                # logging.info ('BATCH_' + BATCH_ID + ':session('+ prev_username +'): ' + session + 'writing to file' )
                # output.write(USER + '\n')
                # logging.info ('BATCH_' + BATCH_ID + ':processing user: ' + prev_username + ' all completed.' )
        # If no session completed in this pass, sleep before polling again.
        if len(SESSION_INQUEUE) == temp_len:
            logging.info ('BATCH_' + BATCH_ID + ':sleep for 5s...')
            time.sleep(DELAY)
# delete_faceset()
# start_time = time.time()
# # print(process_faces_in_timeline('_alessiadelorenzi_96'))
# # faceset_id = 'da209a060a2a4eb3a009acc2a11c307e'
# # print (api.grouping.grouping(faceset_id=faceset_id))
# print ( process_faces_in_timeline('_alessiadelorenzi_96') )
# print ( 'total elapsed time: ' + str(time.time() - start_time) + ' s' ) |
# -*- coding: utf-8 -*-
'''
博客1:python+opencv实现基于傅里叶变换的旋转文本校正
https://blog.csdn.net/qq_36387683/article/details/80530709
博客2:OpenCV—python 图像矫正(基于傅里叶变换—基于透视变换)
https://blog.csdn.net/wsp_1138886114/article/details/83374333
傅里叶相关知识:
https://blog.csdn.net/on2way/article/details/46981825
频率:对于图像来说就是指图像颜色值的梯度,即灰度级的变化速度
幅度:可以简单的理解为是频率的权,即该频率所占的比例
DFT之前的原图像在x y方向上表示空间坐标,DFT是经过x y方向上的傅里叶变换来统计像素在这两个方向上不同频率的分布情况,
所以DFT得到的图像在x y方向上不再表示空间上的长度,而是频率。
仿射变换与透射变换:
仿射变换和透视变换更直观的叫法可以叫做“平面变换”和“空间变换”或者“二维坐标变换”和“三维坐标变换”.
从另一个角度也能说明三维变换和二维变换的意思,仿射变换的方程组有6个未知数,所以要求解就需要找到3组映射点,
三个点刚好确定一个平面.
透视变换的方程组有8个未知数,所以要求解就需要找到4组映射点,四个点就刚好确定了一个三维空间.
图像旋转算法 数学原理:
https://blog.csdn.net/liyuan02/article/details/6750828
角度angle可以用np.angle()
ϕ=atan(实部/虚部)
numpy包中自带一个angle函数可以直接根据复数的实部与虚部求出角度(默认出来的角度是弧度)。
'''
import cv2 as cv
import numpy as np
import math
from matplotlib import pyplot as plt
def fourier_demo():
    """Deskew rotated text in an image via its Fourier spectrum.

    Pipeline: grayscale -> pad to an optimal DFT size -> log-magnitude
    spectrum -> threshold + Hough line detection -> derive the skew
    angle -> rotate the image back with an affine transform.  Shows each
    intermediate stage in an OpenCV window.
    """
    # 1. Read the file and convert to grayscale.
    img = cv.imread('img/table-3.png')
    cv.imshow('original', img)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    cv.imshow('gray', gray)

    # 2. Pad the image.  OpenCV's DFT is fastest when each dimension is a
    # product of 2s, 3s and 5s, so find the best size with
    # getOptimalDFTSize() and pad the excess with copyMakeBorder(),
    # keeping the original aligned to the top-left corner.  A solid fill
    # colour barely affects the transform, and the later line search
    # ignores that influence entirely.
    h, w = img.shape[:2]
    new_h = cv.getOptimalDFTSize(h)
    new_w = cv.getOptimalDFTSize(w)
    right = new_w - w
    bottom = new_h - h
    nimg = cv.copyMakeBorder(gray, 0, bottom, 0, right, borderType=cv.BORDER_CONSTANT, value=0)
    cv.imshow('optim image', nimg)

    # 3. Run the Fourier transform to obtain the frequency-domain image.
    f = np.fft.fft2(nimg)  # spatial domain -> frequency domain
    fshift = np.fft.fftshift(f)  # shift the low frequencies to the centre (complex result)
    magnitude = np.log(np.abs(fshift))  # abs() takes the magnitude; log scales it to roughly 0-255

    # 4. Binarise the spectrum and run Hough line detection.
    magnitude_uint = magnitude.astype(np.uint8)  # HoughLinesP() requires an 8-bit single-channel image
    ret, thresh = cv.threshold(magnitude_uint, thresh=11, maxval=255, type=cv.THRESH_BINARY)
    print("ret:",ret)
    cv.imshow('thresh', thresh)
    print("thresh.dtype:", thresh.dtype)
    # Probabilistic Hough line transform.
    lines = cv.HoughLinesP(thresh, 2, np.pi/180, 30, minLineLength=40, maxLineGap=100)
    print("len(lines):", len(lines))

    # 5. Draw the detected lines on a fresh image and extract the skew slope.
    lineimg = np.ones(nimg.shape,dtype=np.uint8)
    lineimg = lineimg * 255
    piThresh = np.pi/180
    pi2 = np.pi/2
    print("piThresh:",piThresh)
    # Three line directions are expected: 0 degrees, 90 degrees, and the
    # tilt angle we are actually after.
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv.line(lineimg, (x1, y1), (x2, y2), (0, 255, 0), 2)
        if x2 - x1 == 0:
            continue
        else:
            theta = (y2 - y1) / (x2 - x1)
        if abs(theta) < piThresh or abs(theta - pi2) < piThresh:
            continue
        else:
            print("theta:",theta)

    # 6. Convert the slope to an angle in degrees, minding the error terms.
    angle = math.atan(theta)
    print("angle(弧度):",angle)
    angle = angle * (180 / np.pi)
    print("angle(角度1):",angle)
    angle = (angle - 90)/ (w/h)
    # Due to how the DFT works, the detected angle equals the true text
    # rotation only when the transformed image is square; for other
    # aspect ratios the angle must be rescaled by the width/height ratio.
    print("angle(角度2):",angle)

    # 7. Deskew the picture: build an affine rotation matrix with
    # getRotationMatrix2D() and apply it with warpAffine() to obtain the
    # corrected result.
    center = (w//2, h//2)
    M = cv.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv.warpAffine(img, M, (w, h), flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)
    cv.imshow('line image', lineimg)
    cv.imshow('rotated', rotated)
if __name__ == '__main__':
    # Run the demo, then keep all OpenCV windows open until a key is pressed.
    fourier_demo()
    cv.waitKey(0)
    cv.destroyAllWindows()
|
# Print temperature for a city using the OpenWeatherMap API.
import json

import requests

try:
    location = input('Enter a city name: ')
    address = 'https://api.openweathermap.org/data/2.5/weather?q=' + location + '&units=metric&appid=60113b36f0a83502fe59ba9e512b76d4'
    data = requests.get(address)
    # SECURITY FIX: never eval() an HTTP response body — it executes
    # arbitrary expressions from the network.  Parse it as JSON instead.
    temp = json.loads(data.text)
    print('Temperature of ' + location + ' is ' + str(temp['main']['temp']) + '*c.')
except (requests.RequestException, ValueError, KeyError):
    # Network failure, non-JSON body, or an error payload without
    # ['main']['temp'] (e.g. unknown city).
    print('Invalid city name!\n')
# -*- coding: utf-8 -*-
"""Copy in and out (CPIO) archive format files."""
import os
from dtformats import data_format
from dtformats import data_range
from dtformats import errors
class CPIOArchiveFileEntry(data_range.DataRange):
  """CPIO archive file entry.

  A data range over the entry's file data within the archive, carrying
  the metadata parsed from the entry header.

  Attributes:
    data_offset (int): offset of the data.
    data_size (int): size of the data.
    group_identifier (int): group identifier (GID).
    inode_number (int): inode number.
    mode (int): file access mode.
    modification_time (int): modification time, in number of seconds since
        January 1, 1970 00:00:00.
    path (str): path.
    size (int): size of the file entry data.
    user_identifier (int): user identifier (UID).
  """

  def __init__(self, file_object, data_offset=0, data_size=0):
    """Initializes a CPIO archive file entry.

    Args:
      file_object (file): file-like object of the CPIO archive file.
      data_offset (Optional[int]): offset of the data.
      data_size (Optional[int]): size of the data.
    """
    super().__init__(
        file_object, data_offset=data_offset, data_size=data_size)
    # Metadata attributes are filled in by the archive reader after the
    # entry header has been parsed.
    self.group_identifier = None
    self.inode_number = None
    self.mode = None
    self.modification_time = None
    self.path = None
    self.size = None
    self.user_identifier = None
class CPIOArchiveFile(data_format.BinaryDataFile):
  """CPIO archive file.

  Attributes:
    file_format (str): CPIO file format.
    size (int): size of the CPIO file data.
  """

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('cpio.yaml')

  # TODO: move path into structure.

  _CPIO_SIGNATURE_BINARY_BIG_ENDIAN = b'\x71\xc7'
  _CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN = b'\xc7\x71'
  _CPIO_SIGNATURE_PORTABLE_ASCII = b'070707'
  _CPIO_SIGNATURE_NEW_ASCII = b'070701'
  _CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM = b'070702'

  _CPIO_ATTRIBUTE_NAMES_ODC = (
      'device_number', 'inode_number', 'mode', 'user_identifier',
      'group_identifier', 'number_of_links', 'special_device_number',
      'modification_time', 'path_size', 'file_size')

  _CPIO_ATTRIBUTE_NAMES_CRC = (
      'inode_number', 'mode', 'user_identifier', 'group_identifier',
      'number_of_links', 'modification_time', 'path_size',
      'file_size', 'device_major_number', 'device_minor_number',
      'special_device_major_number', 'special_device_minor_number',
      'checksum')

  def __init__(self, debug=False, output_writer=None):
    """Initializes a CPIO archive file.

    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(CPIOArchiveFile, self).__init__(
        debug=debug, output_writer=output_writer)
    self._file_entries = None

    self.file_format = None
    self.size = None

  def _DebugPrintFileEntry(self, file_entry):
    """Prints file entry debug information.

    Args:
      file_entry (cpio_new_file_entry): file entry.
    """
    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      value_string = f'0x{file_entry.signature:04x}'
    else:
      value_string = f'{file_entry.signature!s}'

    self._DebugPrintValue('Signature', value_string)

    if self.file_format not in ('crc', 'newc'):
      self._DebugPrintValue('Device number', f'{file_entry.device_number:d}')

    self._DebugPrintValue('Inode number', f'{file_entry.inode_number:d}')

    self._DebugPrintValue('Mode', f'{file_entry.mode:o}')

    self._DebugPrintValue(
        'User identifier (UID)', f'{file_entry.user_identifier:d}')

    self._DebugPrintValue(
        'Group identifier (GID)', f'{file_entry.group_identifier:d}')

    self._DebugPrintValue('Number of links', f'{file_entry.number_of_links:d}')

    if self.file_format not in ('crc', 'newc'):
      self._DebugPrintValue(
          'Special device number', f'{file_entry.special_device_number:d}')

    self._DebugPrintValue(
        'Modification time', f'{file_entry.modification_time:d}')

    if self.file_format not in ('crc', 'newc'):
      self._DebugPrintValue('Path size', f'{file_entry.path_size:d}')

    self._DebugPrintValue('File size', f'{file_entry.file_size:d}')

    if self.file_format in ('crc', 'newc'):
      self._DebugPrintValue(
          'Device major number', f'{file_entry.device_major_number:d}')

      self._DebugPrintValue(
          'Device minor number', f'{file_entry.device_minor_number:d}')

      self._DebugPrintValue(
          'Special device major number',
          f'{file_entry.special_device_major_number:d}')

      self._DebugPrintValue(
          'Special device minor number',
          f'{file_entry.special_device_minor_number:d}')

      self._DebugPrintValue('Path size', f'{file_entry.path_size:d}')

      self._DebugPrintValue('Checksum', f'0x{file_entry.checksum:08x}')

  def _ReadFileEntry(self, file_object, file_offset):
    """Reads a file entry.

    Args:
      file_object (file): file-like object.
      file_offset (int): offset of the data relative to the start of
          the file-like object.

    Returns:
      CPIOArchiveFileEntry: a file entry.

    Raises:
      ParseError: if the file entry cannot be read.
    """
    if self.file_format == 'bin-big-endian':
      data_type_map = self._GetDataTypeMap('cpio_binary_big_endian_file_entry')
    elif self.file_format == 'bin-little-endian':
      data_type_map = self._GetDataTypeMap(
          'cpio_binary_little_endian_file_entry')
    elif self.file_format == 'odc':
      data_type_map = self._GetDataTypeMap('cpio_portable_ascii_file_entry')
    elif self.file_format in ('crc', 'newc'):
      data_type_map = self._GetDataTypeMap('cpio_new_ascii_file_entry')

    file_entry, file_entry_data_size = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'file entry')

    file_offset += file_entry_data_size

    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      # The binary formats store 32-bit values as two 16-bit halves.
      file_entry.modification_time = (
          (file_entry.modification_time.upper << 16) |
          file_entry.modification_time.lower)

      file_entry.file_size = (
          (file_entry.file_size.upper << 16) | file_entry.file_size.lower)

    if self.file_format == 'odc':
      # The portable ASCII format stores numeric values in octal.
      for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
        value = getattr(file_entry, attribute_name, None)
        try:
          value = int(value, 8)
        except ValueError:
          raise errors.ParseError((
              f'Unable to convert attribute: {attribute_name:s} into an '
              f'integer'))

        setattr(file_entry, attribute_name, value)

    elif self.file_format in ('crc', 'newc'):
      # The new ASCII formats store numeric values in hexadecimal.
      for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
        value = getattr(file_entry, attribute_name, None)
        try:
          value = int(value, 16)
        except ValueError:
          raise errors.ParseError((
              f'Unable to convert attribute: {attribute_name:s} into an '
              f'integer'))

        setattr(file_entry, attribute_name, value)

    if self._debug:
      self._DebugPrintFileEntry(file_entry)

    path_data = file_object.read(file_entry.path_size)

    if self._debug:
      self._DebugPrintData('Path data', path_data)

    file_offset += file_entry.path_size

    # TODO: should this be ASCII?
    path = path_data.decode('ascii')
    path, _, _ = path.partition('\x00')

    if self._debug:
      self._DebugPrintValue('Path', path)

    # The path is aligned to 2 bytes (binary formats) or 4 bytes
    # (new ASCII formats); the odc format has no alignment padding.
    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      padding_size = file_offset % 2
      if padding_size > 0:
        padding_size = 2 - padding_size

    elif self.file_format == 'odc':
      padding_size = 0

    elif self.file_format in ('crc', 'newc'):
      padding_size = file_offset % 4
      if padding_size > 0:
        padding_size = 4 - padding_size

    if self._debug:
      padding_data = file_object.read(padding_size)
      self._DebugPrintData('Path alignment padding', padding_data)

    file_offset += padding_size

    archive_file_entry = CPIOArchiveFileEntry(file_object)

    archive_file_entry.data_offset = file_offset
    archive_file_entry.data_size = file_entry.file_size
    archive_file_entry.group_identifier = file_entry.group_identifier
    archive_file_entry.inode_number = file_entry.inode_number
    archive_file_entry.modification_time = file_entry.modification_time
    archive_file_entry.path = path
    archive_file_entry.mode = file_entry.mode
    archive_file_entry.size = (
        file_entry_data_size + file_entry.path_size + padding_size +
        file_entry.file_size)
    archive_file_entry.user_identifier = file_entry.user_identifier

    file_offset += file_entry.file_size

    # The file data is aligned the same way as the path.
    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      padding_size = file_offset % 2
      if padding_size > 0:
        padding_size = 2 - padding_size

    elif self.file_format == 'odc':
      padding_size = 0

    elif self.file_format in ('crc', 'newc'):
      padding_size = file_offset % 4
      if padding_size > 0:
        padding_size = 4 - padding_size

    if padding_size > 0:
      if self._debug:
        file_object.seek(file_offset, os.SEEK_SET)
        padding_data = file_object.read(padding_size)
        self._DebugPrintData('File data alignment padding', padding_data)

      archive_file_entry.size += padding_size

    if self._debug:
      self._DebugPrintText('\n')

    return archive_file_entry

  def _ReadFileEntries(self, file_object):
    """Reads the file entries from the cpio archive.

    Args:
      file_object (file): file-like object.
    """
    self._file_entries = {}

    file_offset = 0
    while file_offset < self._file_size or self._file_size == 0:
      file_entry = self._ReadFileEntry(file_object, file_offset)
      file_offset += file_entry.size
      # The trailer entry marks the end of the archive.
      if file_entry.path == 'TRAILER!!!':
        break

      if file_entry.path in self._file_entries:
        # TODO: alert on file entries with duplicate paths?
        continue

      self._file_entries[file_entry.path] = file_entry

    self.size = file_offset

  def Close(self):
    """Closes the CPIO archive file."""
    super(CPIOArchiveFile, self).Close()
    self._file_entries = None

  def FileEntryExistsByPath(self, path):
    """Determines if file entry for a specific path exists.

    Args:
      path (str): path of the file entry.

    Returns:
      bool: True if the file entry exists.
    """
    if not self._file_entries:
      return False

    return path in self._file_entries

  def GetFileEntries(self, path_prefix=''):
    """Retrieves the file entries.

    Args:
      path_prefix (Optional[str]): path prefix.

    Yields:
      CPIOArchiveFileEntry: CPIO archive file entry.
    """
    if self._file_entries:
      for path, file_entry in self._file_entries.items():
        if path.startswith(path_prefix):
          yield file_entry

  def GetFileEntryByPath(self, path):
    """Retrieves a file entry for a specific path.

    Args:
      path (str): path of the file entry.

    Returns:
      CPIOArchiveFileEntry: CPIO archive file entry or None.
    """
    if not self._file_entries:
      # BUG FIX: return None (as documented) instead of False.
      return None

    return self._file_entries.get(path, None)

  def ReadFileObject(self, file_object):
    """Reads binary data from a file-like object.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the format signature is not supported.
    """
    file_object.seek(0, os.SEEK_SET)
    signature_data = file_object.read(6)

    self.file_format = None
    if len(signature_data) > 2:
      if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
        self.file_format = 'bin-big-endian'
      elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
        self.file_format = 'bin-little-endian'
      elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
        self.file_format = 'odc'
      elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
        self.file_format = 'newc'
      elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
        self.file_format = 'crc'

    if self.file_format is None:
      raise errors.ParseError('Unsupported CPIO format.')

    self._ReadFileEntries(file_object)

    # TODO: print trailing data
# TODO: print trailing data
|
"""model database
Revision ID: 62fba533f72b
Revises: 6eaf085a217c
Create Date: 2020-10-17 08:32:25.493485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '62fba533f72b'        # unique id of this migration
down_revision = '6eaf085a217c'   # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision; no schema changes were autogenerated."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision; no schema changes were autogenerated."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
#!/usr/bin/env python
'''
Entities folder importer.
'''
__author__ = 'Aditya Viswanathan'
__email__ = 'aditya@adityaviswanathan.com'
from entities.db import db as Db
from entities.models import *
from entities.action_executor import ActionExecutor
from entities.test_entities import make_entities
|
#!/usr/bin/python3
"""
Project: 0x0A-python-inheritance.
Task: 4
"""
def inherits_from(obj, a_class):
    """True if it is, False otherwise"""
    # True only when obj's type is a strict subclass of a_class (an exact
    # instance of a_class itself does not count).
    obj_type = type(obj)
    return obj_type is not a_class and issubclass(obj_type, a_class)
|
from coco.models import Image, Category, Coco50
from django.shortcuts import render
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse, HttpResponseRedirect
import urllib2
import json
import os
from django.conf import settings
from imgmanip.models import Image
from imgmanip.forms import ImageUploadForm
import numpy as np
# Load the COCO metadata once at import time.
# Fix: the files were previously opened and never closed (handles leaked for
# the process lifetime); context managers close them promptly, and json.load
# replaces the read()+loads() pair.
with open(os.path.join(settings.MEDIA_ROOT, 'attributes/coco_attributes_with_images.json')) as _meta_file:
    attributes = json.load(_meta_file)
with open(os.path.join(settings.MEDIA_ROOT, 'attributes/coco_flickr_urls.json')) as _meta_file:
    flickr_urls = json.load(_meta_file)
with open(os.path.join(settings.MEDIA_ROOT, 'attributes/coco_anns_grouped.json')) as _meta_file:
    anns = json.load(_meta_file)
def index(request):
    """
    Renders a page where you can choose to interact with a Coco segmented image
    """
    # Collect the image id of every Coco50 record for the index page.
    segmented_imgs = [entry.image.id for entry in Coco50.objects.all()]
    return render(request, 'coco/index.html', {'segmented_imgs': segmented_imgs})
def category_index(request):
    """
    Renders all the categories that are available.
    """
    # values_list('name') yields 1-tuples; unwrap each to a plain string.
    names = []
    for row in Category.objects.values_list('name'):
        names.append(row[0])
    return render(request, 'coco/category_index.html', {'categories': names})
def image_index(request):
"""
Renders a few images for a given category
"""
print "REQUEST", request
MAX_IMAGES = 24
images = []
if 'category' in request.GET:
name = request.GET['category']
category = Category.objects.get(name=request.GET['category'])
images = category.images.all()[:MAX_IMAGES]
print category
else:
print "here"
# category = Category.objects.all()[0]
category = Category.objects.all()
images = category.images.all()[:MAX_IMAGES]
# images = category.images.all()[:MAX_IMAGES]
images = [im.url for im in images]
return render(request, 'coco/image_index.html', {'urls': images})
def obj_interact(request, image_id):
    """
    Loads image with segmented objects; allows selection of an object of interest
    """
    # image = get_object_or_404(Image, pk=image_id)
    # print Image.objects.all() # images are all empty
    # NOTE(review): `images` is only used by the debug prints below; the
    # template receives just `image_id`. The prints raise IndexError when the
    # Image table is empty — confirm that is acceptable or guard it.
    images = Image.objects.all()
    # image = images[0]
    print "SANITY"
    print images[0].id
    context = {
        'image_id': image_id,
    }
    return render(request, 'coco/obj_interact.html', context)
def obj_interact2(request, image_id, src_theme, dst_theme):
"""
Loads image with segmented objects; allows selection of an object of interest
"""
# image = get_object_or_404(Image, pk=image_id)
# print Image.objects.all() # images are all empty
images = Image.objects.all()
print "IMAGES", image_id
# Hardcoded for the elephant pic (id = 30065)
# Assume that data is in form: <object_id>:<suggested_edit>
# if request.method == 'POST':
# edits = {}
# if(image_id == 13150):
# edits = {
# "593697": "Replace Object 593697",
# "a": "Replace field"
# }
# elif(image_id == 158754):
# edits = {
# "53902": "Replace Object 53902",
# "a": "Add an object to the scene"
# }
# elif(image_id == 66166):
# edits = {
# "1545437": "Replace Object 1545437",
# "1491604": "Replace Object 1491604"
# }
# elif(image_id == 175479):
# edits = {
# "41747": "Replace Object 41747",
# "a": "Add an object to the scene"
# }
# elif image_id == 100318:
# print "Hi"
# edits = {
# "589830": "Replace Object 589830",
# "588983": "Replace Object 588983",
# "b": "Add an object to the scene"
# }
# print edits
edits = {
"589830": "Replace Object 589830",
"588983": "Replace Object 588983",
"b": "Add an object to the scene"
}
# Hardcoded for the elephant pic (id = 30065)
# Assume that data is in form: <object_id>:[catId1, catId2, catId3]
# Reference category_id2name.json
replacementObjs = {
"589830": [18, 19, 20, 21],
"588983": [20, 21, 23]
}
test = [19, 20];
# image = images[0]
# print images[0].id
context = {
'dst_theme': dst_theme,
'edits': edits,
'image_id': image_id,
'replacement_objs': replacementObjs,
'src_theme': src_theme,
'test': test
}
return render(request, 'coco/obj_interact2.html', context)
def theme_id(request, image_name):
    """
    Loads image with segmented objects; allows selection of an object of interest
    """
    # image = get_object_or_404(Image, pk=image_id)
    print Image.objects.all() # images are all empty
    # image = Image.objects.get(id=1)
    print("image name", image_name)
    # NOTE(review): `images` is never used; the template only needs image_name.
    images = Image.objects.all()
    # Render page with the form and all images
    context = {'image_name': image_name}
    return render(request, 'coco/theme_id.html', context)
    # context = {
    #     'image_id': image_id,
    #     # 'image_id': image_id,
    # }
    # return render(request, 'coco/theme_id.html', context)
def first_screen(request):
    """
    Renders a page where you can either upload an image that will get saved in our database or choose from one of the existing files to play around with.
    """
    # Handle image upload
    if request.method == 'POST':
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            raw_name = str(request.FILES['img_file'])
            # Bug fix: raw_name.index(".") raised ValueError for filenames
            # without an extension. partition() keeps the original
            # truncate-at-first-dot behaviour and degrades gracefully to the
            # full name when there is no dot.
            img_name = raw_name.partition(".")[0]
            new_img = Image(img_file = request.FILES['img_file'], img_name = img_name)
            new_img.save()
            url = '/coco/first_screen'
            return HttpResponseRedirect(url)
        # Invalid POST falls through and re-renders with the bound form.
    else:
        form = ImageUploadForm()
    # Load all images for the image index page
    images = Image.objects.all()
    # Render page with the form and all images
    context = {'images': images, 'form': form}
    return render(request, 'coco/first_screen.html', context)
def cat_id2name(cat_id):
    """Map a COCO category id to its human-readable name.

    Fix: the mapping file was previously opened and never closed; a context
    manager now guarantees it is closed, and json.load replaces read()+loads().
    Raises KeyError for an unknown category id.
    """
    mapping_path = os.path.join(settings.MEDIA_ROOT, 'attributes/category_id2name.json')
    with open(mapping_path) as mapping_file:
        id2name = json.load(mapping_file)
    return id2name[str(cat_id)]
def obj_attributes(request, image_id, obj_id, cat_id):
    """
    Loads all attributes for the selected object
    """
    # Pass the selection through to the template, resolving the category name.
    context = {
        'image_id': image_id,
        'obj_id': obj_id,
        'cat_id': cat_id,
        'cat_name': cat_id2name(cat_id),
    }
    return render(request, 'coco/obj_attributes.html', context)
def obj_replacements(request, image_id, obj_id, cat_id, attr_id):
"""
Loads all possible replacement images for the selected object + attribute(s)
"""
# TODO: given obj_id and attr_id, load list of relevant image ids
# get the urls of these image ids from Image database
# pass these into an argument
# load these images in template html file
# crop the images by their polygon coords
cat_name = cat_id2name(cat_id)
# Get replacement images for selected object + attribute(s)
attr_name = attributes[cat_name][int(attr_id)]["attribute"]
repl_images = attributes[cat_name][int(attr_id)]["images"] # Store image ids of valid replacements
print "len(repl_images) 1", len(repl_images)
repl_images = list(set(repl_images)) # Remove duplicates
print "len(repl_images) 2", len(repl_images)
repl_images = [repl_image for repl_image in repl_images if str(repl_image) in anns]
print "len(repl_images) 3", len(repl_images)
# Get replacement image annotation data for selected object + attribute(s)
repl_anns = [anns[str(repl_images[i])] for i in range(len(repl_images))]
repl_urls = {}
repl_polys = {}
repl_bboxes = {} # replacement bounding boxes
for ann in repl_anns:
for obj in ann:
if obj["category_id"] == int(cat_id):
seg = obj["segmentation"][0] # TEMP: just take first segmentation
poly = np.array(seg).reshape((int(len(seg)/2), 2))
poly = [list(poly_row) for poly_row in poly]
# Get the flickr url for the replacement object image
obj_image_id = obj["image_id"]
if str(obj_image_id) in flickr_urls:
repl_urls[obj_image_id] = flickr_urls[str(obj_image_id)]
# Get the polygons and bounding boxes for the replacement object image
repl_polys[obj_image_id] = list(poly)
repl_bboxes[obj_image_id] = obj["bbox"]
break
print "len(repl_urls)", len(repl_urls)
repl_urls = json.dumps(repl_urls)
print "len(repl_polys)", len(repl_polys)
repl_polys = json.dumps(repl_polys)
for obj in anns[str(image_id)]:
if obj["id"] == int(obj_id):
orig_bbox = obj["bbox"]
context = {
'image_id': image_id,
'obj_id': obj_id,
'cat_id': cat_id,
'cat_name': cat_name,
'attr_id': attr_id,
'attr_name': attr_name,
'repl_ids': repl_images,
'repl_urls': repl_urls,
'repl_polys': repl_polys,
'repl_bboxes': repl_bboxes,
'orig_bbox': orig_bbox,
}
return render(request, 'coco/obj_replacements.html', context)
|
from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired
class RegisterForm(FlaskForm):
    """Account registration form: username, password, reCAPTCHA, and submit.

    Field declaration order determines rendering order in the template.
    """
    # Both text fields are required; validation fails on empty input.
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    # NOTE(review): RecaptchaField needs RECAPTCHA_PUBLIC_KEY /
    # RECAPTCHA_PRIVATE_KEY in the Flask app config — confirm they are set.
    recaptcha = RecaptchaField()
    submit = SubmitField('Register')
# coding=utf-8
import os
import sys
import unittest
from time import sleep
from selenium import webdriver
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import user_login
from webTest_pro.common.model.baseUploadFile import add_UploadVideo, add_Streaming, add_ContntVideo
reload(sys)
sys.setdefaultencoding("utf-8")
'''添加节目数据'''
videoDataMp4 = [
{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mp4)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'001.mp4',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'sleepTime': '45'
}]
videoDataAsf = [
{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频asf)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'002.asf',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'sleepTime': '20'
}]
videoData3gp = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频3gp)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'003.3gp',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'sleepTime': '10'
}]
videoDataMpg = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mpg)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'004.mpg',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'sleepTime': '15'
}]
videoDataMov = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频mov)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'005.mov',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'sleepTime': '10'
}]
# Programme-upload test data: wmv video.
videoDataWmv = [{
    'addTypeSelect': u'公共资源库',
    'addFileN': u'测试节目名(视频wmv)',
    'addFileDesc': u'测试备注信息',
    'videoType': u'视频',
    # Fix: display name was u'006.wm' (typo) — every other entry's fileName
    # matches the fileNames value below.
    'fileName': u'006.wmv',
    'uploadType': 'video',
    'disk': 'Z:\\testResource\\py',
    'fileNames': '006.wmv',
    'sleepTime': '10'
}]
videoDataFlv = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频flv)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'007.flv',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'sleepTime': '45'
}]
videoDataAvi = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频avi)',
'addFileDesc': u'测试备注信息',
'videoType': u'视频',
'fileName': u'008.avi',
'uploadType': 'video',
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'sleepTime': '10'
}]
videoDataDocx = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档docx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'001.docx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '001.docx',
'sleepTime': '4'
}]
videoDataPptx = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档pptx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'002.pptx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '002.pptx',
'sleepTime': '4'
}]
videoDataPpt = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档ppt)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'003.ppt',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '003.ppt',
'sleepTime': '4'
}]
videoDataXlsx = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档xlsx)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'004.xlsx',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '004.xlsx',
'sleepTime': '4'
}]
videoDataDoc = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档doc)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'005.doc',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '005.doc',
'sleepTime': '4'
}]
videoDataTxt = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档txt)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'006.txt',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006.txt',
'sleepTime': '20'
}]
# Programme-upload test data: txt document with Chinese content.
videoDataZHTxt = [{
    'addTypeSelect': u'公共资源库',
    'addFileN': u'测试节目名1(文档txt)',
    'addFileDesc': u'测试备注信息1',
    'videoType': u'文档',
    # Fix: display name was u'006zh.tx' (typo) — every other entry's fileName
    # matches the fileNames value below.
    'fileName': u'006zh.txt',
    'uploadType': 'doc',
    'disk': 'Z:\\testResource\\py\\wd',
    'fileNames': '006zh.txt',
    'sleepTime': '4'
}]
videoDataPdf = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档pdf)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'007.pdf',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '007.pdf',
'sleepTime': '4'
}]
videoDataXls = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名1(文档xls)',
'addFileDesc': u'测试备注信息1',
'videoType': u'文档',
'fileName': u'008.xls',
'uploadType': 'doc',
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '008.xls',
'sleepTime': '4'
}]
videoDataPng = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片png)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner01.png',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner01.png',
'sleepTime': '4'
}]
videoDataJpg = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片jpg)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner01.jpg',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner01.jpg',
'sleepTime': '4'
}]
videoDataJpg2 = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(图片jpg)',
'addFileDesc': u'测试备注信息2',
'videoType': u'图片',
'fileName': u'banner03.jpg',
'uploadType': 'pictrue',
'disk': 'Z:\\testResource\\py\\pic',
'fileNames': 'banner03.jpg',
'sleepTime': '4'
}]
videoDataPNG2 = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(水印)',
'addFileDesc': u'测试备注信息3',
'videoType': u'水印',
'fileName': u'文件名3',
'uploadType': 'watermark',
'disk': 'Z:\\testResource',
'fileNames': '002.PNG',
'sleepTime': '4'
}]
videoDataPNG3 = [{
'addTypeSelect': u'公共资源库',
'addFileN': u'测试节目名2(资料)',
'addFileDesc': u'测试备注信息4',
'videoType': u'资料',
'fileName': u'文件名4',
'uploadType': 'data',
'disk': 'Z:\\testResource',
'fileNames': '002.PNG',
'sleepTime': '4'
}]
'''添加视频任务'''
videoTaskData = [{
'taskName': u'测试任务名1',
'taskRemark': u'测试描述',
'pTypeSelect': u'公共资源库',
'addFileN': u'测试节目名(视频)',
'fileName': u'测试文件名',
'fileType': u'视频',
'fileFormat': u'mp4',
'FileDesc': u'测试描述',
'clarity': '720p',
'startTiem': '00:00:01',
'endTiem': '00:00:30'
}]
'''查询任务列表'''
teskListData = [{'taskName': u'测试任务名1'}]
'''添加流媒体地址管理'''
streamingData = [{'addName': u'19流媒体地址', "ipAdd": init.db_conf["hostadd"], "serverIps": init.streaming_media["serverIps"], "addType": u"内网"}]
'''添加节目数据'''
contntVideoDataMp4 = [
{
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'fileName': '001mp4',
'sleepTime': '45',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataAsf = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'fileName': '002asf',
'sleepTime': '20',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoData3gp = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'fileName': '0033gp',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataMpg = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'fileName': '004mpg',
'sleepTime': '15',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataMov = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'fileName': '005mov',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataWmv = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '006.wmv',
'fileName': '006wmv',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataFlv = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'fileName': '007flv',
'sleepTime': '45',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataAvi = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'fileName': '008avi',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '视频管理'
}]
contntVideoDataDocx = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '001.docx',
'fileName': '001docx',
'sleepTime': '4',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataPptx = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '002.pptx',
'fileName': '002pptx',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataPpt = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '003.ppt',
'fileName': '003ppt',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataXlsx = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '004.xlsx',
'fileName': '004xlsx',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataDoc = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '005.doc',
'fileName': '005doc',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataTxt = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006.txt',
'fileName': '006txt',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataZHTxt = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '006zh.txt',
'fileName': '006zhtxt',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataPdf = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '007.pdf',
'fileName': '007pdf',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
contntVideoDataXls = [{
'disk': 'Z:\\testResource\\py\\wd',
'fileNames': '008.xls',
'fileName': '008xls',
'sleepTime': '6',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '文档管理'
}]
wcontntVideoDataMp4 = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '001.mp4',
'fileName': '001mp4',
'sleepTime': '45',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataAsf = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '002.asf',
'fileName': '002asf',
'sleepTime': '20',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoData3gp = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '003.3gp',
'fileName': '0033gp',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataMpg = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '004.mpg',
'fileName': '004mpg',
'sleepTime': '15',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataMov = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '005.mov',
'fileName': '005mov',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataWmv = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '006.wmv',
'fileName': '006wmv',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataFlv = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '007.flv',
'fileName': '007flv',
'sleepTime': '45',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
wcontntVideoDataAvi = [{
'disk': 'Z:\\testResource\\py',
'fileNames': '008.avi',
'fileName': '008avi',
'sleepTime': '10',
'gradetype': '小学',
'gradename': '一年级',
'subjectname': '音乐',
'Schapter': '音乐第一章',
'Ssection': '',
'sknow': '',
'remark': '测试描述',
'type_click': '微课管理'
}]
class videoList(unittest.TestCase):
''''节目上传管理'''
    def setUp(self):
        # Launch a Chrome session either locally or on a remote Selenium grid,
        # depending on the configured execution environment (init.execEnv).
        if init.execEnv['execType'] == 'local':
            print "\n", "=" * 20, "local exec testcase", "=" * 19
            self.driver = webdriver.Chrome()
            # Implicit wait (seconds) applied to every element lookup.
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            print "start tenantmanger..."
        else:
            print "\n", "=" * 20, "remote exec testcase", "=" * 18
            # Remote execution: connect to the Selenium hub at remoteUrl with
            # Chrome desired capabilities.
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            print "start tenantmanger..."
    def tearDown(self):
        # Always quit the browser, then fail the test if any soft verification
        # errors were accumulated during the run.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
        print "schoolmanager end!"
        print "=" * 60
def test_add_Streaming(self):
'''添加流媒体地址管理'''
print "exec:test_add_Streaming..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in streamingData:
add_Streaming(driver, **itme)
sleep(0.5)
print "exec:test_add_Streaming success."
def test_add_videoMp4(self):
'''添加节目数据'''
print "exec:test_add_videoMp4..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataMp4:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoMp4 success."
def test_add_videoAsf(self):
'''添加节目数据'''
print "exec:test_add_videoAsf..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataAsf:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoAsf success."
def test_add_video3gp(self):
'''添加节目数据'''
print "exec:test_add_video3gp..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoData3gp:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_video3gp success."
def test_add_videoMpg(self):
'''添加节目数据'''
print "exec:test_add_videoMpg..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataMpg:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoMpg success."
def test_add_videoMov(self):
'''添加节目数据'''
print "exec:test_add_videoMov..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataMov:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoMov success."
def test_add_videoWmv(self):
'''添加节目数据'''
print "exec:test_add_videoWmv..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataWmv:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoWmv success."
def test_add_videoFlv(self):
'''添加节目数据'''
print "exec:test_add_videoFlv..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataFlv:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoFlv success."
def test_add_videoAvi(self):
'''添加节目数据'''
print "exec:test_add_videoAvi..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataAvi:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoAvi success."
def test_add_videoDocx(self):
'''添加节目数据'''
print "exec:test_add_videoDocx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataDocx:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoDocx success."
def test_add_videoPptx(self):
'''添加节目数据'''
print "exec:test_add_videoPptx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPptx:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPptx success."
def test_add_videoPpt(self):
'''添加节目数据'''
print "exec:test_add_videoPpt..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPpt:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPpt success."
def test_add_videoXlsx(self):
'''添加节目数据'''
print "exec:test_add_videoXlsx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataXlsx:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoXlsx success."
def test_add_videoDoc(self):
'''添加节目数据'''
print "exec:test_add_videoDoc..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataDoc:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoDoc success."
def test_add_videoTxt(self):
'''添加节目数据'''
print "exec:test_add_videoTxt..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataTxt:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoTxt success."
def test_add_videoZHTxt(self):
'''添加节目数据'''
print "exec:test_add_videoZHTxt..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataZHTxt:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoZHTxt success."
def test_add_videoPdf(self):
'''添加节目数据'''
print "exec:test_add_videoPdf..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPdf:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPdf success."
def test_add_videoXls(self):
'''添加节目数据'''
print "exec:test_add_videoXls..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataXls:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoXls success."
def test_add_videoPng(self):
'''添加节目数据'''
print "exec:test_add_videoPng..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPng:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPng success."
def test_add_videoJpg(self):
'''添加节目数据'''
print "exec:test_add_videoJpg..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataJpg:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoJpg success."
def test_add_videoJpg2(self):
'''添加节目数据'''
print "exec:test_add_videoJpg2..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataJpg2:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoJpg2 success."
def test_add_videoPNG2(self):
'''添加节目数据'''
print "exec:test_add_videoPNG2..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPNG2:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPNG2 success."
def test_add_videoPNG3(self):
'''添加节目数据'''
print "exec:test_add_videoPNG3..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in videoDataPNG3:
add_UploadVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_videoPNG3 success."
def test_add_contntVideoMp4(self):
'''添加节目数据'''
print "exec:test_add_contntVideoMp4..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataMp4:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoMp4 success."
def test_add_contntVideoAsf(self):
'''添加节目数据'''
print "exec:test_add_contntVideoAsf..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataAsf:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoAsf success."
def test_add_contntVideo3gp(self):
'''添加节目数据'''
print "exec:test_add_contntVideo3gp..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoData3gp:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideo3gp success."
def test_add_contntVideoMpg(self):
'''添加节目数据'''
print "exec:test_add_contntVideoMpg..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataMpg:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoMpg success."
def test_add_contntVideoMov(self):
'''添加节目数据'''
print "exec:test_add_contntVideoMov..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataMov:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoMov success."
def test_add_contntVideoWmv(self):
'''添加节目数据'''
print "exec:test_add_contntVideoWmv..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataWmv:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoWmv success."
def test_add_contntVideoFlv(self):
'''添加节目数据'''
print "exec:test_add_contntVideoFlv..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataFlv:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoFlv success."
def test_add_contntVideoAvi(self):
'''添加节目数据'''
print "exec:test_add_contntVideoAvi..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataAvi:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoAvi success."
def test_add_contntVideoDocx(self):
'''添加节目数据'''
print "exec:test_add_contntVideoDocx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataDocx:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoDocx success."
def test_add_contntVideoPptx(self):
'''添加节目数据'''
print "exec:test_add_contntVideoPptx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataPptx:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoPptx success."
def test_add_contntVideoPpt(self):
'''添加节目数据'''
print "exec:test_add_contntVideoPpt..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataPpt:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoPpt success."
def test_add_contntVideoXlsx(self):
'''添加节目数据'''
print "exec:test_add_contntVideoXlsx..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataXlsx:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoXlsx success."
def test_add_contntVideoDoc(self):
'''添加节目数据'''
print "exec:test_add_contntVideoDoc..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataDoc:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoDoc success."
def test_add_contntVideoTxt(self):
'''添加节目数据'''
print "exec:test_add_contntVideoTxt..."
driver = self.driver
user_login(driver, **init.loginInfo)
for itme in contntVideoDataTxt:
add_ContntVideo(driver, **itme)
sleep(0.5)
print "exec:test_add_contntVideoTxt success."
def test_add_contntVideoZHTxt(self):
    """Add programme/content data for the Chinese-named TXT sample entries."""
    print("exec:test_add_contntVideoZHTxt...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in contntVideoDataZHTxt:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoZHTxt success.")
def test_add_contntVideoPdf(self):
    """Add programme/content data for the PDF sample entries."""
    print("exec:test_add_contntVideoPdf...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in contntVideoDataPdf:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoPdf success.")
def test_add_contntVideoXls(self):
    """Add programme/content data for the XLS sample entries."""
    print("exec:test_add_contntVideoXls...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in contntVideoDataXls:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoXls success.")
def test_add_contntVideoMp4(self):
    """Add programme/content data for the MP4 sample entries (w-data set)."""
    print("exec:test_add_contntVideoMp4...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataMp4:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoMp4 success.")
def test_add_contntVideoAsf(self):
    """Add programme/content data for the ASF sample entries (w-data set)."""
    print("exec:test_add_contntVideoAsf...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataAsf:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoAsf success.")
def test_add_contntVideo3gp(self):
    """Add programme/content data for the 3GP sample entries (w-data set)."""
    print("exec:test_add_contntVideo3gp...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoData3gp:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideo3gp success.")
def test_add_contntVideoMpg(self):
    """Add programme/content data for the MPG sample entries (w-data set)."""
    print("exec:test_add_contntVideoMpg...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataMpg:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoMpg success.")
def test_add_contntVideoMov(self):
    """Add programme/content data for the MOV sample entries (w-data set)."""
    print("exec:test_add_contntVideoMov...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataMov:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoMov success.")
def test_add_contntVideoWmv(self):
    """Add programme/content data for the WMV sample entries (w-data set)."""
    print("exec:test_add_contntVideoWmv...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataWmv:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoWmv success.")
def test_add_contntVideoFlv(self):
    """Add programme/content data for the FLV sample entries (w-data set)."""
    # NOTE(review): a method with this exact name is defined earlier in the
    # class; this later definition shadows it, so only this one runs — confirm
    # whether the earlier test should be renamed.
    print("exec:test_add_contntVideoFlv...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataFlv:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoFlv success.")
def test_add_contntVideoAvi(self):
    """Add programme/content data for the AVI sample entries (w-data set)."""
    # NOTE(review): a method with this exact name is defined earlier in the
    # class; this later definition shadows it, so the earlier AVI test never
    # runs under unittest — confirm whether one of them should be renamed.
    print("exec:test_add_contntVideoAvi...")
    browser = self.driver
    user_login(browser, **init.loginInfo)
    for record in wcontntVideoDataAvi:
        add_ContntVideo(browser, **record)
        sleep(0.5)  # brief pause between uploads so the UI can settle
    print("exec:test_add_contntVideoAvi success.")
# def test_add_videoTask(self):
# ''''添加视频任务'''
# print "exec:test_add_videoTask"
#
# driver = self.driver
# user_login(driver, **init.loginInfo)
# for itme in videoTaskData:
# add_videoTask(driver, **itme)
# print "exec: test_add_videoTask success."
# sleep(0.5)
#
# def test_search_tesk(self):
# '''查询任务列表'''
# print "exec:test_search_tesk"
# driver = self.driver
# user_login(driver, **init.loginInfo)
# for itme in teskListData:
# select_teskList(driver, **itme)
# print "exec: test_search_tesk success."
# sleep(0.5)
if __name__ == '__main__':
    # Ad-hoc manual run: drives a single upload flow directly instead of the
    # full unittest suite (left commented out below).
    # unittest.main()
    driver = webdriver.Chrome()
    user_login(driver, **init.loginInfo)
    for itme in videoDataPptx:
        add_UploadVideo(driver, **itme)
|
import torch
import torch.fx
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def ps_roi_pool(
    input: Tensor,
    boxes: Tensor,
    output_size: int,
    spatial_scale: float = 1.0,
) -> Tensor:
    """
    Position-Sensitive Region of Interest (RoI) pooling, as described in R-FCN.

    Args:
        input (Tensor[N, C, H, W]): batch of ``N`` elements, each holding ``C``
            feature maps of size ``H x W``.
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): box coordinates in
            (x1, y1, x2, y2) format, with ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
            When a single tensor is given, its first column is the batch index
            in ``[0, N - 1]``; when a list is given, each tensor holds the
            boxes of one batch element.
        output_size (int or Tuple[int, int]): pooled output size, as
            (height, width), in bins or pixels.
        spatial_scale (float): scaling factor mapping box coordinates onto the
            input's coordinate space (e.g. 0.5 for a feature map at half the
            original image resolution). Default: 1.0

    Returns:
        Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]:
            the pooled RoIs.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ps_roi_pool)
    _assert_has_ops()
    check_roi_boxes_shape(boxes)
    pooled_h, pooled_w = _pair(output_size)
    # A per-image box list must be flattened into (batch_idx, x1, y1, x2, y2) rows.
    if isinstance(boxes, torch.Tensor):
        rois = boxes
    else:
        rois = convert_boxes_to_roi_format(boxes)
    pooled, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale, pooled_h, pooled_w)
    return pooled
class PSRoIPool(nn.Module):
    """
    Module wrapper around :func:`ps_roi_pool` with fixed output size and scale.
    """

    def __init__(self, output_size: int, spatial_scale: float):
        super().__init__()
        _log_api_usage_once(self)
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input: Tensor, rois: Tensor) -> Tensor:
        # Delegate to the functional form with this module's configuration.
        return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
|
# Copyright (c) 2019 Certis CISCO Security Pte Ltd
# All rights reserved.
#
# This software is the confidential and proprietary information of
# Certis CISCO Security Pte Ltd. ("Confidential Information").
# You shall not disclose such Confidential Information and shall use
# it only in accordance with the terms of the license agreement you
# entered into with Certis CISCO Security Pte Ltd.
import os
import unittest
import pymysql
import urllib.parse
from dak.sql import diff_schema
class TestSQL(unittest.TestCase):
    """Integration test: diff the schemas of two MySQL databases.

    The connection settings come from the CONN1/CONN2 environment variables,
    each a query string like ``host=h&user=u&password=p&db=d``.
    """

    @staticmethod
    def _connect(conn_qs):
        """Open a pymysql connection from a query-string config.

        Keyword arguments are used because positional arguments to
        ``pymysql.connect`` were removed in PyMySQL 1.0.
        """
        cfg = urllib.parse.parse_qs(conn_qs)
        return pymysql.connect(
            host=cfg['host'][0],
            user=cfg['user'][0],
            password=cfg['password'][0],
            database=cfg['db'][0],
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor,
        )

    @staticmethod
    def _report(diffs):
        """Print table-level and column-level differences for one side."""
        for table, fields in diffs.items():
            if 'DIFF' in fields:
                print('TABLE[%s] - %s' % (table, fields['DIFF']))
            elif 'COLUMNS' in fields:
                for col, col_diffs in fields['COLUMNS'].items():
                    if len(col_diffs) > 0:
                        print('COLUMN[%s.%s] - %s' % (table, col, col_diffs))

    def test_diff_schema_mysql(self):
        """Compare the two schemas and report every difference found."""
        conn1 = self._connect(os.environ['CONN1'])
        try:
            conn2 = self._connect(os.environ['CONN2'])
            try:
                # diff_schema returns (count, diffs1, diffs2)
                result = diff_schema('mysql', conn1, conn2)
                print('DIFFS=%d' % result[0])
                self._report(result[1])
                self._report(result[2])
            finally:
                conn2.close()
        finally:
            conn1.close()
if __name__ == '__main__':
    # Run the schema-diff test directly: python thisfile.py
    unittest.main()
|
# Print the n-th partial sum of the Basel series: 1/1^2 + 1/2^2 + ... + 1/n^2.
n = int(input())
print(sum(1 / k ** 2 for k in range(1, n + 1)))
|
def new_save(name):
    """Create a fresh save file for *name* at floor 0, state 0.

    The record format is "Name#Floor#State".
    """
    # `with` guarantees the handle is closed even if the write raises.
    with open("save.txt", "w") as s_file:
        s_file.write(name + "#0#0")
def load_save():
    """Read save.txt and return [name, floor, state] (all strings)."""
    # `with` guarantees the handle is closed even if the read raises.
    with open("save.txt", "r") as s_file:
        save_data = s_file.read()
    # The save record is "Name#Floor#State".
    return save_data.split("#")
def save_game(name, room, state):
    """Overwrite the previous save with "name#room#state".

    The original version never closed the file handle; `with` fixes that
    and also flushes the write deterministically.
    """
    with open("save.txt", "w") as s_file:
        s_file.write(name + "#" + str(room) + "#" + str(state))
|
import boto3
from botocore.exceptions import ClientError
import time
import sys
# CLI arguments: target bucket and key prefix for the baseline copy.
bucket_name = sys.argv[1]
prefix = sys.argv[2]
start = time.time()
print('Baseline prep started...')
# Creating a copy of validation set for baseline
s3 = boto3.resource('s3')
bucket_key_prefix = prefix + "/data/val/"
bucket = s3.Bucket(bucket_name)
for s3_object in bucket.objects.filter(Prefix=bucket_key_prefix):
    # Mirror each validation object under monitoring/baselining/, renaming
    # Spark-style .part files to .csv.
    target_key = s3_object.key.replace('data/val/', 'monitoring/baselining/data/').replace('.part', '.csv')
    copy_source = {
        'Bucket': bucket_name,
        'Key': s3_object.key
    }
    try:
        # HEAD the target: load() raises ClientError when the object is absent.
        # NOTE(review): load() returns None, so `obj` is always None; it is
        # only the raised/not-raised signal that matters here.
        obj = s3.Object(bucket_name, target_key).load()
        print('Already Copied {0}'.format(target_key))
    except ClientError as e:
        # Target missing (or HEAD failed) — perform the copy.
        print('Copying {0} to {1} ...'.format(s3_object.key, target_key))
        s3.Bucket(bucket_name).copy(copy_source, target_key)
end = time.time()
print('Baseline prep complete in: {}'.format(end - start))
"""
CCT 建模优化代码
局部坐标系
作者:赵润晓
日期:2021年4月27日
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# cctpy introduces global and local coordinate systems to simplify magnetic
# field modelling, particle tracking and beam analysis.
# Magnets are placed in local coordinate systems while particles move in the
# global one; to evaluate a magnet's field at a particle position we need
# local coordinates and coordinate transforms.
# A local coordinate system has 4 parameters: origin and the x/y/z axis
# directions.  The three axes are not independent (right-hand rule), so only
# 3 parameters are needed to construct one.
# NOTE: all coordinate systems here are 3-D Cartesian, with no scaling.
# To build a local system, give the origin plus the x and z axis directions
# (the y axis follows from them).
# LocalCoordinateSystem() parameters:
#   location     entity position in the global system; defaults to the global origin
#   x_direction  local x axis; defaults to the global x direction
#   z_direction  local z axis; defaults to the global z direction
#   (the y direction is computed from x and z)
default_lcs = LocalCoordinateSystem()
print(default_lcs)
# LOCATION=(0.0, 0.0, 0.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
# Translation: a local system with origin (2,2,1), axes parallel to the global ones.
lcs221 = LocalCoordinateSystem(location=P3(2,2,1))
# A point at global coordinates (2,3,3)
point_gcs_233 = P3(2,3,3)
# point_to_local_coordinate(p) converts a globally-expressed point to local coordinates
point_lcs_233 = lcs221.point_to_local_coordinate(point_gcs_233)
# Inspect the result
print(point_lcs_233)
# (0.0, 1.0, 2.0)
# point_to_global_coordinate(p) converts a locally-expressed point back to global
print(lcs221.point_to_global_coordinate(point_lcs_233))
# (2.0, 3.0, 3.0)
# vector_to_local_coordinate() and vector_to_global_coordinate():
# vectors are translation-invariant, so they behave differently from points —
# between the global system and lcs221 a vector's components do not change.
vector_gcs_233 = P3(2,3,3)
vector_lcs_233 = lcs221.vector_to_local_coordinate(vector_gcs_233)
print(vector_gcs_233,vector_lcs_233)
# (2.0, 3.0, 3.0) (2.0, 3.0, 3.0)
vector_gcs_233 = lcs221.vector_to_global_coordinate(vector_lcs_233)
print(vector_gcs_233)
# (2.0, 3.0, 3.0)
# __str__() and __repr__() render the system as a string: the origin plus the
# global-frame coordinates of the three axis directions.
# The following three prints are identical.
print(lcs221)
print(lcs221.__str__())
print(lcs221.__repr__())
# LOCATION=(2.0, 2.0, 1.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
# __eq__() tests whether two systems are the same (usable via ==).
# It only compares the origin and the three axis directions.
# Parameter err sets the absolute tolerance; if msg is given, an exception is
# raised when the systems are unequal.
lcs221_little_change = LocalCoordinateSystem(location=P3(2,2,1+1e-6))
print(lcs221==lcs221_little_change)
# True
# Class method create_by_y_and_z_direction() builds a system from the origin
# `location`, the y direction and the z direction.
lcs_created_by_y_and_z_direction = LocalCoordinateSystem.create_by_y_and_z_direction(
    location=P3(1,2,3),
    y_direction=P3.x_direct(),
    z_direction=P3.y_direct()
)
print(lcs_created_by_y_and_z_direction)
# LOCATION=(1.0, 2.0, 3.0), xi=(0.0, 0.0, 1.0), yi=(1.0, 0.0, 0.0), zi=(0.0, 1.0, 0.0)
# Class method global_coordinate_system() returns the global system, i.e.
# LOCATION=(0.0, 0.0, 0.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
print(LocalCoordinateSystem.global_coordinate_system())
# LOCATION=(0.0, 0.0, 0.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
# copy() clones the system; the clone has no dependency on the original.
lcs221_copied = lcs221.copy()
lcs221_copied.location = P3(111,22,3)
print(lcs221)
print(lcs221_copied)
# LOCATION=(2.0, 2.0, 1.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
# LOCATION=(111.0, 22.0, 3.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
# Details:
# 1. The two directions passed at construction must be orthogonal; otherwise
#    construction fails and an exception is raised.
try:
    lcs = LocalCoordinateSystem(x_direction=P3.x_direct(),z_direction=P3.x_direct())
except Exception as e:
    print("抓住异常:",e)
# 抓住异常: 创建 LocalCoordinateSystem 对象异常,x_direction(1.0, 0.0, 0.0)和z_direction(1.0, 0.0, 0.0)不正交
# 2. The two directions passed at construction are normalized automatically.
lcs = LocalCoordinateSystem(x_direction=P3(x=2),z_direction=P3(z=3))
print(lcs)
# LOCATION=(0.0, 0.0, 0.0), xi=(1.0, 0.0, 0.0), yi=(0.0, 1.0, 0.0), zi=(0.0, 0.0, 1.0)
|
import random
import string
import pandas as pd
import uuid
import os
import git
import urllib.request
import json
from faker import Faker
# Mexican-locale fake-data provider (CURP, RFC, street names, ...).
fake = Faker('es_MX')
# Given names and surnames: CSV corpora loaded as raw numpy arrays of rows.
hombres = pd.read_csv('./corpus/hombres.csv')
hombres = hombres.values
mujeres = pd.read_csv('./corpus/mujeres.csv')
mujeres = mujeres.values
apellidos = pd.read_csv('./corpus/apellidos-20.csv')
apellidos = apellidos.values
# One-off catalog downloads, kept disabled; re-enable to fetch them locally.
#if not os.path.isdir('./catalogos'):
#    print('Descargando repositorio de catálogos...')
#    git.Git('.').clone('https://github.com/PDNMX/catalogos.git')
#    print('Listo!')
# (https://www.inegi.org.mx/app/ageeml/)
#if not os.path.isfile('./catun_localidad.xlsx'):
#    print('Descargando catálogo de localidades...')
#    urllib.request.urlretrieve('https://www.inegi.org.mx/contenidos/app/ageeml/catuni/loc_mincona/catun_localidad.xlsx',
#                               './catun_localidad.xlsx')
#    print('Listo!')
# INEGI locality catalog; header row starts at spreadsheet row 4 (header=3).
catun = pd.read_excel('./catun_localidad.xlsx', header=3)
# Geostatistical framework / Marco Geoestadístico (https://www.inegi.org.mx/app/ageeml/)
def get_id():
    """Return a fresh time-based UUID (uuid1) as a string."""
    return str(uuid.uuid1())
def rand_bool():
    """Return True or False with equal probability."""
    return random.choice((True, False))
def get_name():
    """Return a random given name drawn from the male or female corpus.

    Fix: the original compared strings with ``is`` (``gender is 'M'``), which
    relies on CPython string interning and raises a SyntaxWarning on 3.8+;
    equality must use ``==``.
    """
    gender = random.choice(['F', 'M'])
    pool = hombres if gender == 'M' else mujeres
    # Corpus rows are 1-element sequences; take the first field as the name.
    return str(random.choice(pool)[0])
def get_last_name():
    """Return a random surname from the surname corpus."""
    # Corpus rows are 1-element sequences; the first field is the surname.
    return str(random.choice(apellidos)[0])
def get_email(domain):
    """Build a random email: a 12-letter lowercase local part at *domain*."""
    local_part = "".join(random.choice(string.ascii_lowercase) for _ in range(12))
    return "{0}@{1}".format(local_part, domain)
def get_telephone(type):
    """Return a fake Mexican number; 'celular' numbers carry the mobile '1' prefix."""
    prefix = '+521' if type == 'celular' else '+52'
    return prefix + str(random.randint(5500000000, 7779999999))
def get_bith_date():
    """Random birth date 'YYYY-MM-DD' between 1950 and 1999 (day capped at 28)."""
    # Day stops at 28 so every month is valid without calendar logic.
    day = random.randint(1, 28)
    month = random.randint(1, 12)
    year = random.randint(1950, 1999)
    return "{0}-{1:02d}-{2:02d}".format(year, month, day)
def get_college():
    """Pick a random university name from a small fixed set."""
    options = (
        'Instituto Politécnico Nacional',
        'Instituto Tecnológico Autónomo de México',
        'Universidad Nacional Autónoma de México',
        'Universidad Iberoamericana',
        'Universidad de Guadalajara',
    )
    return random.choice(options)
def get_amount(a, b):
    """Uniform random amount in [a, b], rounded to 2 decimal places."""
    value = random.uniform(a, b)
    return round(value, 2)
def get_degree():
    """Pick a random degree title from a fixed set."""
    options = (
        'Ingeniería en Sistemas Computacionales',
        'Licenciatura en Matemáticas Aplicadas',
        'Ingeniería en Computación',
        'Ingeniería en Comunicaciones y Electrónica',
        'Licenciatura en Derecho',
        'Licenciatura en Ciencias Políticas',
        'Licenciatura en Física',
        'Ingeniería Industrial',
        'Ingeniería Civil',
        "Licenciatura en Historia",
        "Licenciatura en Ciencias de la Comunicación",
        "Ingeniería Mecánica",
        "Ingeniería Petrolera",
        "Ingeniería en Telecomunicaciones",
        "Ingeniería Química",
    )
    return random.choice(options)
def get_position():
    """Pick a random civil-service position title from a fixed set."""
    options = (
        'Enlace de Alto Nivel de Responsabilidad',
        'Jefe de Departamento',
        'Subdirector de Area',
        'Director de Area',
        'Director General Adjunto',
        'Director General',
        'Titular de Unidad',
    )
    return random.choice(options)
def lorem_ipsum():
    """Return a fixed lorem-ipsum paragraph used as placeholder free text."""
    return "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."
def get_address():
    """Random Mexican address seeded from a random row of the `catun` locality catalog."""
    # NOTE(review): cp / asentamiento / vialidad-type / numExt / numInt are
    # fixed sample values; only state, municipality and locality are random.
    row = catun.iloc[random.randint(0, len(catun) - 1)]
    return {
        "pais": {
            "valor": "MEXICO",
            "codigo": "MX"
        },
        "entidad_federativa": {
            "nom_agee": row['nom_ent'],
            "cve_agee": str(row['cve_ent'])
        },
        "municipio": {
            "nom_agem": row['nom_mun'],
            "cve_agem": str(row['cve_mun'])
        },
        "cp": "55018",
        "localidad": {
            "nom_loc": row['nom_loc'],
            "cve_loc": str(row['cve_loc'])
        },
        "asentamiento": {
            "cve_asen": 1,
            "nom_asen": "AGUA CLARA",
            "cve_tipo_asen": 16
        },
        "vialidad": {
            "tipo_vial": "CALLE",
            "nom_vial": fake.street_name()
        },
        "numExt": "24",
        "numInt": "48"
    }
def citizenship():
    """Return one or two distinct fake nationalities as {valor, codigo} dicts."""
    # NOTE(review): "Findland" looks like a typo for "Finland" — confirm upstream.
    countries = [
        {"valor": "Mexico", "codigo": "MX"},
        {"valor": "Australia", "codigo": "AU"},
        {"valor": "Bolivia", "codigo": "BO"},
        {"valor": "Brazil", "codigo": "BR"},
        {"valor": "Canada", "codigo": "CA"},
        {"valor": "Chile", "codigo": "CL"},
        {"valor": "China", "codigo": "CN"},
        {"valor": "Colombia", "codigo": "CO"},
        {"valor": "Cuba", "codigo": "CU"},
        {"valor": "Findland", "codigo": "FI"},
        {"valor": "Venezuela", "codigo": "VE"},
    ]
    first = random.choice(countries)
    second = random.choice(countries)
    # Collapse to a single entry when both draws hit the same country.
    if first.get("codigo") == second.get("codigo"):
        return [first]
    return [first, second]
institutions = [
"ADMINISTRACION DEL PATRIMONIO DE LA BENEFICENCIA PUBLICA",
"ADMINISTRACION FEDERAL DE SERVICIOS EDUCATIVOS EN EL DISTRITO FEDERAL",
"ADMINISTRACION PORTUARIA INTEGRAL DE ALTAMIRA S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE COATZACOALCOS S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE DOS BOCAS S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE ENSENADA S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE GUAYMAS S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE LAZARO CARDENAS S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE MANZANILLO S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE MAZATLAN S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE PROGRESO S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE PUERTO MADERO, S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE PUERTO VALLARTA S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE SALINA CRUZ S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE TAMPICO S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE TOPOLOBAMPO S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE TUXPAN S.A. DE C.V.",
"ADMINISTRACION PORTUARIA INTEGRAL DE VERACRUZ S.A. DE C.V.",
"AEROPUERTO INTERNACIONAL DE LA CIUDAD DE MEXICO S.A. DE C.V.",
"AEROPUERTOS Y SERVICIOS AUXILIARES",
"AGENCIA ESPACIAL MEXICANA",
"AGENCIA MEXICANA DE COOPERACIÓN INTERNACIONAL PARA EL DESARROLLO",
"AGENCIA NACIONAL DE SEGURIDAD INDUSTRIAL Y DE PROTECCIÓN AL MEDIO AMBIENTE DEL SECTOR HIDROCARBUROS",
"AGROASEMEX S.A.",
"APOYOS Y SERVICIOS A LA COMERCIALIZACION AGROPECUARIA",
"ARCHIVO GENERAL DE LA NACION",
"AUTORIDAD FEDERAL PARA EL DESARROLLO DE LAS ZONAS ECONÓMICAS ESPECIALES",
"BANCO DEL AHORRO NACIONAL Y SERVICIOS FINANCIEROS S N C",
"BANCO NACIONAL DE COMERCIO EXTERIOR S.N.C.",
"BANCO NACIONAL DE CREDITO RURAL S.N.C.",
"BANCO NACIONAL DE OBRAS Y SERVICIOS PUBLICOS S.N.C.",
"BANCO NACIONAL DEL EJERCITO FUERZA AEREA Y ARMADA S.N.C.",
"CAMINOS Y PUENTES FEDERALES DE INGRESOS Y SERVICIOS CONEXOS",
"CASA DE MONEDA DE MEXICO",
"CENTRO DE CAPACITACION CINEMATOGRAFICA A.C.",
"CENTRO DE ENSEÑANZA TECNICA INDUSTRIAL.",
"CENTRO DE ESTUDIOS SUPERIORES EN TURISMO",
"CENTRO DE EVALUACION Y DESARROLLO HUMANO",
"CENTRO DE INGENIERIA Y DESARROLLO INDUSTRIAL",
"CENTRO DE INVESTIGACION CIENTIFICA DE YUCATAN A.C.",
"CENTRO DE INVESTIGACION CIENTIFICA Y DE EDUCACION SUPERIOR DE ENSENADA B.C.",
"CENTRO DE INVESTIGACION EN ALIMENTACION Y DESARROLLO A.C.",
"CENTRO DE INVESTIGACION EN GEOGRAFIA Y GEOMATICA ING. JORGE L. TAMAYO A.C.",
"CENTRO DE INVESTIGACION EN MATEMATICAS A.C.",
"CENTRO DE INVESTIGACION EN MATERIALES AVANZADOS S.C.",
"CENTRO DE INVESTIGACION EN QUIMICA APLICADA",
"CENTRO DE INVESTIGACION Y ASISTENCIA EN TECNOLOGIA Y DISEÑO DEL ESTADO DE JALISCO A.C.",
"CENTRO DE INVESTIGACION Y DE ESTUDIOS AVANZADOS DEL INSTITUTO POLITECNICO NACIONAL",
"CENTRO DE INVESTIGACION Y DESARROLLO TECNOLOGICO EN ELECTROQUIMICA S.C.",
"CENTRO DE INVESTIGACION Y DOCENCIA ECONOMICAS A.C.",
"CENTRO DE INVESTIGACION Y SEGURIDAD NACIONAL",
"CENTRO DE INVESTIGACIONES BIOLOGICAS DEL NOROESTE S.C.",
"CENTRO DE INVESTIGACIONES EN OPTICA A.C.",
"CENTRO DE INVESTIGACIONES Y ESTUDIOS SUPERIORES EN ANTROPOLOGIA SOCIAL",
"CENTRO DE PRODUCCION DE PROGRAMAS INFORMATIVOS Y ESPECIALES",
"CENTRO NACIONAL DE CONTROL DE ENERGÍA",
"CENTRO NACIONAL DE CONTROL DE GAS NATURAL",
"CENTRO NACIONAL DE EQUIDAD DE GENERO Y SALUD REPRODUCTIVA",
"CENTRO NACIONAL DE EXCELENCIA TECNOLOGICA EN SALUD",
"CENTRO NACIONAL DE LA TRANSFUSION SANGUINEA",
"CENTRO NACIONAL DE METROLOGIA",
"CENTRO NACIONAL DE PLANEACION, ANALISIS E INFORMACION PARA EL COMBATE A LA DELINCUENCIA",
"CENTRO NACIONAL DE PREVENCION DE DESASTRES",
"CENTRO NACIONAL DE TRASPLANTES",
"CENTRO NACIONAL DE VIGILANCIA EPIDEMIOLOGICA Y CONTRTOL DE ENFERMEDADES",
"CENTRO NACIONAL PARA LA PREVENCION Y CONTROL DEL VIH/SIDA",
"CENTRO NACIONAL PARA LA PREVENCIÓN Y EL CONTROL DE LAS ADICCIONES",
"CENTRO NACIONAL PARA LA SALUD DE LA INFANCIA Y ADOLESCENCIA",
"CENTRO REGIONAL DE ALTA ESPECIALIDAD EN CHIAPAS",
"CENTROS DE INTEGRACION JUVENIL A.C.",
"CFE CORPORATIVO",
"CFE DISTRIBUCIÓN",
"CFE GENERACIÓN I",
"CFE GENERACIÓN II",
"CFE GENERACIÓN III",
"CFE GENERACIÓN IV",
"CFE GENERACIÓN V",
"CFE GENERACIÓN VI",
"CFE SUMINISTRADOR DE SERVICIOS BÁSICOS",
"CFE TRANSMISIÓN",
"CIATEC, A.C. CENTRO DE INNOVACION APLICADA EN TECNOLOGIAS COMPETITIVAS",
"CIATEQ, A.C. CENTRO DE TECNOLOGIA AVANZADA",
"COLEGIO DE BACHILLERES",
"COLEGIO DE POSTGRADUADOS",
"COLEGIO NACIONAL DE EDUCACION PROFESIONAL TECNICA",
"COLEGIO SUPERIOR AGROPECUARIO DEL ESTADO DE GUERRERO",
"COMISION DE APELACION Y ARBITRAJE DEL DEPORTE",
"COMISION DE OPERACION Y FOMENTO DE ACTIVIDADES ACADEMICAS DEL INSTITUTO POLITECNICO NACIONAL",
"COMISIÓN EJECUTIVA DE ATENCIÓN A VÍCTIMAS",
"COMISION FEDERAL DE ELECTRICIDAD",
"COMISION FEDERAL DE MEJORA REGULATORIA",
"COMISION FEDERAL DE TELECOMUNICACIONES",
"COMISION FEDERAL PARA LA PROTECCION CONTRA RIESGOS SANITARIOS",
"COMISION NACIONAL BANCARIA Y DE VALORES",
"COMISION NACIONAL DE ACUACULTURA Y PESCA",
"COMISION NACIONAL DE ARBITRAJE MEDICO",
"COMISION NACIONAL DE AREAS NATURALES PROTEGIDAS",
"COMISION NACIONAL DE BIOETICA",
"COMISION NACIONAL DE CULTURA FISICA Y DEPORTE",
"COMISIÓN NACIONAL DE HIDROCARBUROS",
"COMISION NACIONAL DE LAS ZONAS ARIDAS",
"COMISION NACIONAL DE LIBROS DE TEXTO GRATUITOS",
"COMISION NACIONAL DE LOS SALARIOS MINIMOS",
"COMISION NACIONAL DE PROTECCION SOCIAL EN SALUD",
"COMISION NACIONAL DE SEGURIDAD NUCLEAR Y SALVAGUARDIAS",
"COMISION NACIONAL DE SEGUROS Y FIANZAS",
"COMISION NACIONAL DE VIVIENDA",
"COMISION NACIONAL DEL AGUA",
"COMISION NACIONAL DEL SISTEMA DE AHORRO PARA EL RETIRO",
"COMISION NACIONAL FORESTAL",
"COMISION NACIONAL PARA EL DESARROLLO DE LOS PUEBLOS INDIGENAS",
"COMISION NACIONAL PARA EL USO EFICIENTE DE LA ENERGIA",
"COMISION NAL. PARA LA PROTECCION Y DEFENSA DE LOS USUARIOS DE SERVICIOS FINANCIEROS",
"COMISION PARA LA REGULARIZACION DE LA TENENCIA DE LA TIERRA",
"COMISION PARA PREVENIR Y ERRADICAR LA VIOLENCIA CONTRA LAS MUJERES",
"COMISION REGULADORA DE ENERGIA",
"COMITE NACIONAL MIXTO DE PROTECCION AL SALARIO",
"COMITÉ NACIONAL PARA EL DESARROLLO SUSTENTABLE DE LA CAÑA DE AZÚCAR",
"COMPAÑIA MEXICANA DE EXPLORACIONES S.A. DE C.V.",
"COMPAÑIA OPERADORA DEL CENTRO CULTURAL Y TURISTICO DE TIJUANA S.A. DE C.V.",
"CONSEJERIA JURIDICA DEL EJECUTIVO FEDERAL",
"CONSEJO DE MENORES",
"CONSEJO DE PROMOCION TURISTICA DE MEXICO S.A. DE C.V.",
"CONSEJO NACIONAL DE CIENCIA Y TECNOLOGIA",
"CONSEJO NACIONAL DE EVALUACION DE LA POLITICA DE DESARROLLO SOCIAL",
"CONSEJO NACIONAL DE FOMENTO EDUCATIVO",
"CONSEJO NACIONAL DE NORMALIZACION Y CERTIFICACION DE COMPETENCIA LABORALES",
"CONSEJO NACIONAL PARA EL DESARROLLO Y LA INCLUSIÓN DE LAS PERSONAS CON DISCAPACIDAD",
"CONSEJO NACIONAL PARA LA CULTURA Y LAS ARTES",
"CONSEJO NACIONAL PARA PREVENIR LA DISCRIMINACION",
"COORDINACION GENERAL DE LA COMISION MEXICANA DE AYUDA A REFUGIADOS",
"COORDINACION NACIONAL DEL PROGRAMA DE DESARROLLO HUMANO OPORTUNIDADES",
"CORPORACIÓN ÁNGELES VERDES",
"CORPORACION MEXICANA DE INVESTIGACION EN MATERIALES S.A. DE C.V.",
"DICONSA S.A. DE C.V.",
"EDUCAL S.A. DE C.V.",
"EL COLEGIO DE LA FRONTERA NORTE A.C.",
"EL COLEGIO DE LA FRONTERA SUR",
"EL COLEGIO DE MEXICO, A.C.",
"EL COLEGIO DE MICHOACAN A.C.",
"EL COLEGIO DE SAN LUIS A.C",
"ESTUDIOS CHURUBUSCO AZTECA S.A.",
"EXPORTADORA DE SAL S.A.DE C.V.",
"FERROCARRIL DEL ISTMO DE TEHUANTEPEC S.A. DE C.V.",
"FERROCARRILES NACIONALES DE MEXICO",
"FIDEICOMISO DE FOMENTO MINERO",
"FIDEICOMISO DE FORMACION Y CAPACITACION PARA EL PERSONAL DE LA MARINA MERCANTE NACIONAL",
"FIDEICOMISO DE RIESGO COMPARTIDO",
"FIDEICOMISO FONDO DE CAPITALIZACION E INVERSION DEL SECTOR RURAL",
"FIDEICOMISO FONDO NACIONAL DE FOMENTO EJIDAL",
"FIDEICOMISO FONDO NACIONAL DE HABITACIONES POPULARES",
"FIDEICOMISO PARA LA CINETECA NACIONAL",
"FIDEICOMISO PROMEXICO",
"FINANCIERA RURAL",
"FONATUR CONSTRUCTORA, S.A. DE C.V.",
"FONATUR MANTENIMIENTO TURISTICO, S.A. DE C.V.",
"FONATUR OPERADORA PORTUARIA, S.A. DE C.V.",
"FONATUR PRESTADORA DE SERVICIOS, S.A. DE C.V.",
"FONDO DE CULTURA ECONOMICA",
"FONDO DE EMPRESAS EXPROPIADAS DEL SECTOR AZUCARERO",
"FONDO DE GARANTIA Y FOMENTO PARA LA AGRICULTURA, GANADERIA Y AVICULTURA",
"FONDO DE GARANTIA Y FOMENTO PARA LAS ACTIVIDADES PESQUERAS",
"FONDO DE INFORMACION Y DOCUMENTACION PARA LA INDUSTRIA",
"FONDO DE LA VIVIENDA DEL ISSSTE",
"FONDO DE OPERACION Y FINANCIAMIENTO BANCARIO A LA VIVIENDA",
"FONDO ESPECIAL DE ASISTENCIA TECNICA Y GARANTIA PARA LOS CREDITOS AGROPECUARIOS",
"FONDO ESPECIAL PARA FINANCIAMIENTOS AGROPECUARIOS",
"FONDO NACIONAL DE FOMENTO AL TURISMO",
"FONDO NACIONAL PARA EL FOMENTO DE LAS ARTESANIAS",
"FONDO PARA EL DESARROLLO DE LOS RECURSOS HUMANOS",
"GRUPO AEROPORTUARIO DE LA CIUDAD DE MEXICO S.A. DE C.V.",
"HOSPITAL GENERAL DE MEXICO",
"HOSPITAL GENERAL DR. MANUEL GEA GONZALEZ",
"HOSPITAL INFANTIL DE MEXICO FEDERICO GOMEZ",
"HOSPITAL JUAREZ DE MEXICO",
"HOSPITAL REGIONAL DE ALTA ESPECIALIDAD DE CIUDAD VICTORIA BICENTENARIO 2010",
"HOSPITAL REGIONAL DE ALTA ESPECIALIDAD DE IXTAPALUCA",
"HOSPITAL REGIONAL DE ALTA ESPECIALIDAD DE LA PENINSULA DE YUCATAN",
"HOSPITAL REGIONAL DE ALTA ESPECIALIDAD DE OAXACA",
"HOSPITAL REGIONAL DE ALTA ESPECIALIDAD DEL BAJIO",
"I.I.I. SERVICIOS S.A. DE C.V.",
"IMPRESORA Y ENCUADERNADORA PROGRESO S.A. DE C.V.",
"INSTALACIONES INMOBILIARIAS PARA INDUSTRIAS, S.A. DE C.V.",
"INSTITUTO DE ADMINISTRACION Y AVALUOS DE BIENES NACIONALES",
"INSTITUTO DE CAPACITACION Y PROFESIONALIZACION EN PROCURACION DE JUSTICIA FEDERAL",
"INSTITUTO DE ECOLOGIA A.C. (INV)",
"INSTITUTO DE INVESTIGACIONES DR. JOSE MARIA LUIS MORA",
"INSTITUTO DE INVESTIGACIONES ELECTRICAS",
"INSTITUTO DE LOS MEXICANOS EN EL EXTERIOR",
"INSTITUTO DE SEGURIDAD SOCIAL PARA LAS FUERZAS ARMADAS MEXICANAS",
"INSTITUTO DE SEGURIDAD Y SERVICIOS SOCIALES DE LOS TRABAJADORES DEL ESTADO",
"INSTITUTO DEL FONDO NACIONAL PARA EL CONSUMO DE LOS TRABAJADORES",
"INSTITUTO FEDERAL DE ACCESO A LA INFORMACION PUBLICA",
"INSTITUTO FEDERAL DE TELECOMUNICACIONES",
"INSTITUTO MATIAS ROMERO DE ESTUDIOS DIPLOMATICOS",
"INSTITUTO MEXICANO DE CINEMATOGRAFIA",
"INSTITUTO MEXICANO DE LA JUVENTUD",
"INSTITUTO MEXICANO DE LA PROPIEDAD INDUSTRIAL",
"INSTITUTO MEXICANO DE LA RADIO",
"INSTITUTO MEXICANO DE TECNOLOGIA DEL AGUA",
"INSTITUTO MEXICANO DEL PETROLEO",
"INSTITUTO MEXICANO DEL SEGURO SOCIAL",
"INSTITUTO MEXICANO DEL TRANSPORTE",
"INSTITUTO NACIONAL DE ANTROPOLOGIA E HISTORIA",
"INSTITUTO NACIONAL DE ASTROFISICA OPTICA Y ELECTRONICA",
"INSTITUTO NACIONAL DE BELLAS ARTES Y LITERATURA",
"INSTITUTO NACIONAL DE CANCEROLOGIA",
"INSTITUTO NACIONAL DE CARDIOLOGIA IGNACIO CHAVEZ",
"INSTITUTO NACIONAL DE CIENCIAS MEDICAS Y NUTRICION SALVADOR ZUBIRAN (INV)",
"INSTITUTO NACIONAL DE CIENCIAS PENALES",
"INSTITUTO NACIONAL DE DESARROLLO SOCIAL",
"INSTITUTO NACIONAL DE ECOLOGIA",
"INSTITUTO NACIONAL DE ECOLOGÍA Y CAMBIO CLIMÁTICO",
"INSTITUTO NACIONAL DE ENFERMEDADES RESPIRATORIAS",
"INSTITUTO NACIONAL DE ESTUDIOS HISTORICOS DE LAS REVOLUCIONES DE MEXICO",
"INSTITUTO NACIONAL DE GERIATRÍA",
"INSTITUTO NACIONAL DE INFRAESTRUCTURA FÍSICA EDUCATIVA",
"INSTITUTO NACIONAL DE INVESTIGACIONES FORESTALES AGRICOLAS Y PECUARIAS",
"INSTITUTO NACIONAL DE INVESTIGACIONES NUCLEARES",
"INSTITUTO NACIONAL DE LA ECONOMÍA SOCIAL",
"INSTITUTO NACIONAL DE LA PESCA",
"INSTITUTO NACIONAL DE LAS MUJERES",
"INSTITUTO NACIONAL DE LAS PERSONAS ADULTAS MAYORES",
"INSTITUTO NACIONAL DE LENGUAS INDIGENAS",
"INSTITUTO NACIONAL DE MEDICINA GENOMICA",
"INSTITUTO NACIONAL DE MIGRACION",
"INSTITUTO NACIONAL DE NEUROLOGIA Y NEUROCIRUGIA DR. MANUEL VELASCO SUAREZ",
"INSTITUTO NACIONAL DE PEDIATRIA",
"INSTITUTO NACIONAL DE PERINATOLOGIA ISIDRO ESPINOSA DE LOS REYES",
"INSTITUTO NACIONAL DE PSIQUIATRIA RAMON DE LA FUENTE MUÑIZ",
"INSTITUTO NACIONAL DE REHABILITACION",
"INSTITUTO NACIONAL DE SALUD PUBLICA",
"INSTITUTO NACIONAL DEL DERECHO DE AUTOR",
"INSTITUTO NACIONAL PARA EL DESARROLLO DE CAPACIDADES DEL SECTOR RURAL A.C.",
"INSTITUTO NACIONAL PARA EL FEDERALISMO Y EL DESARROLLO MUNICIPAL",
"INSTITUTO NACIONAL PARA LA EDUCACION DE LOS ADULTOS",
"INSTITUTO NACIONAL PARA LA EVALUACION DE LA EDUCACION",
"INSTITUTO PARA EL DESARROLLO TECNICO DE LAS HACIENDAS PUBLICAS",
"INSTITUTO PARA LA PROTECCION AL AHORRO BANCARIO",
"INSTITUTO POLITECNICO NACIONAL",
"INSTITUTO POTOSINO DE INVESTIGACION CIENTIFICA Y TECNOLOGICA, A.C.",
"LABORATORIOS DE BIOLOGICOS Y REACTIVOS DE MEXICO S.A. DE C.V.",
"LICONSA S.A. DE C.V.",
"LOTERIA NACIONAL PARA LA ASISTENCIA PUBLICA",
"NACIONAL FINANCIERA S.N.C.",
"NOTIMEX, AGENCIA DE NOTICIAS DEL ESTADO MEXICANO",
"NOTIMEX S.A. DE C.V.",
"PATRONATO DE OBRAS E INSTALACIONES DEL INSTITUTO POLITECNICO NACIONAL",
"PEMEX-EXPLORACION Y PRODUCCION",
"PEMEX-GAS Y PETROQUIMICA BASICA",
"PEMEX-PETROQUIMICA",
"PEMEX-REFINACION",
"PETROLEOS MEXICANOS",
"P.M.I. COMERCIO INTERNACIONAL S.A. DE C.V.",
"POLICIA FEDERAL",
"PRESIDENCIA DE LA REPUBLICA",
"PREVENCION Y READAPTACION SOCIAL",
"PROCURADURIA AGRARIA",
"PROCURADURIA DE LA DEFENSA DEL CONTRIBUYENTE",
"PROCURADURIA FEDERAL DE LA DEFENSA DEL TRABAJO",
"PROCURADURIA FEDERAL DE PROTECCION AL AMBIENTE",
"PROCURADURIA FEDERAL DEL CONSUMIDOR",
"PROCURADURIA GENERAL DE LA REPUBLICA",
"PRODUCTORA NACIONAL DE BIOLOGICOS VETERINARIOS",
"PRONOSTICOS PARA LA ASISTENCIA PUBLICA",
"RADIO EDUCACION",
"REGISTRO AGRARIO NACIONAL",
"SECCION MEXICANA DE LA COMISION INTERNACIONAL DE LIMITES Y AGUAS MEXICO-ESTADOS UNIDOS DE AMERICA",
"SECCION MEXICANA DE LA COMISION INTERNACIONAL DE LIMITES Y AGUAS MEXICO-GUATEMALA-BELICE",
"SECRETARIA DE AGRICULTURA GANADERIA DESARROLLO RURAL PESCA Y ALIMENTACION",
"SECRETARIA DE COMUNICACIONES Y TRANSPORTES",
"SECRETARÍA DE CULTURA",
"SECRETARIA DE DESARROLLO AGRARIO, TERRITORIAL Y URBANO",
"SECRETARIA DE DESARROLLO SOCIAL",
"SECRETARIA DE ECONOMIA",
"SECRETARIA DE EDUCACION PUBLICA",
"SECRETARIA DE ENERGIA",
"SECRETARIA DE GOBERNACION",
"SECRETARIA DE HACIENDA Y CREDITO PUBLICO",
"SECRETARIA DE LA DEFENSA NACIONAL",
"SECRETARIA DE LA FUNCION PUBLICA",
"SECRETARIA DE MARINA",
"SECRETARIA DE MEDIO AMBIENTE Y RECURSOS NATURALES",
"SECRETARIA DE RELACIONES EXTERIORES",
"SECRETARIA DE SALUD",
"SECRETARIA DE TURISMO",
"SECRETARIA DEL TRABAJO Y PREVISION SOCIAL",
"SECRETARÍA EJECUTIVA DEL SISTEMA NACIONAL ANTICORRUPCIÓN",
"SECRETARIA GENERAL DEL CONSEJO NACIONAL DE POBLACION",
"SECRETARIA TECNICA DE LA COMISION CALIFICADORA DE PUBLICACIONES Y REVISTAS ILUSTRADAS",
"SECRETARIADO EJECUTIVO DEL SISTEMA NACIONAL ANTICORRUPCIÓN",
"SECRETARIADO EJECUTIVO DEL SISTEMA NACIONAL DE SEGURIDAD PUBLICA",
"SERVICIO DE ADMINISTRACION TRIBUTARIA",
"SERVICIO DE ADMINISTRACION Y ENAJENACION DE BIENES",
"SERVICIO DE INFORMACION AGROALIMENTARIA Y PESQUERA",
"SERVICIO DE PROTECCIÓN FEDERAL",
"SERVICIO GEOLOGICO MEXICANO",
"SERVICIO NACIONAL DE INSPECCION Y CERTIFICACION DE SEMILLAS",
"SERVICIO NACIONAL DE SANIDAD INOCUIDAD Y CALIDAD AGROALIMENTARIA",
"SERVICIO POSTAL MEXICANO",
"SERVICIOS A LA NAVEGACION EN EL ESPACIO AEREO MEXICANO",
"SERVICIOS AEROPORTUARIOS DE LA CIUDAD DE MEXICO S.A. DE C.V.",
"SERVICIOS DE ALMACENAMIENTO DEL NORTE S.A.",
"SERVICIOS DE ATENCION PSIQUIATRICA",
"SISTEMA NACIONAL PARA EL DESARROLLO INTEGRAL DE LA FAMILIA",
"SISTEMA PÚBLICO DE RADIODIFUSIÓN DEL ESTADO MEXICANO",
"SOCIEDAD HIPOTECARIA FEDERAL S.N.C.",
"TALLERES GRAFICOS DE MEXICO",
"TECNOLOGICO NACIONAL DE MEXICO",
"TELECOMUNICACIONES DE MEXICO",
"TELEVISION METROPOLITANA S.A. DE C.V.",
"TRANSPORTADORA DE SAL S.A. DE C.V.",
"TRIBUNAL FEDERAL DE CONCILIACION Y ARBITRAJE",
"TRIBUNAL FEDERAL DE JUSTICIA FISCAL Y ADMINISTRATIVA CON SEDE EN EL DISTRITO FEDERAL",
"TRIBUNAL SUPERIOR AGRARIO.",
"TRIBUNALES UNITARIOS AGRARIOS",
"UNIVERSIDAD ABIERTA Y A DISTANCIA DE MÉXICO",
"UNIVERSIDAD AUTONOMA AGRARIA ANTONIO NARRO",
"UNIVERSIDAD AUTONOMA DE CHAPINGO",
"UNIVERSIDAD AUTONOMA METROPOLITANA",
"UNIVERSIDAD PEDAGOGICA NACIONAL",
"XE-IPN CANAL 11"
]
def get_institution():
    """Return a randomly chosen institution name from the module-level `institutions` list."""
    return random.choice(institutions)
# Shared JSON catalogs used by the fake-record builders below.
# Paths are relative to the process working directory — run from the repo root.
with open("./catalogs/catRelacionPersona.json") as relacion_persona:
    cat_relacion_persona = json.load(relacion_persona)
with open("./catalogs/catTipoApoyo.json") as tipo_apoyo:
    cat_tipo_apoyo = json.load(tipo_apoyo)
def dependiente():
    """Build one fake 'dependent person' record.

    Uses the module's fake-data helpers (get_name, fake.curp, get_address, ...)
    and the loaded catalogs for relation and support types.  Several fields are
    fixed sample values (e.g. numero_identificacion_nacional, sector_industria).
    """
    return {
        "nombre_personal": {
            "nombres": get_name(),
            "primer_apellido": get_last_name(),
            "segundo_apellido": get_last_name()
        },
        "tipo_relacion": random.choice(cat_relacion_persona),
        "nacionalidades": citizenship(),
        "curp": fake.curp(),
        "rfc": fake.rfc(),
        "fecha_nacimiento": get_bith_date(),
        "numero_identificacion_nacional": "ABCD1234",
        "habita_domicilio_declarante": rand_bool(),
        "domicilio": get_address(),
        "medio_contacto": get_email('coldmailcom'),
        "ingresos_propios": True,
        "ocupacion_profesion": "Administrador de empresas",
        "sector_industria": {
            "codigo": "SFS",
            "valor": "Servicios de salud y asistencia social"
        },
        "proveedor_contratista_gobierno": True,
        "tiene_intereses_mismo_sector_declarante": True,
        "desarrolla_cabildeo_sector_declarante": {
            "respuesta": True,
            "observaciones": lorem_ipsum()
        },
        "beneficiario_programa_publico": [{
            "nombre_programa": "Prospera",
            "institucion_otorga_apoyo": get_institution(),
            "tipo_apoyo": random.choice(cat_tipo_apoyo),
            "valor_apoyo": random.randint(10000, 100000)
        }],
        "observaciones": lorem_ipsum()
    }
def bien_mueble_registrable():
    """Build one fake registrable movable-asset (vehicle) declaration record.

    Most fields are fixed sample values; the brand, acquirer names and RFC are
    randomized via the module's fake-data helpers.
    """
    return {
        "id": 123,
        "tipo_operacion": {
            "codigo": "INCP",
            "valor": "Incorporacion"
        },
        "tipo_bien_mueble": {
            "codigo": "VEH",
            "valor": "Vehiculo"
        },
        "marca": random.choice (["BMW", "MASERATI","NISSAN", "KIA", "FERRARI", "JAGUAR", "FORD", "JEEP"]),
        "submarca": "RS-122234",
        "modelo": 2018,
        "numero_serie": "6545243-4334",
        "lugar_registro": {
            "pais": {
                "valor": "MEXICO",
                "codigo": "MX"
            },
            "entidad": {
                "nom_agee": "MEXICO",
                "cve_agee": "15"
            }
        },
        "titular_bien": {
            "codigo": "DECL",
            "valor": "Declarante"
        },
        "porcentaje_propiedad": 70,
        "nombres_copropietarios": [
            get_name()+" "+get_last_name()+" "+get_last_name()
        ],
        "numero_registro_vehicular": 455000,
        "forma_adquisicion": {
            "codigo": "CES",
            "valor": "Cesion"
        },
        "nombre_denominacion_adquirio": get_name()+" "+get_last_name()+" "+get_last_name(),
        "rfc_quien_adquirio": fake.rfc(),
        "relacion_persona_quien_adquirio": random.choice(cat_relacion_persona),
        "sector_industria": {
            "codigo": "SFS",
            "valor": "Servicios de salud y asistencia social"
        },
        "fecha_adquisicion": get_bith_date(),
        "precio_adquisicion": {
            "valor": 4000,
            "moneda": {
                "codigo": "MXN",
                "moneda": "MXN"
            }
        },
        "observaciones": lorem_ipsum()
    }
# Catalogs for real-estate types and acquisition forms, used by bien_inmueble().
with open('./catalogs/catTipoBienInmueble.json') as inmuebles:
    cat_bien_inmueble = json.load(inmuebles)
with open('./catalogs/catFormaAdquisicion.json') as forma_adquisicion:
    cat_forma_adquisicion = json.load(forma_adquisicion)
def bien_inmueble():
    """Build one fake real-estate declaration record.

    Surfaces, ownership percentage and prices are randomized; registry numbers
    are random integers and fecha_contrato is a fixed sample date.
    """
    inmueble = {
        "id": 123,
        "tipo_operacion": {
            "codigo": "INCP",
            "valor": "Incorporacion"
        },
        "tipo_bien": random.choice(cat_bien_inmueble),
        "superficie_terreno": random.randint(300, 600),
        "superficie_construccion": random.randint(70, 150),
        "titular": {
            "codigo": "DECL",
            "valor": "Declarante"
        },
        "porcentaje_propiedad": random.randint(10,70),
        "nombre_copropietario": {
            "nombres": get_name(),
            "primer_apellido": get_last_name(),
            "segundo_apellido": get_last_name()
        },
        "identificacion_bien": {
            "numero_escritura_publica": random.randint(100000,99999999),
            "numero_registro_publico": random.randint(100000,99999999),
            "folio_real": "AAC"+ str(random.randint(10000, 100000)),
            "fecha_contrato": "2010-07-26"  # fixed sample date
        },
        "domicilio_bien": get_address(),
        "forma_adquisicion": random.choice(cat_forma_adquisicion),
        "nombre_denominacion_quien_adquirio": get_name() + " " + get_last_name() + " " + get_last_name(),
        "rfc_quien_adquirio": fake.rfc(),
        "curp_quien_adquirio": fake.curp(),
        "relacion_persona_adquirio": random.choice(cat_relacion_persona),
        "sector_industria": {
            "codigo": "SFS",
            "valor": "Servicios de salud y asistencia social"
        },
        "fecha_adquisicion": get_bith_date(),
        "precio_adquisicion": {
            "valor": random.randint(100000, 20000000),
            "moneda": {
                "codigo": "MXN",
                "moneda": "MXN"
            }
        },
        "valor_catastral": random.randint(100000, 20000000),
        "observaciones": lorem_ipsum()
    }
    return inmueble
def nivel_gobierno():
    """Return a randomly selected government level as a {codigo, valor} dict."""
    opciones = [
        {"codigo": "EST", "valor": "Estatal"},
        {"codigo": "FED", "valor": "Federal"},
        {"codigo": "MUN", "valor": "Municipal"},
    ]
    return random.choice(opciones)
def grados_academicos():
    """Return a randomly selected academic degree as a {codigo, valor} dict."""
    catalogo = [
        {"codigo": "PREE", "valor": "Preescolar"},
        {"codigo": "PRIM", "valor": "Primaria"},
        {"codigo": "SECU", "valor": "Secundaria"},
        {"codigo": "BACH", "valor": "Bachillerato"},
        {"codigo": "LICE", "valor": "Licenciatura"},
        {"codigo": "MAES", "valor": "Maestría"},
        {"codigo": "DOCT", "valor": "Doctorado"},
    ]
    return random.choice(catalogo)
import os
import sys
# CLI arguments: the root directory to search and the keyword to match.
current_path = sys.argv[1]
key_word = sys.argv[2]
def search(path, keyword):
    """Recursively print the absolute path of every file under `path` whose
    absolute path contains `keyword`.

    Bug fixes vs. the original:
    - `os.path.abspath(d)` resolved directory entries against the *current
      working directory*, not `path`, so recursion and file checks were wrong
      for anything not directly under the CWD; now joins with `path` first.
    - `abs_path.find(keyword) > 0` missed a match at index 0; now uses `in`.
    - Python 2 `print` statement replaced with the print() function.
    """
    for entry in os.listdir(path):
        abs_path = os.path.abspath(os.path.join(path, entry))
        if os.path.isfile(abs_path):
            if keyword in abs_path:
                print(abs_path)
        else:
            search(abs_path, keyword)
# Entry point: search the directory given on the command line.
if __name__ == '__main__':
    search(current_path, key_word)
|
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker
import seaborn as sns
import conic_parameters
# Output PDF is named after this script (foo.py -> foo.pdf).
plotfile = sys.argv[0].replace('.py', '.pdf')
sns.set_style('white')
# 3x3 grid of panels, one per inclination angle (0..80 deg in 10-deg steps).
fig, axes = plt.subplots(3, 3, figsize=(9, 9), sharex=True, sharey=True)
incs_deg = 10.0*np.arange(9)
nbeta = 30
#betas = np.logspace(-5.0, -0.5, nbeta)
betas = np.linspace(0.003, 0.5, nbeta)**2
# xi values to scan; None presumably selects a default model — TODO confirm
# against conic_parameters.HeadTail.
XI_LIST = [None, 1.0, 0.8, 0.4]
nxi = len(XI_LIST)
# Reference curves R90(Rc) for conics with T = 0 and T = 1; the T = 1 branch
# is imaginary for 2*Rc < 1, so those points are clamped to 0.
Rc_grid = np.linspace(0.0, 10.0, 2000)
R90_T0_grid = np.sqrt(2*Rc_grid)
R90_T1_grid = np.sqrt(2*Rc_grid - 1.0)
R90_T1_grid[~np.isfinite(R90_T1_grid)] = 0.0
cols = sns.color_palette('magma', n_colors=nxi)
for ax, inc_deg in zip(axes.flat, incs_deg):
    # Shaded bands between the reference curves plus unit/diagonal guide lines.
    ax.fill_between(Rc_grid, R90_T1_grid, R90_T0_grid, color='k', alpha=0.2)
    ax.fill_between(Rc_grid, R90_T0_grid, color='k', alpha=0.1)
    ax.plot(Rc_grid, R90_T0_grid, c='k', lw=0.5)
    ax.axhline(1.0, lw=0.5, alpha=0.5, color='k', zorder=-1)
    ax.axvline(1.0, lw=0.5, alpha=0.5, color='k', zorder=-1)
    ax.plot([0.0, 10.0], [0.0, 10.0], lw=0.5, alpha=0.5, color='k', zorder=-1)
    for xi, col in list(zip(XI_LIST, cols)):
        for beta in betas:
            # Fit to head and analytic fit to fit to tail
            ht = conic_parameters.HeadTail(beta, xi=xi, xmin=0.0, method='analytic fit')
            # Parameters for head conic
            T_h = ht.sig_h*ht.tau_h**2
            tilde_Rc_h = ht.A_h
            R0_h = 1.0
            R90_h = ht.R90
            # Parameters for tail conic
            T_t = -ht.tau_t**2
            R0_t = ht.x0_t - ht.a_t
            # Equation E from notes
            tilde_Rc_t = np.abs(T_t)*ht.a_t/R0_t
            R90_t = R0_t * np.sqrt(2*tilde_Rc_t - T_t)
            T_combine = 2*tilde_Rc_h - (R90_t / R0_h)**2
            inc = np.radians(inc_deg)
            # Projected head quantities as functions of inc
            f_h = np.sqrt(1.0 + T_h * np.tan(inc)**2)
            tilde_Rc_h_prime = tilde_Rc_h / (
                np.cos(inc)**2 * f_h * (
                    1.0 + (tilde_Rc_h / T_h) * (f_h - 1.0)
                )
            )
            T_h_prime = T_h / (np.cos(inc)**2 * f_h**2)
            R0_h_prime = R0_h * np.cos(inc) * (
                1.0 + (tilde_Rc_h / T_h) * (f_h - 1.0)
            )
            R90_h_prime = R0_h_prime * np.sqrt(2*tilde_Rc_h_prime - T_h_prime)
            # Projected tail quantities as functions of inc
            f_t = np.sqrt(1.0 + T_t * np.tan(inc)**2)
            # Equation B from notes
            T_t_prime = T_t / f_t**2 / np.cos(inc)**2
            # Equation D from notes
            R0_t_prime = R0_t * np.cos(inc) * (
                1.0 + (tilde_Rc_t / T_t) * (f_t - 1.0)
            )
            # Equation C from notes
            tilde_Rc_t_prime = tilde_Rc_t / (
                np.cos(inc)**2 * f_t * (
                    1.0 + (tilde_Rc_t / T_t) * (f_t - 1.0)
                )
            )
            # Equation A from notes
            R90_t_prime = R0_t_prime * np.sqrt(2*tilde_Rc_t_prime - T_t_prime)
            # Finally, the combined discriminant (equation F from notes)
            T_combine_prime = 2*tilde_Rc_h_prime - (R90_t_prime / R0_h_prime)**2
            if inc_deg < 30.0:
                # Plot the head for low inclinations
                y = R90_h_prime/R0_h_prime
            else:
                # Plot the tail for high inclinations
                y = R90_t_prime/R0_h_prime
            # Marker size scales with the projected R0 of the head.
            ax.scatter([tilde_Rc_h_prime], [y],
                       c=col, edgecolors='none',
                       marker='o', s=25*R0_h_prime/R0_h, alpha=0.4)
    ax.text(3.0, 0.5, rf'$i = {inc_deg:.0f}^\circ$',
            bbox={'facecolor': 'w', 'alpha': 0.8, 'edgecolor': 'none'})
# Axis settings on one panel propagate via sharex/sharey.
axes[-1, 0].set(
    yscale='linear',
    xscale='linear',
    xlim=[0.0, 5.1],
    ylim=[0.0, 5.1],
    xlabel=r"$\widetilde{R}_{c}{}'$",
    ylabel=r"$\widetilde{R}_{90}{}'$",
)
fig.tight_layout()
fig.savefig(plotfile)
# Emit the output filename (no newline) so calling tools can capture it.
print(plotfile, end='')
|
#!/usr/bin/env python
import boto3
from botocore.client import Config
import csv
from dateutil.parser import parse
import datetime
import os
from collections import OrderedDict
# CSV column order for the snapshot report; values are filled per-row below.
ordered_fieldnames = OrderedDict([('CreationDate', None),('SnapshotId',None),('SnapshotVolumeSize',None),('SnapshotTags',None)])
ec2 = boto3.client('ec2', region_name='us-west-2')
# Paginator so more snapshots than one API page can be enumerated.
paginator = ec2.get_paginator('describe_snapshots')
def find_snapshots(snapId, snapshots):
    """Return the first snapshot dict whose 'SnapshotId' equals snapId, or None."""
    return next((snap for snap in snapshots if snap.get('SnapshotId') == snapId), None)
# Build one CSV row per snapshot and write SnapshotOutput.csv.
# BUG FIX: the original loop iterated an undefined name (`snapshots`),
# referenced `volume` and `allSnapshots` which were never defined, and called
# the nonexistent `find_snapshot` helper; it also used Python 2 print
# statements and opened the csv in 'wb' mode.  Rebuilt to fetch snapshots via
# the paginator and emit the columns declared in `ordered_fieldnames`.
all_snapshots = []
for page in paginator.paginate(OwnerIds=['self']):
    all_snapshots.extend(page.get('Snapshots', []))
snap_data = []
for snapshot in all_snapshots:
    snap_data.append({
        'CreationDate': snapshot.get('StartTime'),
        'SnapshotId': snapshot.get('SnapshotId'),
        'SnapshotVolumeSize': snapshot.get('VolumeSize'),
        'SnapshotTags': str(snapshot.get('Tags')),
    })
print('Writing data to file...')
# Text mode + newline='' is the Python 3 idiom for the csv module.
with open('SnapshotOutput.csv', 'w', newline='') as f:  # saved to the working directory
    w = csv.DictWriter(f, fieldnames=ordered_fieldnames)
    w.writeheader()
    w.writerows(snap_data)
print('Finished writing to file: SnapshotOutput.csv')
|
import string
from words import choose_word
from images import IMAGES
'''
Important instructions
* function and variable names use snake_case -> is_prime
* constant variables use UPPER_CASE -> PI
'''
def is_word_guessed(secret_word, letters_guessed):
    '''
    secret_word: the word the user is trying to guess
    letters_guessed: list of all letters guessed so far
    returns:
        True when every letter of secret_word has been guessed,
        False otherwise.
    '''
    return all(letter in letters_guessed for letter in secret_word)
# To test this function manually, call e.g. get_guessed_word("kindness", ['k', 'n', 'd'])
def get_guessed_word(secret_word, letters_guessed):
    '''
    secret_word: the word the user is trying to guess
    letters_guessed: list of all letters guessed so far
    returns:
        a string where every guessed character of secret_word is shown and
        every unguessed character is replaced by an underscore.
    Example:
        secret_word "kindness" with letters_guessed ['k', 'n', 's']
        -> "k_n_n_ss"
    '''
    return ''.join(ch if ch in letters_guessed else '_' for ch in secret_word)
def get_available_letters(letters_guessed):
    '''
    letters_guessed: list of all guessed characters
    returns:
        the lowercase alphabet with every guessed letter removed.
    Example:
        letters_guessed = ['e', 'a'] -> 'bcdfghijklmnopqrstuvwxyz'
    '''
    remaining = (ch for ch in string.ascii_lowercase if ch not in letters_guessed)
    return ''.join(remaining)
def hangman_image(num):
    """Return the ASCII-art hangman frame for `num` wrong guesses (index into IMAGES)."""
    return IMAGES[num]
def isValidInput(letter):
    """Return True when `letter` is exactly one alphabetic character."""
    return len(letter) == 1 and letter.isalpha()
def get_hint(secret_word, letters_guessed):
    """Reveal ONE additional letter of secret_word.

    Appends the first not-yet-guessed letter to letters_guessed (mutating it)
    and returns the list.

    Bug fix: the original appended *every* missing letter, so a single hint
    revealed the entire word and made the game pointless.
    """
    for ch in secret_word:
        if ch not in letters_guessed:
            letters_guessed.append(ch)
            break
    return letters_guessed
def hangman(secret_word):
    '''
    Play one interactive game of Hangman on stdin/stdout.

    secret_word (string): the secret word to be guessed by the user.

    Steps:
    * At the start the user is told how many characters the word has
    * Each round the user guesses one character (or types "hint")
    * After each guess the user gets right/wrong feedback
    * The partially guessed word is shown with underscores for unknowns

    Bug fixes vs. the original:
    * The hint guard was `letter == "hint" and not hint_used`, which made the
      "You have finished your all hints" branch unreachable — a second "hint"
      request fell through and was reported as invalid input.
    * "Remainging" typo in the lives message.
    '''
    print("Welcome to the game, Hangman!")
    print("I am thinking of a word that is {} letters long.".format(
        str(len(secret_word))), end='\n\n')
    letters_guessed = []
    remaining_lives = 8
    hint_used = False
    while remaining_lives > 0:
        available_letters = get_available_letters(letters_guessed)
        print("Available letters: {} ".format(available_letters))
        print("Remaining lives " + str(remaining_lives))
        guess = input("Please guess a letter: ")
        letter = guess.lower()
        if letter == "hint":
            if not hint_used:
                letters_guessed = get_hint(secret_word, letters_guessed)
                print("{} ".format(get_guessed_word(secret_word, letters_guessed)) + "\n")
                hint_used = True
            else:
                print("You have finished your all hints" + "\n")
            continue
        if not isValidInput(letter):
            print("Invalid Input" + "\n" + "Please try again" + "\n")
            continue
        if letter in secret_word:
            letters_guessed.append(letter)
            print("Good guess: {} ".format(
                get_guessed_word(secret_word, letters_guessed)))
            if is_word_guessed(secret_word, letters_guessed):
                print(" * * Congratulations, you won! * * ", end='\n\n')
                break
        else:
            print("Oops! That letter is not in my word: {} ".format(
                get_guessed_word(secret_word, letters_guessed)))
            image = hangman_image(8 - remaining_lives)
            print(image)
            remaining_lives -= 1
            letters_guessed.append(letter)
        print("")
# Load a secret word and start the game.
# Bug fix: the game previously launched at import time, which made the module
# impossible to import (e.g. for testing) without blocking on stdin; guard it.
if __name__ == '__main__':
    secret_word = choose_word()
    hangman(secret_word)
|
"""
Miscellaneous utilities
"""
import sys
from ..exceptions import GMTOSError, GMTCLibError
def clib_extension(os_name=None):
    """
    Return the extension for the shared library for the current OS.

    .. warning::
        Currently only works for OSX and Linux.

    Parameters
    ----------
    os_name : str or None
        The operating system name as given by ``sys.platform``
        (the default if None).

    Returns
    -------
    ext : str
        The extension ('so', 'dylib', etc).

    Raises
    ------
    GMTOSError
        If the operating system is not supported.
    """
    if os_name is None:
        os_name = sys.platform
    # Set the shared library extension in a platform independent way
    if os_name.startswith('linux'):
        lib_ext = 'so'
    elif os_name == 'darwin':
        # Darwin is OSX
        lib_ext = 'dylib'
    else:
        # Bug fix: report the name that was actually checked; the original
        # always interpolated sys.platform, even when os_name was passed
        # explicitly, producing a misleading error message.
        raise GMTOSError('Unknown operating system: {}'.format(os_name))
    return lib_ext
def check_status_code(status, function):
    """
    Check if the status code returned by a function is non-zero.

    Parameters
    ----------
    status : int or None
        The status code returned by a GMT C API function.
    function : str
        The name of the GMT function (used to raise the exception if it's a
        non-zero status code).

    Raises
    ------
    GMTCLibError
        If the status code is non-zero (or None).
    """
    if status == 0:
        return
    # Anything else — including None — counts as a failure.
    raise GMTCLibError(
        'Failed {} with status code {}.'.format(function, status))
|
import pandas as pd
import csv
# Load the FIFA17 player roster CSV (expected in the working directory).
roster = pd.read_csv('FIFA17final.csv',encoding = 'utf8')
# roster.describe()
# Quick smoke test: print rows 1-9.
print(roster[1:10])
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
# Compatibility shim: newer Django ships JsonResponse; on older versions fall
# back to a minimal equivalent built on HttpResponse.
try:
    from django.http import JsonResponse
except ImportError:
    from django.http import HttpResponse

    def JsonResponse(response_data, *args, **kwargs):
        # Serialize to JSON and force the JSON content type.
        return HttpResponse(json.dumps(response_data), *args, content_type='application/json', **kwargs)
|
"""
参考链接:
https://blog.csdn.net/HuangZhang_123/article/details/80660688
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
def orb_match(img1, img2):
    """Detect ORB keypoints/descriptors in both images, brute-force match them
    and display the matches with matplotlib."""
    orb = cv2.ORB_create(nfeatures=50)
    kp1, des1 = orb.detectAndCompute(img1,None)
    kp2, des2 = orb.detectAndCompute(img2,None)
    # Brute-force matcher (BFMatcher): compare descriptors pairwise, then sort
    # the matches by distance.
    # BFMatcher normType options: NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2.
    # NORM_L1/NORM_L2 suit SIFT and SURF descriptors; NORM_HAMMING and
    # NORM_HAMMING2 are for binary descriptors such as ORB.
    bf = cv2.BFMatcher(normType=cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.match(des1,des2)
    matches = sorted(matches, key = lambda x:x.distance)
    # Each match is a DMatch object with these attributes:
    # DMatch.distance - distance between descriptors (lower is better).
    # DMatch.trainIdx - index of the descriptor in the train set
    # DMatch.queryIdx - index of the descriptor in the query set
    # DMatch.imgIdx   - index of the train image.
    # Draw the matched keypoints between the two images and show the result.
    img3 = cv2.drawMatches(img1=img1,keypoints1=kp1,img2=img2,keypoints2=kp2, matches1to2=matches, outImg=img2, flags=2)
    plt.imshow(img3)
    plt.show()
    return
def orb_knn(img1, img2):
    """Detect ORB keypoints/descriptors in both images, k-NN match them
    (k=1) and display the matches with matplotlib."""
    # Use the ORB detector/descriptor to compute keypoints and descriptors.
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Brute-force matcher (BFMatcher): compare descriptors pairwise.
    # BFMatcher normType options: NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2.
    # NORM_L1/NORM_L2 suit SIFT and SURF descriptors; NORM_HAMMING and
    # NORM_HAMMING2 are for binary descriptors such as ORB.
    bf = cv2.BFMatcher(normType=cv2.NORM_HAMMING, crossCheck=True)
    # knnMatch returns the k best matches per descriptor, while plain match
    # returns only the single best match.
    matches = bf.knnMatch(des1, des2, k=1)
    # knnMatch results must be drawn with drawMatchesKnn (list-of-lists shape).
    img3 = cv2.drawMatchesKnn(img1=img1, keypoints1=kp1, img2=img2, keypoints2=kp2, matches1to2=matches, outImg=img2,
                              flags=2)
    plt.imshow(img3)
    plt.show()
    return
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute Windows paths — adjust before running
    # elsewhere.  The trailing 0 loads the images as grayscale.
    img1 = cv2.imread(r'C:\Users\tianx\PycharmProjects\opencv\dataset\other\aa.jpg', 0)
    img2 = cv2.imread(r'C:\Users\tianx\PycharmProjects\opencv\dataset\other\bb.jpg', 0)
    orb_knn(img1,img2)
# Enter your code here. Read input from STDIN. Print output to STDOUT
def second_largest(nums):
    """Return the second-largest *distinct* value in nums.

    Requires at least two distinct values; raises IndexError otherwise
    (matching the original script's behavior on degenerate input).
    """
    return sorted(set(nums), reverse=True)[1]


if __name__ == '__main__':
    # Bug fix: the original was Python 2 only (raw_input / print statement)
    # and abused a dict built via map() just to deduplicate values.
    input()  # first line is the element count; not needed
    values = list(map(int, input().split()))
    print(second_largest(values))
|
import datetime
from ..extensions.database import database as db
from ..extensions.marshmallow import marsh
class Revision(db.Model):
    """A revision (comment plus optional attachment) recorded on a submission."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Submission this revision belongs to.
    submission_id = db.Column(db.Integer, db.ForeignKey(
        'submission.id'), nullable=False)
    # User who created the revision.
    create_by = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    comments = db.Column(db.String(1500), nullable=False)
    attachment_filepath = db.Column(db.String(1500), nullable=True)
    # BUG FIX: pass the callable, not its result.  `datetime.datetime.now()`
    # was evaluated once at import time, so every row got the same
    # process-start timestamp instead of its own creation time.
    create_on = db.Column(db.DateTime, default=datetime.datetime.now)

    def __init__(self, submission_id, create_by, comments, attachment_filepath):
        self.submission_id = submission_id
        self.create_by = create_by
        self.comments = comments
        self.attachment_filepath = attachment_filepath
class RevisionSchema(marsh.Schema):
    """Marshmallow serialization schema for Revision rows."""
    class Meta():
        fields = ('id', 'submission_id', 'create_by',
                  'comments', 'attachment_filepath', 'create_on')

# Reusable schema instances: single object and list-of-objects.
revision_schema = RevisionSchema()
revisions_schema = RevisionSchema(many=True)
# -*- coding: utf-8 -*-
class Solution:
    def transformArray(self, arr):
        """Repeatedly nudge every interior element toward its neighbors.

        An element strictly greater than both neighbors is decremented; one
        strictly smaller than both is incremented.  All updates within a pass
        use the values from the previous pass.  Stops when a pass changes
        nothing and returns the stabilized list.
        """
        result = list(arr)
        changed = True
        while changed:
            changed = False
            snapshot = list(result)
            for idx in range(1, len(snapshot) - 1):
                left, mid, right = snapshot[idx - 1], snapshot[idx], snapshot[idx + 1]
                if mid > left and mid > right:
                    result[idx] = mid - 1
                    changed = True
                elif mid < left and mid < right:
                    result[idx] = mid + 1
                    changed = True
        return result
if __name__ == "__main__":
    # Self-checks with the known example inputs/outputs.
    solution = Solution()
    assert [6, 3, 3, 4] == solution.transformArray([6, 2, 3, 4])
    assert [1, 4, 4, 4, 4, 5] == solution.transformArray([1, 6, 3, 4, 3, 5])
    assert [2, 2, 1, 1, 1, 2, 2, 1] == solution.transformArray([2, 1, 2, 1, 1, 2, 2, 1])
|
projectPath = 'C:/Users/Peter/Documents/maya/projects/auto_rigging' |
import calendar
from django.db import models
class Department(models.Model):
    """A university department (full name plus a short code, e.g. 'CE')."""
    name = models.CharField(max_length=1000)
    shortname = models.CharField(max_length=5)

    def __str__(self):
        return self.name + ' - ' + self.shortname
# def get_default_department():
# return Department.objects.get_or_create(name='دانشکده مهندسی کامپیوتر', shortname='CE')
class Field(models.Model):
    """A field of study, keyed by an externally assigned integer id."""
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=1000)
    department = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return str(self.id) + ' - ' + self.name + ' - ' + str(self.department)
class Lecturer(models.Model):
    """A lecturer; mail defaults to a placeholder when unknown."""
    name = models.CharField(max_length=1000)
    mail = models.EmailField(default=' - ')

    def __str__(self):
        return self.name
class Course(models.Model):
    """A course: a field of study taught by a particular lecturer."""
    field = models.ForeignKey(Field, on_delete=models.CASCADE)
    lecturer = models.ForeignKey(Lecturer, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.field) + ' - ' + str(self.lecturer)
class Lecture(models.Model):
    """One offered group/section of a course."""
    group_id = models.IntegerField()
    course = models.ForeignKey(Course, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.course.field.id) + '-' + str(self.group_id) + ' - ' + self.course.field.name + ' - ' + \
            str(self.course.lecturer)
class LectureSession(models.Model):
    """A weekly time slot of a lecture (day of week + start/end times)."""
    # Week starts on Saturday ('0'), per the local academic calendar.
    DAYS_OF_WEEK = (
        ('0', 'Saturday'),
        ('1', 'Sunday'),
        ('2', 'Monday'),
        ('3', 'Tuesday'),
        ('4', 'Wednesday'),
        ('5', 'Thursday'),
        ('6', 'Friday'),
    )
    lecture = models.ForeignKey(Lecture, on_delete=models.CASCADE)
    start_time = models.TimeField()
    end_time = models.TimeField()
    day = models.CharField(max_length=1, choices=DAYS_OF_WEEK)

    def __str__(self):
        # calendar.day_abbr is Monday-first, while day '0' means Saturday
        # here, so shift by -2 (mod 7): '0' -> 'Sat', '1' -> 'Sun', etc.
        return str(self.lecture) + ' - ' + calendar.day_abbr[
            (int(self.day) - 2) % 7] + ' from ' + self.start_time.strftime('%H:%M') + ' to ' + self.end_time.strftime(
            '%H:%M')
class LectureClassSession(models.Model):
    """A dated occurrence of a course's class (optionally a TA session)."""
    session_number = models.IntegerField()
    date = models.DateField()
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    is_ta = models.BooleanField(default=False)

    def __str__(self):
        return ('TA ' if self.is_ta else '') + str(self.course) + ' - ' + self.date.strftime('%Y/%m/%d')
|
#!/usr/bin/env python
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import tqdm
import argparse
import os
import numpy as np
from pyjetty.mputils import *
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
import ROOT
def fill_branches(tw, j, dy_groomer, alphas=[], sds=[]):
    """Fill per-jet tree branches for jet `j`:

    - the jet itself,
    - dynamical grooming results for each alpha in `alphas`,
    - the extreme-splitting observables (max softer-pt, max z/kt/kappa/tf, min tf),
    - each SoftDrop configuration in `sds` with its z, Delta(R), mu and kt.

    NOTE(review): the mutable default arguments ([]) are shared between calls;
    harmless here since they are only iterated, but fragile to edits.
    """
    tw.fill_branch('j', j)
    for a in alphas:
        dy_groomed = dy_groomer.result(j, a)
        # if dy_groomed.pair().pt() > 0:
        # 	tw.fill_branch('dg_{:.1f}'.format(a), dy_groomed.harder())
        # 	tw.fill_branch('dg_{:.1f}'.format(a), dy_groomed.softer())
        tw.fill_branch('dg_{:.1f}'.format(a), dy_groomed)
    max_pt_groomed = dy_groomer.max_pt_softer(j)
    tw.fill_branch('max_ptsoft', max_pt_groomed)
    max_z_groomed = dy_groomer.max_z(j)
    tw.fill_branch('max_z', max_z_groomed)
    max_kt_groomed = dy_groomer.max_kt(j)
    tw.fill_branch('max_kt', max_kt_groomed)
    max_kappa_groomed = dy_groomer.max_kappa(j)
    tw.fill_branch('max_kappa', max_kappa_groomed)
    max_tf_groomed = dy_groomer.max_tf(j)
    tw.fill_branch('max_tf', max_tf_groomed)
    min_tf_groomed = dy_groomer.min_tf(j)
    tw.fill_branch('min_tf', min_tf_groomed)
    for i,sd in enumerate(sds):
        j_sd = sd.result(j)
        tw.fill_branch('sd{}'.format(i), j_sd)
        sd_info = fjcontrib.get_SD_jet_info(j_sd)
        tw.fill_branch('sd{}_z'.format(i), sd_info.z)
        tw.fill_branch('sd{}_Delta'.format(i), sd_info.dR)
        tw.fill_branch('sd{}_mu'.format(i), sd_info.mu)
        tw.fill_branch('sd{}_kt'.format(i), sd_info.z * j_sd.pt() * sd_info.dR)
def fill_ncoll_branches(pythia, tw):
    """Record the Angantyr heavy-ion sub-collision counters on the event tree."""
    # The total number of separate sub-collisions.
    tw.fill_branch('nCollTot', pythia.info.hiinfo.nCollTot())
    # The number of separate non-diffractive sub collisions in the
    # current event.
    tw.fill_branch('nCollND', pythia.info.hiinfo.nCollND())
    # The total number of non-diffractive sub collisions in the current event.
    tw.fill_branch('nCollNDTot', pythia.info.hiinfo.nCollNDTot())
    # The number of separate single diffractive projectile excitation
    # sub collisions in the current event.
    tw.fill_branch('nCollSDP', pythia.info.hiinfo.nCollSDP())
    # The number of separate single diffractive target excitation sub
    # collisions in the current event.
    tw.fill_branch('nCollSDT', pythia.info.hiinfo.nCollSDT())
    # The number of separate double diffractive sub collisions in the
    # current event.
    tw.fill_branch('nCollDD', pythia.info.hiinfo.nCollDD())
    # The number of separate central diffractive sub collisions in the
    # current event.  (Comment fixed: this counter is CD, not DD — the
    # original comment was a copy-paste of the nCollDD one.)
    tw.fill_branch('nCollCD', pythia.info.hiinfo.nCollCD())
    # The number of separate elastic sub collisions.
    tw.fill_branch('nCollEL', pythia.info.hiinfo.nCollEL())
def main():
    """Run PYTHIA8 on the fly, cluster jets with FastJet, apply dynamical
    grooming and SoftDrop, and write per-event/per-jet ROOT trees plus a
    HepMC2 event file."""
    parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
    pyconf.add_standard_pythia_args(parser)
    # could use --py-seed
    parser.add_argument('--fj-R', help='jet finder R', default=0.8, type=float)
    parser.add_argument('--user-seed', help='pythia seed', default=-1, type=int)
    parser.add_argument('--output', default='{}.root'.format(os.path.basename(__file__)), type=str)
    parser.add_argument('--min-jet-pt', help='jet pt selection', default=50., type=float)
    parser.add_argument('--max-jet-pt', help='jet pt selection', default=1000., type=float)
    parser.add_argument('--npart-min', help='minimum npart in Argantyr', default=2, type=int)
    args = parser.parse_args()
    # Negative seed means "let pythia pick"; otherwise seed it explicitly.
    if args.user_seed < 0:
        args.user_seed = -1
        mycfg = []
    else:
        pinfo('user seed for pythia', args.user_seed)
        # mycfg = ['PhaseSpace:pThatMin = 100']
        mycfg = ['Random:setSeed=on', 'Random:seed={}'.format(args.user_seed)]
    pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
    # Enforce a minimum number of events.
    if args.nev < 100:
        args.nev = 100
    # print the banner first
    fj.ClusterSequence.print_banner()
    print()
    # set up our jet definition and a jet selector
    jet_R0 = args.fj_R
    jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
    print(jet_def)
    # hadron level - ALICE
    max_eta_hadron = 3.
    pwarning('max eta for particles after hadronization set to', max_eta_hadron)
    parts_selector_h = fj.SelectorAbsEtaMax(max_eta_hadron)
    # Jets kept fully inside the acceptance (fiducial cut of ~1.05*R).
    jet_selector = fj.SelectorPtMin(args.min_jet_pt) & fj.SelectorPtMax(args.max_jet_pt) & fj.SelectorAbsEtaMax(max_eta_hadron - 1.05 * jet_R0)
    # Forward selector (3 < |eta| < 5) used as a centrality-like estimator.
    parts_selector_cent = fj.SelectorAbsEtaMax(5.) & fj.SelectorAbsEtaMin(3.)
    hepmc2output = '{}.hepmc2.dat'.format(args.output.replace('.root', ''))
    pyhepmc2writer = pythiaext.Pythia8HepMC2Wrapper(hepmc2output)
    # Output trees: t = full-hadron jets, tch = charged jets, te = events.
    outf = ROOT.TFile(args.output, 'recreate')
    outf.cd()
    t = ROOT.TTree('t', 't')
    tw = RTreeWriter(tree=t)
    tch = ROOT.TTree('tch', 'tch')
    twch = RTreeWriter(tree=tch)
    te = ROOT.TTree('te', 'te')
    twe = RTreeWriter(tree=te)
    # Cambridge/Aachen reclustering used by the dynamical groomer.
    jet_def_lund = fj.JetDefinition(fj.cambridge_algorithm, 1.0)
    dy_groomer = fjcontrib.DynamicalGroomer(jet_def_lund)
    print (dy_groomer.description())
    # Two SoftDrop settings: beta=0 with zcut 0.1 and 0.2.
    sds = []
    sd01 = fjcontrib.SoftDrop(0, 0.1, 1.0)
    sd02 = fjcontrib.SoftDrop(0, 0.2, 1.0)
    sds.append(sd01)
    sds.append(sd02)
    # event loop
    for iev in tqdm.tqdm(range(args.nev)):
        if not pythia.next():
            continue
        twe.clear()
        tw.clear()
        twch.clear()
        weight = pythia.info.weight()
        if args.py_PbPb:
            # from main113.cc
            # Also fill the number of (absorptively and diffractively)
            # wounded nucleaons.
            nw = pythia.info.hiinfo.nAbsTarg() + pythia.info.hiinfo.nDiffTarg() + pythia.info.hiinfo.nAbsProj() + pythia.info.hiinfo.nDiffProj()
            fill_ncoll_branches(pythia, twe)
        else:
            nw = 2
        twe.fill_branch('nw', nw)
        twe.fill_branch('w', weight)
        # Final-state particles: all hadrons and charged-only selections.
        parts_pythia_h = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal], 0, False)
        parts_pythia_h_selected = parts_selector_h(parts_pythia_h)
        parts_pythia_ch = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal, pythiafjext.kCharged], 0, False)
        parts_pythia_ch_selected = parts_selector_h(parts_pythia_ch)
        nch_total = len(parts_pythia_ch)
        twe.fill_branch('nch', nch_total)
        ncharged_fwd = len(parts_selector_cent(parts_pythia_ch))
        twe.fill_branch('nchfwd', ncharged_fwd)
        twe.fill_branch('iev', iev)
        # Skip too-peripheral PbPb events (npart below threshold).
        if args.py_PbPb and args.npart_min > nw:
            twe.fill_tree()
            continue
        if args.py_PbPb:
            pyhepmc2writer.fillEvent(pythia)
        # do the rest only if centrality right
        tw.fill_branch('iev', iev)
        tw.fill_branch('w', weight)
        twch.fill_branch('iev', iev)
        twch.fill_branch('w', weight)
        jets_h = jet_selector(fj.sorted_by_pt(jet_def(parts_pythia_h)))
        jets_h_ch = jet_selector(fj.sorted_by_pt(jet_def(parts_pythia_ch)))
        [fill_branches(tw, j, dy_groomer, alphas=[0.1, 1.0, 2.0], sds=sds) for j in jets_h]
        if len(jets_h) > 0:
            tw.fill_tree()
            # For pp, only write the HepMC event when it has a selected jet.
            if args.py_PbPb is False:
                pyhepmc2writer.fillEvent(pythia)
        [fill_branches(twch, j, dy_groomer, alphas=[0.1, 1.0, 2.0], sds=sds) for j in jets_h_ch]
        if len(jets_h_ch) > 0:
            twch.fill_tree()
        twe.fill_tree()
    pythia.stat()
    outf.Write()
    outf.Close()
# Standard script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that device and simulator bundles are built correctly.
"""
import plistlib
import TestGyp
import os
import struct
import subprocess
import sys
import tempfile
# This suite only applies on macOS and is currently disabled even there.
if sys.platform == 'darwin':
    print "This test is currently disabled: https://crbug.com/483696."
    sys.exit(0)
def CheckFileType(file, expected):
    """Fail the test unless `lipo -info` output for `file` contains `expected`
    (an architecture string such as 'armv7' or 'i386')."""
    proc = subprocess.Popen(['lipo', '-info', file], stdout=subprocess.PIPE)
    o = proc.communicate()[0].strip()
    assert not proc.returncode
    if not expected in o:
        print 'File: Expected %s, got %s' % (expected, o)
        test.fail_test()
def HasCerts():
    """Return True when at least one codesigning identity is available."""
    # Because the bots do not have certs, don't check them if there are no
    # certs available.
    proc = subprocess.Popen(['security','find-identity','-p', 'codesigning',
                             '-v'], stdout=subprocess.PIPE)
    return "0 valid identities found" not in proc.communicate()[0].strip()
def CheckSignature(file):
    """Fail the test when `codesign -v` reports that `file` is unsigned."""
    proc = subprocess.Popen(['codesign', '-v', file], stdout=subprocess.PIPE)
    o = proc.communicate()[0].strip()
    assert not proc.returncode
    if "code object is not signed at all" in o:
        print 'File %s not properly signed.' % (file)
        test.fail_test()
def CheckEntitlements(file, expected_entitlements):
    """Extract the entitlements blob embedded in `file`'s code signature and
    fail the test unless it parses and matches `expected_entitlements`."""
    with tempfile.NamedTemporaryFile() as temp:
        # codesign writes the entitlements to temp.name via its own fd, so
        # this handle's read position is still at 0 afterwards — presumably
        # intentional; verify if this ever reads empty.
        proc = subprocess.Popen(['codesign', '--display', '--entitlements',
                                 temp.name, file], stdout=subprocess.PIPE)
        o = proc.communicate()[0].strip()
        assert not proc.returncode
        data = temp.read()
    entitlements = ParseEntitlements(data)
    if not entitlements:
        print 'No valid entitlements found in %s.' % (file)
        test.fail_test()
    if entitlements != expected_entitlements:
        print 'Unexpected entitlements found in %s.' % (file)
        test.fail_test()
def ParseEntitlements(data):
    """Return the payload of an embedded-entitlements blob, or None.

    The blob layout is an 8-byte big-endian header (magic 0xfade7171 and the
    total blob length) followed by the entitlements plist payload.
    """
    header_size = 8
    if len(data) < header_size:
        return None
    magic, length = struct.unpack('>II', data[:header_size])
    if magic != 0xfade7171 or length != len(data):
        return None
    return data[header_size:]
def GetProductVersion():
    """Return the iphoneos SDK ProductVersion reported by xcodebuild."""
    args = ['xcodebuild','-version','-sdk','iphoneos','ProductVersion']
    job = subprocess.Popen(args, stdout=subprocess.PIPE)
    return job.communicate()[0].strip()
def CheckPlistvalue(plist, key, expected):
    """Fail the test unless plist[key] exists and equals `expected`."""
    if key not in plist:
        print '%s not set in plist' % key
        test.fail_test()
        return
    actual = plist[key]
    if actual != expected:
        print 'File: Expected %s, got %s for %s' % (expected, actual, key)
        test.fail_test()
def CheckPlistNotSet(plist, key):
    """Fail the test when `key` is present in the plist at all."""
    if key in plist:
        print '%s should not be set in plist' % key
        test.fail_test()
        return
def ConvertBinaryPlistToXML(path):
    """Convert a binary plist at `path` to XML in place via `plutil`
    (plistlib cannot read binary plists)."""
    # NOTE(review): subprocess.call returns an exit code, not a process
    # object, so the name `proc` is misleading and stdout=PIPE is a no-op
    # here; the return code is also never checked.
    proc = subprocess.call(['plutil', '-convert', 'xml1', path],
                           stdout=subprocess.PIPE)
# Main flow (macOS only): generate the project, build for device and
# simulator configurations, and verify the resulting bundles.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
    test.run_gyp('test-device.gyp', chdir='app-bundle')
    test_configs = ['Default-iphoneos', 'Default']
    # TODO(justincohen): Disabling 'Default-iphoneos' for xcode until bots are
    # configured with signing certs.
    if test.format == 'xcode':
        test_configs.remove('Default-iphoneos')
    for configuration in test_configs:
        test.set_configuration(configuration)
        test.build('test-device.gyp', 'test_app', chdir='app-bundle')
        result_file = test.built_file_path('Test App Gyp.bundle/Test App Gyp',
                                           chdir='app-bundle')
        test.must_exist(result_file)
        info_plist = test.built_file_path('Test App Gyp.bundle/Info.plist',
                                          chdir='app-bundle')
        # plistlib doesn't support binary plists, but that's what Xcode creates.
        if test.format == 'xcode':
            ConvertBinaryPlistToXML(info_plist)
        plist = plistlib.readPlist(info_plist)
        CheckPlistvalue(plist, 'UIDeviceFamily', [1, 2])
        # Device builds must be armv7 and carry iPhoneOS platform metadata;
        # simulator builds must be i386 with iPhoneSimulator metadata.
        if configuration == 'Default-iphoneos':
            CheckFileType(result_file, 'armv7')
            CheckPlistvalue(plist, 'DTPlatformVersion', GetProductVersion())
            CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneOS'])
            CheckPlistvalue(plist, 'DTPlatformName', 'iphoneos')
        else:
            CheckFileType(result_file, 'i386')
            CheckPlistNotSet(plist, 'DTPlatformVersion')
            CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneSimulator'])
            CheckPlistvalue(plist, 'DTPlatformName', 'iphonesimulator')
        # Signature/entitlement checks need certs and a device build.
        if HasCerts() and configuration == 'Default-iphoneos':
            test.build('test-device.gyp', 'sig_test', chdir='app-bundle')
            result_file = test.built_file_path('sig_test.bundle/sig_test',
                                               chdir='app-bundle')
            CheckSignature(result_file)
            info_plist = test.built_file_path('sig_test.bundle/Info.plist',
                                              chdir='app-bundle')
            plist = plistlib.readPlist(info_plist)
            CheckPlistvalue(plist, 'UIDeviceFamily', [1])
            entitlements_file = test.built_file_path('sig_test.xcent',
                                                     chdir='app-bundle')
            if os.path.isfile(entitlements_file):
                expected_entitlements = open(entitlements_file).read()
                CheckEntitlements(result_file, expected_entitlements)
    test.pass_test()
|
import Game as G

if __name__ == '__main__':
    # Game setup: MAX moves first on a 5x5 board with an alpha-beta
    # search depth of 10 plies.
    first_player = 'MAX'
    search_depth = 10
    board_size = 5
    game = G.Game(board_size, first_player, search_depth)  # initialise the game
    game.min_max_alfa_beta()
|
import pygame
from pygame.locals import *
from pygame.color import THECOLORS
import math
from sys import exit
import DigiMap
import Digimon
def config_window():
    """Initialise pygame and open the 800x600 main window.

    The extra 200px of width hosts the event sidebar; returns the display
    surface.
    """
    pygame.init()
    width, height = 600 + 200, 600
    surface = pygame.display.set_mode((width, height), 0, 32)
    pygame.display.set_caption('Digital World')
    return surface
def init_map():
    """Create and return the DigiMap region container."""
    return DigiMap.DigiMap()
def init_digimons(digtal_regions):
    """Spawn the initial digimon population.

    Returns a list of digimon groups, one per species, born at fixed
    locations on the map.
    """
    factory = Digimon.DigimonFactory(digtal_regions)
    # (x, y) birth position and initial head-count for each species.
    spawn_plan = {
        'koromon': {'pos': (150, 150), 'number': 5},
        'tanemon': {'pos': (450, 150), 'number': 5},
        'tsunomon': {'pos': (150, 450), 'number': 5},
        'yokomon': {'pos': (450, 450), 'number': 5},
        'marineangemon': {'pos': (300, 300), 'number': 1},
    }
    return [
        factory.birth(species, info['pos'], info['number'])
        for species, info in spawn_plan.items()
    ]
def main():
    """Run the simulation loop: update each group, draw the world, and show
    the rolling event log in the right-hand sidebar."""
    screen = config_window()
    digital_regions = init_map()
    digimon_groups = init_digimons(digital_regions)
    clock = pygame.time.Clock()
    event_list = []   # rolling log of world events shown in the sidebar
    event_size = 26   # max number of log lines kept on screen
    # Fonts are immutable: create them once here instead of rebuilding
    # them on every frame inside the loop.
    event_font = pygame.font.SysFont('microsoft Yahei', 20)
    title_font = pygame.font.SysFont('arial', 40)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
        # pygame.time.delay(200)
        screen.fill(THECOLORS['white'])
        region_pos_list = digital_regions.get_rect_tuples()
        region_color_list = digital_regions.get_region_colors()
        '''
        for i in range(digital_regions.get_region_size()):
            if region_color_list[i] == 'marineangemon home':
                continue
            pygame.draw.rect(screen, THECOLORS[region_color_list[i]], list(region_pos_list[i]), 0)
        '''
        # World area (left) and event sidebar background (right).
        pygame.draw.rect(screen, (178, 200, 187), [0, 0, 600, 600], 0)
        pygame.draw.rect(screen, (30, 41, 61), [600, 0, 200, 600], 0)
        for digimon_group in digimon_groups:
            digimon_group.group_walk(digimon_groups, event_list)
            digimon_group.group_blit(screen)
            digimon_group.remove_dead(event_list)
        if len(event_list):
            # Drop the oldest entry once the log exceeds its display size.
            if len(event_list) > event_size:
                event_list.pop(0)
            for i, event_str in enumerate(event_list):
                event_color = (255, 200, 10)
                if event_str.endswith('dead'):
                    event_color = (255, 0, 0)   # deaths highlighted in red
                surface = event_font.render(event_str, False, event_color)
                screen.blit(surface, (610, 70 + i * 20))
        surface_title = title_font.render('Digital Event', False, (255, 255, 255))
        screen.blit(surface_title, (610, 10))
        pygame.display.flip()
        clock.tick(5)   # cap the simulation at 5 frames per second
# Entry point guard: start the simulation only when run as a script.
if __name__ == '__main__':
    main()
|
from django.conf import settings
from dotenv import load_dotenv
import requests
import os
load_dotenv(verbose=True)
### getCoordinates
# Input: adress object
# Input Format: { 'Street_Address': '', 'City' : '', 'State' : '', 'Zip code' : ''}
# Output: lat and lng of given address
# Output Format: {'lat': '', 'lng': '' } or {} if no geocoordinates returned from API call
def getCoordinates(address):
    """Geocode an address dict via the Google Maps Geocoding API.

    Expected keys: street_address, city, state, zip_code.
    Returns {'lat': ..., 'lng': ...}, or {} when the API reports no result.
    """
    coordinates = {}
    curr_address = "{} {} {} {}".format(
        address.get("street_address"),
        address.get("city"),
        address.get("state"),
        str(address.get("zip_code")),
    )
    # Let requests build and URL-encode the query string; interpolating the
    # address into the URL by hand breaks on spaces, '&', '#' and other
    # reserved characters.
    response = requests.get(
        "https://maps.googleapis.com/maps/api/geocode/json",
        params={
            "address": curr_address,
            "key": os.getenv('GOOGLE_MAPS_API_KEY'),
        },
    )
    payload = response.json()
    if payload['status'] == "OK":
        location = payload['results'][0]['geometry']['location']
        coordinates['lat'] = location['lat']
        coordinates['lng'] = location['lng']
    return coordinates
from django.conf.urls.defaults import patterns, include, url
import settings
from django.contrib import admin
# Populate the admin site with every installed app's registered models.
admin.autodiscover()
# Root URL configuration: admin docs, admin, blog and products apps.
# NOTE(review): `django.conf.urls.defaults` and `patterns()` are Django <= 1.5
# APIs; an upgrade would require switching to a plain list of url() entries.
urlpatterns = patterns('',
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^blog/', include('blog.urls')),
    url(r'^produtos/', include('produtos.urls'))
)
# Serve user-uploaded media through Django itself - development only.
if settings.DEBUG:
    from django.conf.urls.static import static
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import unittest
import sbol3
import labop
import uml
from labop.execution_engine import ExecutionEngine
from labop_convert import MarkdownSpecialization
from labop_convert.behavior_specialization import DefaultBehaviorSpecialization
class TestSubprotocols(unittest.TestCase):
    def test_subexecutions(self):
        """Execute a protocol that nests two subprotocols around a custom
        primitive, then verify the recorded execution order and the list of
        subprotocol executions."""
        doc = sbol3.Document()
        sbol3.set_namespace("http://bbn.com/scratch/")
        sub_a = labop.Protocol("sub1")
        sub_b = labop.Protocol("sub2")
        custom_primitive = labop.Primitive("primitive1")
        protocol = labop.Protocol("protocol")
        for obj in (sub_a, sub_b, custom_primitive, protocol):
            doc.add(obj)
        # Steps run in insertion order: sub1, primitive1, sub2.
        protocol.primitive_step(sub_a)
        protocol.primitive_step(custom_primitive)
        protocol.primitive_step(sub_b)
        engine = ExecutionEngine()
        # Register a no-op handler so the engine knows how to execute the
        # custom primitive.
        engine.specializations[0]._behavior_func_map[
            custom_primitive.identity
        ] = lambda call, ex: None
        execution = engine.execute(
            protocol,
            sbol3.Agent("test_agent"),
            id="test_execution1",
            parameter_values=[],
        )
        ordered = execution.get_ordered_executions()
        self.assertListEqual(
            [step.identity for step in ordered],
            [
                "http://bbn.com/scratch/test_execution1/CallBehaviorExecution1",
                "http://bbn.com/scratch/test_execution1/CallBehaviorExecution2",
                "http://bbn.com/scratch/test_execution1/CallBehaviorExecution3",
            ],
        )
        if not engine.is_asynchronous:
            # Asynchronous execution will not include subprotocol executions,
            # rather just the tokens inside them that execution.
            sub_execs = execution.get_subprotocol_executions()
            self.assertListEqual(
                [se.protocol.lookup() for se in sub_execs],
                [sub_a, sub_b],
            )
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
def next_pal(val):
    """Return the smallest palindromic integer strictly greater than val."""
    candidate = val + 1
    # Count upward until the decimal representation reads the same reversed.
    while str(candidate) != str(candidate)[::-1]:
        candidate += 1
    return candidate
'''
There were and still are many problems in CW about palindrome numbers and
palindrome strings. We suppose that you know what kind of numbers they are.
If not, you may search for them using your favourite search engine.
In this kata you will be given a positive integer, val, and you have to create
the function next_pal() (nextPal in Javascript) that will output the smallest
palindrome number higher than val.
Let's see:
For Python
next_pal(11) == 22
next_pal(188) == 191
next_pal(191) == 202
next_pal(2541) == 2552
You will be receiving values higher than 10, all valid.
'''
|
import model_eval.eval_batch
|
# Count the vowels in the user's name, shown two ways: with filter/lambda
# and with an explicit loop.
name = input("enter your name: ")
vowels = {'a', 'e', 'i', 'o', 'u'}
# Lower-case the input first so capitalised vowels (e.g. "Adam") are
# counted too; the original comparison was case-sensitive and missed them.
#using lambda
matched = list(filter(lambda ch: ch in vowels, name.lower()))
print(len(matched))
print(matched)
#using for
count = 0
for ch in name.lower():
    if ch in vowels:
        count += 1
print(count)
|
from . import views
from rest_framework.routers import DefaultRouter
# Router that auto-generates the standard list/detail CRUD routes for the
# news endpoint.
# NOTE(review): "NewsVeiwSet" looks misspelled; it is defined in views.py,
# so it cannot be renamed from here without breaking the import.
router = DefaultRouter()
router.register(r'news', views.NewsVeiwSet)
|
'''
Map each gene to the names (terms) of its GO annotations.

Input 1 (gene_swiss_GO.id): tab-separated; column 1 is the gene id and
column 3 holds zero or more GO ids separated by ';'.
Input 2 (go_term_class.tab): tab-separated GO-id -> GO-term lookup table.
Output (gene_GOterm.out): "<gene_id>\t<go_id>#<term>;<go_id>#<term>..."
'''
# Build the GO id -> term lookup table. "with" guarantees the handles are
# closed (the original leaked both input file handles).
id_term_dict = {}
with open("go_term_class.tab") as term_table:
    for line in term_table:
        fields = line.strip().split("\t")
        id_term_dict[fields[0]] = fields[1]

with open("gene_swiss_GO.id") as gene_file, open("gene_GOterm.out", "w") as out_file:
    for line in gene_file:
        text_list = line.strip().split("\t")
        gene_id = text_list[0]
        if len(text_list) == 2:
            # No GO column for this gene: emit a blank placeholder.
            id_term = " "
        else:
            annotated = []
            for go_id in text_list[2].split(";"):
                go_id = go_id.strip()
                # Unknown GO ids keep their id with an empty term.
                annotated.append(go_id + "#" + id_term_dict.get(go_id, ""))
            id_term = ";".join(annotated)
        out_file.write(gene_id + "\t" + id_term + "\n")
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from image_util import show_image
from util import y_indicator, classification_rate
from util import pca_find_n_components, pca_transform
from logistic import Logistic
def get_data(nrows=None):
    """Load MNIST training data from data/mnist/train.csv.

    Each image is 28x28 = 784 pixels with raw values 0-255; pixels are
    scaled to [0, 1]. Returns (X, Y): the pixel matrix and digit labels.
    """
    frame = pd.read_csv('data/mnist/train.csv', nrows=nrows)
    pixel_columns = ['pixel%d' % i for i in range(784)]
    pixels = frame[pixel_columns].values / 255.0
    labels = frame['label'].values
    return pixels, labels
def get_normalised_data(nrows=None):
    '''
    Load MNIST training data and standardise it.

    Each image is 28x28 = 784 pixels, pixel values 0-255; after scaling to
    [0, 1] every feature is normalised to zero mean and unit variance.
    Returns (X_scaled, Y).
    '''
    from sklearn.preprocessing import scale
    frame = pd.read_csv('data/mnist/train.csv', nrows=nrows)
    pixel_columns = ['pixel%d' % i for i in range(784)]
    raw = frame[pixel_columns].values.astype(np.float32) / 255.0
    return scale(raw), frame['label'].values
def show_images(X, Y):
    """Display the first 10 images, each captioned with its label."""
    for idx in range(10):
        show_image(X[idx], label=str(Y[idx]))
def logistic_fit(Xtrain, Xtest, Ytrain, Ytest, nepochs=1000):
    """Fit a logistic-regression model and plot the train/test cost curves.

    The figure is prepared but not shown; the caller decides when to render.
    """
    classifier = Logistic()
    classifier.fit(Xtrain, Xtest, Ytrain, Ytest, nepochs=nepochs)
    plt.plot(classifier.costs_train, label='train')
    plt.plot(classifier.costs_test, label='test')
    plt.title('Cross entropy cost')
    plt.xlabel('iterations')
    plt.ylabel('cost')
    plt.legend()
    # plt.show()
def logistic_fit_pca(Xtrain, Xtest, Ytrain, Ytest, D, nepochs=1000):
    """Project the data onto D principal components, then fit as usual."""
    train_reduced, test_reduced = pca_transform(Xtrain, Xtest, D)
    logistic_fit(train_reduced, test_reduced, Ytrain, Ytest, nepochs=nepochs)
def main():
    """Load MNIST, split into train/test sets and fit a logistic model."""
    X, Y = get_data()
    # show_images(X,Y)
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(
        X, Y, test_size=0.1, random_state=42)
    logistic_fit(Xtrain, Xtest, Ytrain, Ytest)
    # npca = pca_find_n_components(X)
    # print('Number principle components:', npca)
    # logistic_fit_pca(Xtrain, Xtest, Ytrain, Ytest, npca)
# Entry point guard: run the pipeline only when executed as a script.
if __name__=='__main__':
    main()
|
from datetime import datetime
import json

# Simple guestbook: entries live in guestbook.json as a JSON list of dicts
# with "message", "date" and "time" keys. The user chooses to either read
# (l) or write (k) an entry. User-facing strings are in Finnish.
try:
    # Read the existing entries; "with" closes the handle even if the
    # JSON parse below raises (the original leaked the handle in that case).
    with open("guestbook.json", "r") as file_handle:
        messages = json.loads(file_handle.read())
    read_or_write = str(input("Haluatko lukea vai kirjoittaa vieraskirjaan? (l/k)\n"))
    if read_or_write == "l":
        # Print every stored message with its timestamp.
        for message in messages:
            message_text = message['message']
            date = message['date']
            time = message['time']
            print(f""""{message_text}", kirjoitettu {date}, klo {time}""")
    elif read_or_write == "k":
        # Timestamp the new entry.
        now = datetime.now()
        date = now.strftime("%d.%m.%Y")
        time = now.strftime("%H:%M:%S")
        write_input = input("Kirjoita uusi viesti:\n")
        # Append the new entry and persist the whole list back to disk.
        messages.append({
            "message": write_input,
            "date": date,
            "time": time
        })
        json_data = json.dumps(messages)
        with open("guestbook.json", "w") as file_handle:
            file_handle.write(json_data)
        print("Viesti tallennettu vieraskirjaan.")
    else:
        print("Väärä muoto!")
except ValueError:
    # Raised on malformed JSON in the guestbook file.
    # NOTE(review): a missing guestbook.json still raises FileNotFoundError
    # uncaught - presumably intentional; confirm before changing.
    print("Väärä muoto!")
|
# Canned JSON error-response templates, keyed by error name.
error_infos = {
    'not_found':{'status':404,'message':'not found','data':''},
    'forbidden':{'status':403,'message':'forbidden','data':''},
    'gateway_timeout':{'status':504,'message':'gateway timeout','data':''},
    'internal_server_error':{'status':500,'message':'internal server error','data':''}
}
# Per-coin JSON-RPC connection settings; 'method' selects the RPC dialect
# ('btc' = bitcoin-style, 'eth' = ethereum-style; eth nodes need no auth).
# SECURITY(review): RPC credentials are hard-coded here - consider moving
# them to environment variables or a secrets store.
rpc_infos = {
    'btc':{'rpc_port':8332,'rpc_user':'apx','rpc_password':'DEOXMEIO943JKDJFIE3312DFKIEOK','method':'btc'},
    'usdt':{'rpc_port':8338,'rpc_user':'usdt','rpc_password':'DJKQIEOOKDKLAKQOOEXMXMLLWOO','method':'btc'},
    'bch':{'rpc_port':8336,'rpc_user':'bch','rpc_password':'FEOPQSUOEODKLJAKLIEQPLALMNMXKIOQ','method':'btc'},
    'ltc':{'rpc_port':9337,'rpc_user':'exmoney','rpc_password':'TEIXMLW34803EDDKDLWQPAPW18389DKWOOPEOP','method':'btc'},
    'eth':{'rpc_port':8545,'method':'eth'},
    'etc':{'rpc_port':8546,'method':'eth'}
}
# Canned success-response templates; the empty 'data' fields are filled in
# by the caller before the response is returned.
success_infos={
    'new_address':{'status': 200,'message': 'success','data': { 'address': '' }},
    'validate_address':{'status': 200,'message': 'success','data': { 'info': '' }},
    'account':{'status': 200,'message': 'success','data': { 'info': '' }}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.