text stringlengths 8 6.05M |
|---|
import argparse
import os
import re
from modules.fitsfile import writefits
from modules.metafile import writemeta
from modules.textfile import writetext
def _outputpathlist(outputdir, gal):
file_endings = ['-folded-moments.txt',
'-folded-spectra.fits',
'-folded-misc.txt']
return [os.path.join(outputdir, gal, gal + e) for e in file_endings]
def _missingfiles(outputdir, gal):
    """Return True if any of the expected output files for `gal` is absent."""
    return any(not os.path.isfile(path)
               for path in _outputpathlist(outputdir, gal))
def _processgal(inputdir, outputdir, gal):
    """Assemble the public data products for one galaxy.

    Reads the reduction-pipeline files under
    `<inputdir>/<gal>/kinematics_paperversion/more_files` and writes the
    three public files returned by _outputpathlist().  Python 2 script
    (print statement syntax).
    """
    galdir_in = os.path.join(inputdir, gal, 'kinematics_paperversion',
                             'more_files')
    if not os.path.isdir(galdir_in):
        print 'No paperversion for {}, skipping to next galaxy'.format(gal)
        return
    # Input paths produced by pipeline stages s2-s4.
    s2_binspectra = os.path.join(galdir_in, gal + '-s2-folded-binspectra.fits')
    s2_fullgalaxy = os.path.join(galdir_in, gal + '-s2-folded-fullgalaxy.fits')
    s2_bininfo = os.path.join(galdir_in, gal + '-s2-folded-bininfo.txt')
    s3_A_temps_1 = os.path.join(galdir_in, gal + '-s3-A-folded-temps-1.txt')
    s3_A_temps_2 = os.path.join(galdir_in, gal + '-s3-A-folded-temps-2.txt')
    s3_B_moments = os.path.join(galdir_in, gal + '-s3-B-folded-moments.txt')
    s3_A_folded_main = os.path.join(galdir_in, gal + '-s3-A-folded-main.fits')
    s3_B_folded_main = os.path.join(galdir_in, gal + '-s3-B-folded-main.fits')
    s4_rprofiles = os.path.join(galdir_in, gal + '-s4-folded-rprofiles.txt')
    s2_params = os.path.join(galdir_in, gal + '_s2_params.txt')
    galdir_out = os.path.join(outputdir, gal)
    if not os.path.exists(galdir_out):
        os.makedirs(galdir_out)
    outputpaths = _outputpathlist(outputdir, gal)
    # 'w+b' because the two text outputs are rewound (seek(0)) and re-read
    # below while building the fits file.
    with open(outputpaths[0], 'w+b') as data_output, \
         open(outputpaths[2], 'w+b') as meta_output:
        writetext(s2_bininfo, s3_B_moments, s4_rprofiles, data_output)
        writemeta(gal, meta_output, s2_bininfo, s3_A_temps_1, s3_A_temps_2,
                  s2_params, s3_B_moments, s4_rprofiles)
        data_output.seek(0)
        meta_output.seek(0)
        writefits(s2_binspectra, s2_fullgalaxy, s3_A_folded_main,
                  s3_B_folded_main, data_output, s4_rprofiles,
                  meta_output, outputpaths[1])
def main():
    """Command-line entry point: choose galaxies, then process each one."""
    desc = 'Creates public data from MASSIVE survey reduced data.'
    parser = argparse.ArgumentParser(description=desc)
    # required arguments
    parser.add_argument('-d', '--directory', required=True,
                        help='Path to Reduced-Data folder.')
    parser.add_argument('-o', '--output', required=True,
                        help='Path to destination directory.')
    # optional arguments
    parser.add_argument('-i', '--include',
                        help='Comma separated list of galaxies to include.')
    parser.add_argument('-e', '--exclude',
                        help='Comma separated list of galaxies to exclude.')
    parser.add_argument('-skip', '--skipcompleted', action='store_true',
                        help='Skips galaxies that were previously processed.')
    args = vars(parser.parse_args())
    datadir = args['directory']
    outputdir = args['output']
    files = os.listdir(datadir)
    # Galaxy names look like capital letters followed by digits, e.g. NGC1600.
    search = re.compile(r'^[A-Z]+\d+$').search
    galaxies = set(m.group(0) for m in (search(f) for f in files) if m)
    if args['skipcompleted']:
        # Drop galaxies whose three output files already exist.
        alldirs = os.listdir(outputdir)
        galdirs = set(m.group(0) for m in (search(f) for f in alldirs) if m)
        completed = [x for x in galdirs if not _missingfiles(outputdir, x)]
        galaxies = galaxies.difference(completed)
    if args['include'] is not None:
        include = [x.strip() for x in args['include'].split(',')
                   if x and not x.isspace()]
        galaxies = galaxies.intersection(include)
    if args['exclude'] is not None:
        exclude = [x.strip() for x in args['exclude'].split(',')
                   if x and not x.isspace()]
        galaxies = galaxies.difference(exclude)
    for g in sorted(galaxies):
        print 'Processing {}'.format(g)
        _processgal(datadir, outputdir, g)
|
# as in tambura
from pippi import dsp
from pippi import tune
# MIDI device map: name -> id (7); presumably the 'lpd' pad controller used
# via ctl.get('midi') in play() — confirm against the pippi host config.
midi = {'lpd': 7}
def play(ctl):
    """Render one randomized pulsar-synthesis voice (pippi instrument).

    ctl -- host-provided control object; exposes 'param' and a 'midi'
    mapping whose 'lpd' entry is read below (controller values are then
    overwritten by random values, so the knobs are currently inactive).
    """
    param = ctl.get('param')
    lpd = ctl.get('midi').get('lpd')
    # 2-4 random degrees from a pentatonic-ish subset, rooted on Eb.
    scale = [ dsp.randchoose([1, 5, 8]) for s in range(dsp.randint(2, 4)) ]
    freqs = tune.fromdegrees(scale, root='eb', octave=dsp.randint(0, 2))
    freq = dsp.randchoose(freqs)
    pw = lpd.get(2, low=0.01, high=1, default=1)
    pw = dsp.rand(0.01, 1)  # overrides the controller value above
    modr = lpd.get(6, low=0.001, high=0.1)
    modr = dsp.rand(0.001, 0.005)  # overrides the controller value above
    modr = dsp.rand(0, modr)
    modf = dsp.rand(0.01, 0.05)
    amp = lpd.get(1, low=0, high=2, default=0)
    amp = dsp.rand(0.5, 0.8)  # overrides the controller value above
    length = dsp.stf(lpd.get(5, low=0.5, high=14, default=1) * dsp.rand(0.75, 2))
    length = dsp.stf(dsp.rand(1, 3) * dsp.rand(0.75, 2))  # overrides above
    # Random waveform, window and modulation breakpoint tables.
    wf = dsp.breakpoint([0] + [ dsp.rand(-1, 1) for w in range(5) ] + [0], 512)
    #wf = dsp.wavetable('sine2pi', 512)
    #wf = dsp.wavetable('sine2pi', 512)
    #win = dsp.wavetable('sine', 512)
    win = dsp.breakpoint([0] + [ dsp.rand(0, 1) for w in range(5) ] + [0], 512)
    mod = dsp.breakpoint([0] + [ dsp.rand(0, 1) for m in range(5) ] + [0], 512)
    layers = []
    harmonics = [1, 2, 3]
    for harmonic in harmonics:
        f = freq * harmonic
        if harmonic > 4:
            # NOTE(review): unreachable while harmonics is [1, 2, 3]
            a = dsp.rand(0.5, 1)
        else:
            a = amp * dsp.rand(0.5, 1)
        layer = dsp.pulsar(f, length, pw, wf, win, mod, modr, modf, a * 2)
        layer = dsp.env(layer, dsp.randchoose(['sine', 'tri', 'line', 'phasor']))
        layer = dsp.taper(layer)
        layer = dsp.pan(layer, dsp.rand())
        layer = dsp.mix([ dsp.drift(layer, dsp.rand(0.01, 0.03)), layer ])
        # Chop into 5ms-1500ms grains, shuffle, and re-join.
        layer = dsp.vsplit(layer, dsp.mstf(5), dsp.mstf(1500))
        layer = dsp.randshuffle(layer)
        layer = ''.join(layer)
        """
        if dsp.rand() > 0.5:
            layer = dsp.vsplit(layer, dsp.mstf(50), dsp.mstf(500))
            bit = dsp.randchoose(layer)
            bit = bit * dsp.randint(1, 3)
            bit = dsp.transpose(bit, dsp.randchoose([1, 2, 4, 8]))
            layer = ''.join(layer)
            layer = dsp.insert_into(layer, bit, dsp.randint(0, dsp.flen(layer) - dsp.flen(bit)))
        """
        layers += [ layer ]
    out = dsp.mix(layers)
    out = dsp.env(out, 'sine')
    out = dsp.env(out, 'hann')
    out = dsp.taper(out)
    #out = dsp.env(out, 'random')
    return out
|
# Public API of this settings module.
__all__ = ["PAGE_TITLE"]
# Title used for the page (consumed by importers of this module).
PAGE_TITLE = "Google"
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-11-09 10:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: create the `Database` table.

    Stores connection details for external databases; verbose_name values
    are Chinese UI labels kept as unicode escapes (environment, port,
    database name, username, password).
    """

    dependencies = [
        ('nova', '0030_sql'),
    ]

    operations = [
        migrations.CreateModel(
            name='Database',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('env', models.CharField(max_length=20, verbose_name='\u73af\u5883')),
                ('ip', models.CharField(max_length=20, verbose_name='ip')),
                ('port', models.CharField(max_length=20, verbose_name='\u7aef\u53e3')),
                ('db_name', models.CharField(max_length=20, verbose_name='\u6570\u636e\u5e93\u540d\u79f0')),
                ('username', models.CharField(max_length=20, verbose_name='\u7528\u6237\u540d')),
                ('password', models.CharField(max_length=20, verbose_name='\u5bc6\u7801')),
            ],
        ),
    ]
|
from string import ascii_lowercase, ascii_uppercase
from random import choice, randint, shuffle
from Dictionaries import uppercasedict as updict
from Dictionaries import lowercasedict as lowdict
from os import system
import sys
# NOTE(review): this path insert happens *after* the `from Dictionaries
# import ...` lines above, so it cannot affect those imports; '~' is also
# not expanded by the import system — expanduser() it if ever needed.
sys.path.insert (0, '~/Desktop/Matteo/Informatica/Python/Password/Dictionaries')

# Build an 8-character password: 4 lowercase, 2 uppercase, 2 digits.
pool = []
letters = []
password = ''

# LOWERCASE #
for _ in range(4):
    pool.append(choice(ascii_lowercase))
# UPPERCASE #
for _ in range(2):
    pool.append(choice(ascii_uppercase))
# NUMBER #
for _ in range(2):
    pool.append(str(randint(0, 9)))

# PASSWORD #
shuffle(pool)
password = ''.join(pool)

# Spell out each character via the project dictionaries when a mapping
# exists (presumably letter -> word spellings — confirm in Dictionaries).
for ch in password:
    if ch in updict.upperdict:
        letters.append(updict.upperdict[ch])
    elif ch in lowdict.lowerdict:
        letters.append(lowdict.lowerdict[ch])
    else:
        letters.append(ch)

# FILE #
# Context manager guarantees the file is closed even on error, and avoids
# shadowing the builtin name `file`.
with open('password.txt', 'w') as outfile:
    outfile.write(password + '\n')
    outfile.write(str(pool) + '\n')
    outfile.write(str(letters))

# OPEN FILE #
# The shell expands '~' here, so the literal path is fine for `system`.
command = 'gedit ~/Desktop/Matteo/Informatica/Python/Password/password.txt'
system(command)
|
from rv.modules import Behavior as B
from rv.modules import Module
from rv.modules.base.dcblocker import BaseDcBlocker
class DcBlocker(BaseDcBlocker, Module):
    """Radiant Voices DC Blocker module wrapper."""

    # Sits inline in the audio chain: consumes and produces audio.
    behaviors = {B.receives_audio, B.sends_audio}
|
from select import select
from tkinter import *
import tkinter.scrolledtext as scrolledtext
# Root window for the on-screen keyboard; fixed (non-resizable) size.
Keyboard_App = Tk()
Keyboard_App.title("Master keyboard")
Keyboard_App.resizable(0, 0)
def select(value):
    """Handle a virtual-keyboard button press by editing the text widget.

    value -- the button's label: "<-" means backspace, "space"/"Tab"
    insert whitespace, anything else is inserted literally.

    NOTE(review): this local function shadows `select` imported from the
    `select` module at the top of the file (that import appears unused).
    """
    if value == "<-":
        # text.get() includes a trailing newline, so slicing off the last
        # two characters removes exactly one typed character.
        txt = text.get(1.0, END)
        val = len(txt)
        text.delete(1.0, END)
        text.insert(1.0, txt[:val-2])
    elif value == "space":
        # BUG FIX: previously inserted "" (a no-op); insert a real space.
        text.insert(END, " ")
    elif value == "Tab":
        # BUG FIX: previously inserted "" (a no-op); insert a real tab.
        text.insert(END, "\t")
    else:
        text.insert(END, value)
# Shared text area the buttons type into.
text = scrolledtext.ScrolledText(Keyboard_App, width=120, wrap=WORD, padx=10, pady=10, relief=RIDGE)
text.grid(row=1, columnspan=16)
# Button labels: roughly QWERTY rows plus a numeric pad on the right.
buttons = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '< ', '7', '8', '9', '-',
           'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', '[', ']', '4', '5', '6', '+',
           'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', 'tab', '0', '1', '2', '3', '/',
           'caps', 'bskp', ';', '"', '-', '=', 'j', 'k', 'l', '[', ']', '4', '5', '6', '+', ]
varRow = 2
varCol = 0
for button in buttons:
    # Bind the current label as a default argument so each button keeps
    # its own value (avoids the late-binding closure pitfall).
    command = lambda x=button: select(x)
    if varRow != 8:
        Button(Keyboard_App, text=button, width=5, bg='black', fg='white', activebackground='black',
               relief=RIDGE, padx=8, pady=4, bd=4, command=command, ).grid(row=varRow, column=varCol)
    if button == 'space':
        # NOTE(review): no label in `buttons` equals 'space', so this
        # branch never fires — confirm intended.
        Button(Keyboard_App, text=button, width=5, bg='black', fg='white', activebackground='black',
               relief=RIDGE, padx=180, pady=4, bd=6, command=command, ).grid(row=6, columnspan=16)
    varCol += 1
    if varCol > 14 and varRow == 2:
        varCol = 0
        # NOTE(review): `+= 0` never advances the row — likely meant += 1.
        varRow += 0
    if varCol > 14 and varRow == 3:
        varCol = 0
        varRow = 1
Keyboard_App.mainloop()
def press():
    """No-op placeholder for an unbound key handler; always returns None."""
    pass
# Tuple layout (per the format string below): (hour, minute, year, month, day).
date = (3,30,2019,9,25)
# Prints "09/25/2019 03:30" — MM/DD/YYYY HH:MM, zero-padded via :0>2.
print(f"{date[3]:0>2}/{date[4]:0>2}/{date[2]} {date[0]:0>2}:{date[1]:0>2}")
# encoding: utf-8
# A flag to differentiate between client and worker code.
# False here — presumably this module is imported on the worker side;
# verify against the importing code.
IS_CLIENT = False
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 31 21:42:17 2020
@author: maurop
"""
# =============================================================================
# Imports
# =============================================================================
import time
#==============================================================================
# Rate class
# for update rate, fps, ...
#==============================================================================
class Rate:
    """Small helper to gate periodic work (fps and other update rates).

    rate -- minimum number of seconds that must elapse between two
    successive True results from is_time().
    """

    def __init__(self, rate):
        """Store the period and start the clock."""
        self.rate = rate
        self.init_time = time.time()

    def is_time(self):
        """Return True (and restart the clock) once `rate` seconds passed."""
        now = time.time()
        if now - self.init_time <= self.rate:
            return False
        self.init_time = now
        return True
from gensim.models.phrases import Phraser
from gensim.models import Word2Vec
from scipy.spatial.distance import cosine
from nltk import pos_tag
from collections import defaultdict
from ..nlp_utils.common import *
from ..nlp_utils.pos_tag import *
from ..nlp_utils.time import *
import numpy as np
# Module-level tagger singletons, built once at import time.
init_tagger = Tagger(locations)
time_tagger = TimeTagger()
e_tag = ElementTagger()
def process_for_ocr(text):
    """Expand OCR tokens into weighted prefixes/suffixes.

    For each token, the full token scores 1 and every prefix/suffix longer
    than one character scores proportionally to its share of the token.
    Returns {token: {substring: weight}} as nested defaultdicts.
    """
    scores = defaultdict(lambda: defaultdict(float))
    for token in text:
        scores[token][token] = 1
        size = len(token)
        for cut in range(1, size):
            prefix, suffix = token[:cut], token[cut:]
            if len(prefix) > 1:
                scores[token][prefix] += cut / size
            if len(suffix) > 1:
                scores[token][suffix] += 1 - cut / size
    return scores
def search(wordset, text):
    """Return keywords from `wordset` that appear as whole space-delimited
    words in `text`, preserving `wordset` order; empty keywords are skipped."""
    padded = " " + text + " "
    return [keyword for keyword in wordset
            if keyword and " " + keyword + " " in padded]
# Partial match only
def search_possible_location(text):
    """Return locations whose alias strings occur (whole-word, case-
    insensitive) in `text`.

    Relies on the module-level `locations` mapping (location -> list of
    alias strings) and on `re` coming from the star imports above — TODO
    confirm `re` is actually exported there.
    """
    results = []
    for location in locations:
        for i, extra in enumerate(locations[location]):
            if re.search(r'\b' + re.escape(extra) + r'\b', text, re.IGNORECASE):
                # NOTE(review): membership is checked on `extra` but
                # `location` is appended, so a location can repeat when
                # several of its aliases match — confirm intent.
                if extra not in results:
                    results.append(location)
    return results
# gps_location_sets = {location: set([pl for pl in location.lower().replace(',', ' ').split() if pl not in stop_words]) for location, gps in map_visualisation}
# Map lowercased location names back to their original-cased names from
# map_visualisation (whose entries also carry (lat, lon) coordinates).
gps_not_lower = {}
for loc in locations:
    for origin_doc, (lat, lon) in map_visualisation:
        if loc == origin_doc.lower():
            gps_not_lower[loc] = origin_doc
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
class Query:
    """Parsed representation of a lifelog search query.

    Extracts locations, regions, weekday/time/date constraints, quoted OCR
    terms and the remaining free text (used for CLIP retrieval) from a
    natural-language query string, and builds Elasticsearch-style filter
    fragments from them.  Depends on module-level globals from the
    nlp_utils star imports (locations, regions, countries, timeofday,
    stop_words, all_keywords, map_visualisation, ...).
    """

    def __init__(self, text, shared_filters=None):
        """Parse `text`; `shared_filters` optionally supplies fallback
        weekday/date constraints from a related query."""
        self.negative = ""
        self.disable_region = False
        # Em-dash ("—") flags are matched verbatim.
        if "—disable_region" in text:
            print("Disabling region")
            self.disable_region = True
            text = text.replace("—disable_region", "")
        self.disable_location = False
        if "—disable_location" in text:
            print("Disabling location")
            self.disable_location = True
            text = text.replace("—disable_location", "")
        if "NOT" in text:
            # Everything after NOT becomes negative keywords.
            text, self.negative = text.split("NOT")
            self.negative = self.negative.strip(". \n").lower()
            self.negative = [word for word in self.negative.split() if word in all_keywords]
        text = text.strip(". \n").lower()
        self.time_filters = None
        self.date_filters = None
        self.driving = False
        self.on_airplane = False
        self.ocr_queries = []
        self.location_queries = []
        self.query_visualisation = defaultdict(list)
        self.location_filters = []
        self.country_to_visualise = []
        self.extract_info(text, shared_filters)

    def extract_info(self, text, shared_filters=None):
        """Populate location/region/time/date/OCR/CLIP fields from `text`."""
        def search_words(wordset):
            # Whole-word search over the current (mutated) text.
            return search(wordset, text)
        self.original_text = text
        # Double-quoted phrases are treated as OCR terms and removed.
        quoted_text = " ".join(re.findall(r'\"(.+?)\"', text))
        text = text.replace(f'"{quoted_text}"', "")
        if "driving" in text:
            self.driving = True
            text = text.replace("driving", "")
        if "on airplane" in text:
            self.on_airplane = True
            # text = text.replace("on airplane", "")
        self.ocr = process_for_ocr(quoted_text.split())
        if not self.disable_location:
            self.locations = search_words(locations)
            self.place_to_visualise = [gps_not_lower[location] for location in self.locations]
            if self.locations:
                self.query_visualisation["LOCATION"].extend(self.locations)
            else:
                # No exact match: fall back to alias-based partial matching.
                possible_locations = search_possible_location(text)
                if possible_locations:
                    self.query_visualisation["POSSIBLE LOCATION(S)"].extend(possible_locations)
        else:
            self.locations = []
            self.place_to_visualise = []
        print("Locations:", self.locations)
        for loc in self.locations:
            # Remove only the last occurrence of each matched location.
            text = rreplace(text, loc, "", 1)  # TODO!
        if not self.disable_region:
            self.regions = search_words(regions)
        else:
            self.regions = []
        for reg in self.regions:
            self.query_visualisation["REGION"].append(reg)
            for country in countries:
                if reg == country.lower():
                    self.country_to_visualise.append({"country": country, "geojson": countries[country]})
        for region in self.regions:
            text = rreplace(text, region, "", 1)  # TODO!
        # processed = set([w.strip(",.") for word in self.regions +
        #                  self.locations for w in word.split()])
        # if not full_match:
        #     # self.locations.extend(search_words(
        #     #     [w for w in ["hotel", "restaurant", "airport", "station", "cafe", "bar", "church"] if w not in self.locations]))
        #     for loc in self.locations[len(self.gps_results):]:
        #         for place, _ in map_visualisation:
        #             if loc in place.lower().split():
        #                 self.place_to_visualise.append(place)
        # if full_match:
        #     for loc in self.locations:
        #         self.query_visualisation["LOCATION"].append(loc)
        # else:
        #     for loc in self.locations:
        #         self.query_visualisation["POSSIBLE LOCATION"].append(loc)
        self.weekdays = []
        self.dates = None
        # (hour, minute) bounds; default is the whole day.
        self.start = (0, 0)
        self.end = (24, 0)
        tags = time_tagger.tag(text)
        processed = set()
        for i, (word, tag) in enumerate(tags):
            if word in processed:
                continue
            if tag in ["WEEKDAY", "TIMERANGE", "TIMEPREP", "DATE", "TIME", "TIMEOFDAY"]:
                processed.add(word)
                # self.query_visualisation["TIME" if "TIME" in tag else tag].append(word)
                if tag == "WEEKDAY":
                    self.weekdays.append(word)
                elif tag == "TIMERANGE":
                    s, e = word.split("-")
                    self.start = adjust_start_end(
                        "start", self.start, *am_pm_to_num(s))
                    self.end = adjust_start_end("end", self.end, *am_pm_to_num(e))
                elif tag == "TIME":
                    if word in ["2015", "2016", "2018", "2019", "2020"]:
                        # Bare years are tagged TIME but are really dates.
                        self.dates = get_day_month(word)
                    else:
                        timeprep = ""
                        if i > 1 and tags[i-1][1] == 'TIMEPREP':
                            timeprep = tags[i-1][0]
                        if timeprep in ["before", "earlier than", "sooner than"]:
                            self.end = adjust_start_end(
                                "end", self.end, *am_pm_to_num(word))
                        elif timeprep in ["after", "later than"]:
                            self.start = adjust_start_end(
                                "start", self.start, *am_pm_to_num(word))
                        else:
                            # A bare time means "around": widen by +/- 1 hour.
                            h, m = am_pm_to_num(word)
                            self.start = adjust_start_end(
                                "start", self.start, h - 1, m)
                            self.end = adjust_start_end("end", self.end, h + 1, m)
                elif tag == "DATE":
                    self.dates = get_day_month(word)
                elif tag == "TIMEOFDAY":
                    if word not in ["lunch", "breakfast", "dinner", "sunrise", "sunset"]:
                        processed.add(word)
                        # self.query_visualisation["TIME" if "TIME" in tag else tag].append(word)
                    timeprep = ""
                    if i > 1 and tags[i-1][1] == 'TIMEPREP':
                        timeprep = tags[i-1][0]
                    if "early" in timeprep:
                        if "early; " + word in timeofday:
                            word = "early; " + word
                    elif "late" in timeprep:
                        if "late; " + word in timeofday:
                            word = "late; " + word
                    if word in timeofday:
                        s, e = timeofday[word].split("-")
                        self.start = adjust_start_end(
                            "start", self.start, *am_pm_to_num(s))
                        self.end = adjust_start_end(
                            "end", self.end, *am_pm_to_num(e))
                    else:
                        print(
                            word, f"is not a registered time of day ({timeofday})")
        print(processed)
        print(tags)
        if shared_filters:
            # Inherit weekday/date constraints from a related query when
            # this one does not set them.
            if not self.weekdays:
                self.weekdays.extend(shared_filters.weekdays)
            if self.dates is None:
                self.dates = shared_filters.dates
        # Whatever was not consumed above becomes the CLIP free-text query,
        # with trailing determiners/prepositions/stop-words stripped.
        unprocessed = [(word, tag) for (word, tag) in tags if word not in processed]
        last_non_prep = 0
        self.clip_text = ""
        for i in range(1, len(unprocessed) + 1):
            if unprocessed[-i][1] not in ["DT", "IN"] and unprocessed[-i][0] not in stop_words:
                last_non_prep = i
                break
        if last_non_prep > 1:
            self.clip_text = " ".join([word for word, tag in unprocessed[:-(last_non_prep - 1)]])
        else:
            self.clip_text = " ".join(
                [word for word, tag in unprocessed])
        self.clip_text = self.clip_text.strip(", ")
        print("CLIP:", self.clip_text)
        # self.query_visualisation[self.clip_text] = "CLIP"

    def get_info(self):
        """Return the visualisation payload for the UI."""
        return {"query_visualisation": [(hint, ", ".join(value)) for hint, value in self.query_visualisation.items()],
                "country_to_visualise": self.country_to_visualise,
                "place_to_visualise": self.place_to_visualise}

    def time_to_filters(self):
        """Build (and cache) Elasticsearch hour-range and date term filters."""
        if not self.time_filters:
            # Time
            s, e = self.start[0], self.end[0]
            if s > e:  # TODO!
                s, e = e, 24
            self.time_filters = {
                "range":
                {
                    "hour":
                    {
                        "gte": s,
                        "lte": e
                    }
                }
            }
            # Date
            self.date_filters = []
            if self.dates:
                y, m, d = self.dates
                if y:
                    self.date_filters.append({"term": {"year": str(y)}})
                if m:
                    self.date_filters.append(
                        {"term": {"month": str(m).rjust(2, "0")}})
                if d:
                    self.date_filters.append(
                        {"term": {"date": str(d).rjust(2, "0")}})
            if self.start[0] != 0 and self.end[0] != 24:
                self.query_visualisation["TIME"] = [f"{self.start[0]}:00 - {self.end[0]}:00"]
            if str(self.dates) != "None":
                self.query_visualisation["DATE"] = [str(self.dates)]
        return self.time_filters, self.date_filters

    def make_ocr_query(self):
        """Build (and cache) dis_max rank_feature queries for OCR words."""
        if not self.ocr_queries:
            self.ocr_queries = []
            # NOTE(review): the inner loop rebinds `ocr_word` (the outer
            # loop variable); it works because self.ocr[ocr_word] is
            # evaluated before the rebinding, but it is fragile — confirm.
            for ocr_word in self.ocr:
                dis_max = []
                for ocr_word, score in self.ocr[ocr_word].items():
                    dis_max.append(
                        {"rank_feature": {"field": f"ocr_score.{ocr_word}", "boost": 200 * score, "linear": {}}})
                self.ocr_queries.append({"dis_max": {
                    "queries": dis_max,
                    "tie_breaker": 0.0}})
        return self.ocr_queries
        # TODO: multiple word in OCR

    def make_location_query(self):
        """Build (and cache) geo_distance filters for matched locations."""
        if not self.location_filters:
            for loc in self.locations:
                place = gps_not_lower[loc]
                # NOTE(review): duplicated line — harmless, should be removed.
                place = gps_not_lower[loc]
                # Radius depends on how diffuse the place is.
                dist = "0.5km"
                pivot = "5m"
                if "airport" in loc or "home" in loc:
                    dist = "2km"
                    pivot = "200m"
                elif "dcu" in loc:
                    dist = "1km"
                    pivot = "100m"
                for place_iter, (lat, lon) in map_visualisation:
                    if place == place_iter:
                        # self.location_queries.append({
                        #     "distance_feature": {
                        #         "field": "gps",
                        #         "pivot": pivot,
                        #         "origin": [lon, lat],
                        #         "boost": score * 50
                        #     }
                        # })
                        self.location_filters.append({
                            "geo_distance": {
                                "distance": dist,
                                "gps": [lon, lat]
                            }
                        })
                        break
        # # General:
        # if len(self.gps_results) < len(self.locations):
        #     for loc in self.locations[len(self.gps_results):]:
        #         loc_set = set(loc.split())
        #         for place, (lat, lon) in map_visualisation:
        #             set_place = gps_location_sets[place]
        #             if loc_set.issubset(set_place):
        #                 pivot = "5m"
        #                 if "airport" in set_place:
        #                     pivot = "200m"
        #                     self.location_filters.append({
        #                         "geo_distance": {
        #                             "distance": "2km",
        #                             "gps": [lon, lat]
        #                         }
        #                     })
        #                 elif "dcu" in set_place:
        #                     pivot = "100m"
        #                 self.location_queries.append({
        #                     "distance_feature": {
        #                         "field": "gps",
        #                         "pivot": pivot,
        #                         "origin": [lon, lat],
        #                         "boost": len(loc_set) / len(set_place) * 50
        #                     }
        #                 })
        # if self.location_queries:
        #     return {"dis_max": {"queries": self.location_queries, "tie_breaker": 0.0}}
        return self.location_filters
|
from django import forms
from django.core import validators
from myapp.models import User
from .models import *
class Authentic(forms.ModelForm):
    """User registration/login form with a masked password input."""

    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ("username", "password", "first_name", "last_name", 'email')
class UploadForm(forms.ModelForm):
    """Form exposing only the slot name of a `seats` record."""

    class Meta:
        model = seats
        fields = ['slot_name']
|
def merge_the_tools(string, k):
    """Split `string` into len(string)//k consecutive chunks of length k
    and print each chunk with duplicate characters removed (first
    occurrence kept, order preserved)."""
    limit = (len(string) // k) * k
    chunks = [string[pos:pos + k] for pos in range(0, limit, k)]
    for chunk in chunks:
        # dict preserves insertion order, so fromkeys dedups in order.
        print("".join(dict.fromkeys(chunk)))
if __name__ == '__main__':
    # HackerRank-style stdin: the string on one line, chunk size k on the next.
    string, k = input(), int(input())
    merge_the_tools(string, k)
# From http://astroweb.case.edu/jakub/TA/aitoff.py
#USED to project Aitoff data points and grid lines (assumes input in degrees)
import numpy as np
import matplotlib.pyplot as plt
degrad = np.pi/180.  # degrees-to-radians conversion factor
def project(li, bi, lz):
    """Aitoff-project points (li, bi), in degrees, about centre longitude lz.

    Returns (xx, yy) scaled back to degrees.  Assumes li/bi support len()
    and numpy indexing (array-like) — TODO confirm for scalar callers.
    """
    sa = li-lz
    if len(sa) == 1:
        sa = np.zeros(1)+sa
    # Wrap longitudes >= 180 into the left half of the projection.
    x180 = np.where(sa >= 180.0)
    # NOTE(review): no-op statement — possibly meant `sa = sa.copy()` to
    # avoid mutating the caller's array below; confirm.
    sa = sa
    sa[x180] = sa[x180]-360.0*abs(np.cos(lz*degrad/2.))  # uncomment b=0
    alpha2 = sa*degrad/2.
    delta = bi*degrad
    r2 = np.sqrt(2.)
    f = 2.*r2/np.pi  # Aitoff normalisation factor
    cdec = np.cos(delta)
    denom = np.sqrt(1.+cdec*np.cos(alpha2))
    xx = cdec*np.sin(alpha2)*2.*r2/denom
    yy = np.sin(delta)*r2/denom
    # Scale projected radians back to degree-like units.
    xx = xx*(1./degrad)/f
    yy = yy*(1./degrad)/f
    return xx, yy
def project_grid(li, bi):
    """Aitoff-project grid coordinates (li, bi), in degrees, with the
    centre longitude lz fixed at 0.  Returns (xx, yy) in degrees."""
    half_alpha = -(li - 180.) * degrad / 2.  # UNCOMENT lz=0
    delta = bi * degrad
    root2 = np.sqrt(2.)
    norm = 2. * root2 / np.pi  # Aitoff normalisation factor
    cos_delta = np.cos(delta)
    denom = np.sqrt(1. + cos_delta * np.cos(half_alpha))
    xx = cos_delta * np.sin(half_alpha) * 2. * root2 / denom
    yy = np.sin(delta) * root2 / denom
    return xx * (1. / degrad) / norm, yy * (1. / degrad) / norm
def air_plot(Lex, Bex, Lex1, Bex1, X, Y, XX, YY, lz):
    """Draw projected Aitoff grid lines and their longitude/latitude labels.

    Lex/Bex and Lex1/Bex1 are the meridian/parallel label values; X, Y and
    XX, YY are their projected coordinates; lz is the centre longitude used
    to place labels left or right of each meridian.  Mutates Lex in place.
    """
    plt.plot(X, Y, color='black')
    plt.plot(XX.T, YY.T, color='black')
    # GRID LABELS
    for i in range(len(Lex)):
        if Lex[i] <= lz:
            fitter = XX[i]-8
        else:
            fitter = XX[i]+3
        if Lex[i] < 0:
            Lex[i] = Lex[i]+360
        if Lex[i] != 360.:
            plt.text(fitter, 0, str(int(Lex[i])), fontsize=12, rotation=90)
    for i in range(len(Bex1)):
        # BUG FIX: was `YY1[i]`, an undefined name (NameError at runtime);
        # YY holds the projected parallel coordinates, matching gridlines().
        if Bex1[i] > 0.:
            fitter = YY[i]+1
        else:
            fitter = YY[i]-6
        plt.text(1, fitter, str(int(Bex1[i])), fontsize=12)
    # END GRID LABELS
def gridlines(lz, fig, ax):
    """Draw a dashed, labelled Aitoff coordinate grid on `ax`.

    lz -- centre longitude in degrees; axis ticks are hidden because the
    projected grid carries its own text labels.
    """
    # Meridians every 45 deg, parallels every 30 deg; dense companions for lines.
    Lex = np.linspace(0, 360, 9)
    Bex = np.linspace(0, 180, 180)-90.
    Lex1 = np.linspace(0, 360, 360)
    Bex1 = np.linspace(0, 180, 7)-90.
    Lgrid, Bgrid = np.meshgrid(Lex, Bex)
    Lgrid1, Bgrid1 = np.meshgrid(Lex1, Bex1)
    X, Y = project_grid(Lgrid, Bgrid)
    XX, YY = project_grid(Lgrid1, Bgrid1)
    ax.plot(X, Y, '--', color='black')
    ax.plot(XX.T, YY.T, '--', color='black')
    for i in range(len(Lex)):
        # Offset meridian labels left/right of the line depending on side.
        if Lex[i] <= lz:
            fitter = X[int(X.shape[0]/2), Lex.size-1-i]-8
        else:
            fitter = X[int(X.shape[0]/2), Lex.size-1-i]+3
        # Relabel in sky coordinates centred on lz.
        Lex[i] = Lex[i]-180.-lz
        if Lex[i] < 0:
            Lex[i] = Lex[i]+360.
        if Lex[i] != 360.:
            ax.text(fitter, 0, str(int(Lex[i])), fontsize=12, rotation=90)
    for i in range(len(Bex1)):
        if Bex1[i] > 0.:
            fitter = YY[i, int(len(YY[i])/2)]+1
        else:
            fitter = YY[i, int(len(YY[i])/2)]-6
        ax.text(1, fitter, str(int(Bex1[i])), fontsize=12)
    fig.gca().get_xaxis().set_visible(False)
    fig.gca().get_yaxis().set_visible(False)
|
# -*- coding: utf-8 -*-
from irc3.utils import wraps_with_context
from irc3.compat import asyncio
import venusian
import re
def plugin(wrapped):
    """Mark a class as an irc3 client-side plugin and return it unchanged."""
    wrapped.__irc3_plugin__ = True
    wrapped.__irc3d_plugin__ = False
    return wrapped
class event:
    r"""register a method or function an irc event callback::

        >>> @event(r'^:\S+ 353 [^&#]+(?P<channel>\S+) :(?P<nicknames>.*)')
        ... def on_names(bot, channel=None, nicknames=None):
        ...     '''this will catch nickname when you enter a channel'''
        ...     print(channel, nicknames.split(':'))

    The callback can be either a function or a plugin method

    If you specify the `iotype` parameter to `"out"` then the event will be
    triggered when the regexp match something **sent** by the bot.

    For example this event will repeat private messages sent by the bot to the
    `#irc3` channel::

        >>> @event(r'PRIVMSG (?P<target>[^#]+) :(?P<data>.*)', iotype='out')
        ... def msg3(bot, target=None, data=None):
        ...     bot.privmsg('#irc3',
        ...                 '<{0}> {1}: {2}'.format(bot.nick, target, data))
    """

    # Held as a class attribute so tests can swap in a stub scanner.
    venusian = venusian

    def __init__(self, regexp, callback=None, iotype='in',
                 venusian_category='irc3.rfc1459'):
        if iotype == 'out':
            # rfc objects may carry a dedicated outbound pattern.
            re_out = getattr(regexp, 're_out', None)
            if re_out is not None:
                regexp = re_out
        try:
            # Validate the pattern early so a bad regexp fails at decoration
            # time, not at dispatch time.
            re.compile(getattr(regexp, 're', regexp))
        except Exception as e:
            raise e.__class__(str(e) + ' in ' + getattr(regexp, 're', regexp))
        self.regexp = regexp
        self.iotype = iotype
        self.callback = callback
        self.venusian_category = venusian_category
        self.iscoroutine = False
        if callback is not None:
            self.iscoroutine = asyncio.iscoroutinefunction(callback)

    def async_callback(self, kwargs):  # pragma: no cover
        """Invoke the stored callback with match groups as keyword args."""
        return self.callback(**kwargs)

    def compile(self, config):
        """Return a match function for the regexp, formatted with `config`."""
        regexp = getattr(self.regexp, 're', self.regexp)
        if config:
            regexp = regexp.format(**config)
        return re.compile(regexp).match

    def __call__(self, func):
        """Decorator entry point: defer binding via a venusian scan."""
        def callback(context, name, ob):
            obj = context.context
            # `info` is bound after venusian.attach() below but before the
            # scan invokes this callback.
            if info.scope == 'class':
                self.callback = getattr(obj.get_plugin(ob), func.__name__)
            else:
                self.callback = wraps_with_context(func, obj)
            # a new instance is needed to keep this related to *one* bot
            # instance
            e = self.__class__(self.regexp, self.callback,
                               venusian_category=self.venusian_category,
                               iotype=self.iotype)
            obj.attach_events(e)
        info = self.venusian.attach(func, callback,
                                    category=self.venusian_category)
        return func

    def __repr__(self):
        s = getattr(self.regexp, 'name', self.regexp)
        name = self.__class__.__name__
        return '<bound {0} {1} to {2}>'.format(name, s, self.callback)
def dcc_event(regexp, callback=None, iotype='in',
              venusian_category='irc3.dcc'):
    """Work like :class:`~irc3.dec.event` but occurs during DCC CHATs"""
    # Prefixing the iotype routes the event through the DCC dispatcher.
    return event(regexp, callback=callback, iotype='dcc_' + iotype,
                 venusian_category=venusian_category)
def extend(func):
    """Allow to extend a bot:

    Create a module with some useful routine:

    .. literalinclude:: ../examples/myextends.py
    ..
        >>> import sys
        >>> sys.path.append('examples')
        >>> from irc3 import IrcBot
        >>> IrcBot.defaults.update(asynchronous=False, testing=True)

    Now you can use those routine in your bot::

        >>> bot = IrcBot()
        >>> bot.include('myextends')
        >>> print(bot.my_usefull_function(1))
        my_usefull_function(*(1,))
        >>> print(bot.my_usefull_method(2))
        my_usefull_method(*(2,))
    """
    def callback(context, name, ob):
        # Runs at bot.include() time; binds the routine onto the bot object.
        obj = context.context
        if info.scope == 'class':
            instance = obj.get_plugin(ob)
            f = getattr(instance, func.__name__)
        else:
            instance = obj
            f = func
        # Descriptor protocol makes the function behave as a bound method.
        setattr(obj, f.__name__, f.__get__(instance, instance.__class__))
    info = venusian.attach(func, callback, category='irc3.extend')
    return func
|
import configparser
import networkx as nx
import itertools
import math
import random
import json
from tqdm import tqdm
import sys
import time
import timeit
import pickle
import sys
from pathlib import Path
class GenGraph(object):
    """Build per-question concept graphs from ConceptNet paths.

    Loads a pickled ConceptNet graph plus concept/relation vocabularies,
    then serialises one node-link JSON graph per QA item.
    """

    def __init__(self, config_path):
        """Read the config file and load the ConceptNet graphs."""
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        # Project root is two levels above the config file.
        self.root = Path(config_path).parent.parent
        self.load_cpnet()

    def load_resources(self):
        """Load vocab files and the paths/concepts data set by process()."""
        self.concept2id, self.id2concept = {}, {}
        # [3:] strips a leading path prefix from the configured value —
        # presumably './' plus one char or similar; TODO confirm.
        with open(self.root / self.config["paths"]["concept_vocab"][3:], "r", encoding="utf8") as f:
            for w in f.readlines():
                self.concept2id[w.strip()] = len(self.concept2id)
                self.id2concept[len(self.id2concept)] = w.strip()
        self.relation2id, self.id2relation = {}, {}
        with open(self.root / self.config["paths"]["relation_vocab"][3:], "r", encoding="utf8") as f:
            for w in f.readlines():
                self.id2relation[len(self.id2relation)] = w.strip()
                self.relation2id[w.strip()] = len(self.relation2id)
        with open(self.paths_fn, "rb") as fi:
            self.paths_data = pickle.load(fi)
        with open(self.concepts_fn, "r") as f:
            self.concept_data = json.load(f)

    def load_cpnet(self):
        """Load the pickled ConceptNet multigraph and collapse it into a
        simple weighted graph (parallel edge weights summed)."""
        self.cpnet = nx.read_gpickle(self.root / self.config["paths"]["conceptnet_en_graph"][3:])
        self.cpnet_simple = nx.Graph()
        for u, v, data in self.cpnet.edges(data=True):
            w = data['weight'] if 'weight' in data else 1.0
            if self.cpnet_simple.has_edge(u, v):
                self.cpnet_simple[u][v]['weight'] += w
            else:
                self.cpnet_simple.add_edge(u, v, weight=w)

    def get_edge(self, src_concept, tgt_concept):
        """Return the distinct relation ids between two concepts."""
        rel_list = self.cpnet[src_concept][tgt_concept]
        return list(set([rel_list[item]["rel"] for item in rel_list]))

    def plain_graph_generation(self, qcs, acs, paths, rels):
        """
        Plain graph generation: single edge type, unit weights.
        Returns the graph serialised as node-link JSON; mutates empty
        qcs/acs by appending a -1 placeholder node.
        """
        graph = nx.Graph()
        for index, p in enumerate(paths):
            for c_index in range(len(p)-1):
                h = p[c_index]
                t = p[c_index+1]
                # TODO: the weight can computed by concept embeddings and relation embeddings of TransE
                graph.add_edge(h, t, weight=1.0)
        # Connect question concepts (and answer concepts) that are direct
        # neighbours in ConceptNet.
        for qc1, qc2 in list(itertools.combinations(qcs, 2)):
            if self.cpnet_simple.has_edge(qc1, qc2):
                graph.add_edge(qc1, qc2, weight=1.0)
        for ac1, ac2 in list(itertools.combinations(acs, 2)):
            if self.cpnet_simple.has_edge(ac1, ac2):
                graph.add_edge(ac1, ac2, weight=1.0)
        if len(qcs) == 0:
            qcs.append(-1)
        if len(acs) == 0:
            acs.append(-1)
        if len(paths) == 0:
            # No paths at all: weakly connect every qc to every ac.
            for qc in qcs:
                for ac in acs:
                    graph.add_edge(qc, ac, rel=-1, weight=0.1)
        g = nx.convert_node_labels_to_integers(graph, label_attribute='cid')  # re-index
        g_str = json.dumps(nx.node_link_data(g))
        return g_str

    def relational_graph_generation(self, qcs, acs, paths, rels):
        """
        Relational graph generation, multiple edge types.
        Same as plain_graph_generation() but keeps one edge per relation id.
        """
        graph = nx.MultiDiGraph()
        for index, p in enumerate(paths):
            rel_list = rels[index]
            for c_index in range(len(p)-1):
                h = p[c_index]
                t = p[c_index+1]
                if graph.has_edge(h, t):
                    existing_r_set = set([graph[h][t][r]["rel"] for r in graph[h][t]])
                else:
                    existing_r_set = set()
                for r in rel_list[c_index]:
                    # TODO: the weight can computed by concept embeddings and relation embeddings of TransE
                    # TODO: do we need to add both directions?
                    if r in existing_r_set:
                        continue
                    graph.add_edge(h, t, rel=r, weight=1.0)
        for qc1, qc2 in list(itertools.combinations(qcs, 2)):
            if self.cpnet_simple.has_edge(qc1, qc2):
                rs = self.get_edge(qc1, qc2)
                for r in rs:
                    graph.add_edge(qc1, qc2, rel=r, weight=1.0)
        for ac1, ac2 in list(itertools.combinations(acs, 2)):
            if self.cpnet_simple.has_edge(ac1, ac2):
                rs = self.get_edge(ac1, ac2)
                for r in rs:
                    graph.add_edge(ac1, ac2, rel=r, weight=1.0)
        if len(qcs) == 0:
            qcs.append(-1)
        if len(acs) == 0:
            acs.append(-1)
        if len(paths) == 0:
            for qc in qcs:
                for ac in acs:
                    graph.add_edge(qc, ac, rel=-1, weight=0.1)
        g = nx.convert_node_labels_to_integers(graph, label_attribute='cid')  # re-index
        g_str = json.dumps(nx.node_link_data(g))
        return g_str

    def process(self, concepts_fn, paths_fn):
        """Generate one plain graph per QA item and write them, one JSON
        line each, to `<paths_fn stem>_graph` next to `paths_fn`."""
        self.concepts_fn = concepts_fn
        self.paths_fn = paths_fn
        self.load_resources()
        final_text = ""
        for index, qa_pairs in tqdm(enumerate(self.paths_data), desc="Building Graphs", total=len(self.paths_data)):
            # print(self.concepts_data[index])
            # print(self.paths_data[index])
            # print(qa_pairs)
            statement_paths = []
            statement_rel_list = []
            for qa_idx, qas in enumerate(qa_pairs):
                if qas["paths"] is None:
                    cur_paths = []
                    cur_rels = []
                else:
                    cur_paths = [item["path"] for item in qas["paths"]]
                    cur_rels = [item["rel"] for item in qas["paths"]]
                statement_paths.extend(cur_paths)
                statement_rel_list.extend(cur_rels)
            qcs = [self.concept2id[c] for c in self.concept_data[index]["qc"]]
            acs = [self.concept2id[c] for c in self.concept_data[index]["ac"]]
            gstr = self.plain_graph_generation(qcs=qcs, acs=acs, paths=statement_paths, rels=statement_rel_list)
            final_text += gstr + "\n"
        out_graph_fn = Path(self.paths_fn).parent / f'{Path(self.paths_fn).stem}_graph'
        with open(out_graph_fn, 'w') as fw:
            fw.write(final_text)
        print(f"Graph Done: {out_graph_fn}")
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import time
import homie
import logging
# Configure root logging once for this process; module-level logger below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from modules.homiedevice import HomieDevice
from modules.mysql import db
class Schedule(HomieDevice):
    """Homie device that polls the DB schedule table and pushes state changes.

    loopHandler() is expected to be called periodically (see main below).
    """
    # Last state pushed per propertyid; class-level, so it is shared by all
    # instances -- fine here because only one Schedule is ever created.
    _states = {}
    def loopHandler(self):
        """Evaluate every schedule row and emit set() for changed states."""
        # Number of currently active+connected devices; schedules with
        # requiredevice only fire when at least one device is online.
        devs = self._db.pq("""SELECT count(deviceid) as count FROM device WHERE active=1 AND connected=1""")
        devs = int(devs[0]['count'])
        # The heating property is handled elsewhere, so it is skipped below.
        heating_device = self._db.pq("""SELECT value FROM options WHERE name = 'heating_control_property'""")
        if not len(heating_device):
            heating_device = None
        else:
            heating_device = int(heating_device[0]['value'])
        # For each property: enabled/active aggregated over its schedules;
        # a schedule is active when a component covers the current weekday
        # and time-of-day and the day-of-year window matches.
        schedules = self._db.pq("""SELECT propertyid, devicestring, nodestring, propertystring, max(enabled) as enabled, max(active) as active, invert FROM (
            SELECT p.propertyid, p.devicestring, p.nodestring, p.propertystring, s.enabled,
            IF(sc.schedulecomponentid IS NOT NULL
                AND DAYOFYEAR(CURRENT_TIMESTAMP) >= DAYOFYEAR(s.start)
                AND DAYOFYEAR(CURRENT_TIMESTAMP) <= DAYOFYEAR(s.end)
                AND s.enabled = 1
                AND IF(s.requiredevice, %s, 1)
            , 1, 0) as active, s.invert, s.requiredevice
            FROM schedule s
            LEFT OUTER JOIN schedulecomponent sc ON s.scheduleid = sc.scheduleid
                AND DAYOFWEEK(CURRENT_TIMESTAMP) = sc.day
                AND TIME(CURRENT_TIMESTAMP) >= sc.start AND TIME(CURRENT_TIMESTAMP) < sc.end
            INNER JOIN property p ON s.propertyid = p.propertyid
            GROUP BY s.scheduleid
        ) inr GROUP BY propertyid""", [devs > 0])
        for s in schedules:
            if s['propertyid'] == heating_device:
                continue
            # invert flips the meaning of "active" for this property.
            if s['active'] == 1 and s['enabled'] == 1:
                newstate = 0 if s['invert'] == 1 else 1
            else:
                newstate = 1 if s['invert'] == 1 else 0
            # print s['devicestring'],s['nodestring'],s['propertystring'], s['active'], s['invert'] == '1', newstate
            if not (s['propertyid'] in self._states):
                # First observation: remember it without emitting a set().
                self._states[s['propertyid']] = newstate
            else:
                if self._states[s['propertyid']] != newstate:
                    logger.info('Schedule changed state: {d}/{n}/{p} val: {v}'.format(
                        d=s['devicestring'],
                        n=s['nodestring'],
                        p=s['propertystring'],
                        v=newstate
                    ))
                    self.set(s, newstate)
                    self._states[s['propertyid']] = newstate
def main():
    """Wire up the database and Homie client, then poll schedules forever."""
    database = db()
    homie_client = homie.Homie("configs/schedule.json")
    scheduler = Schedule(database, homie_client)
    homie_client.setFirmware("schedule-controller", "1.0.0")
    homie_client.setup()
    # Re-evaluate all schedules every five seconds, indefinitely.
    while True:
        scheduler.loopHandler()
        time.sleep(5)
if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt, SystemExit):
        # Exit quietly on Ctrl-C or an explicit SystemExit.
        logger.info("Quitting.")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
# Funktioner
def find_prot(ecoli_dict, protein_name):
    u""" Find a protein in ecoli_dict under the key protein_name.

    args:
        ecoli_dict: dict(String, String)
        protein_name: String
    returns:
        protein_sequence: String
    errors:
        If no protein named protein_name exists, an Exception with an
        error message is RETURNED (not raised) -- callers must check the
        result type.
    """
    try:
        return ecoli_dict[protein_name]
    except KeyError:
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt, TypeError) are no longer swallowed.
        return Exception("Kunne ikke finde ecoli protein med navnet %s" % protein_name)
def find_prot2(ecoli_dict, protein_regex):
    u""" Return every key of ecoli_dict that matches protein_regex.

    An empty list is returned when nothing matches.
    args:
        ecoli_dict: dict(String, String)
        protein_regex: String
    returns:
        keys: List(String)
    """
    pattern = re.compile(protein_regex)
    return [key for key in ecoli_dict if pattern.match(key)]
def read_fasta(filename):
    u""" Read a FASTA file into a dict mapping record name -> sequence.

    args:
        filename: String
    returns:
        fasta_dict: dict(String, String)

    Bug fix: the original never parsed the FINAL record -- the lines
    collected after the last ">" header were dropped when the loop ended.
    The `del fasta_dict[""]` hack is also gone: empty names are simply
    never inserted.
    """
    def _parse_record(lines):
        # Split the ">name" header off and join sequence lines,
        # stripping trailing newlines (robust to a missing final newline).
        if not lines:
            return ("", "")
        name = lines[0][1:].rstrip("\n")
        protein = "".join(line.rstrip("\n") for line in lines[1:])
        return (name, protein)

    fasta_dict = {}
    unparsed_strings = []
    with open(filename, "r") as fasta_file:
        for line in fasta_file:
            if line.startswith(">"):  # start of a new record
                name, protein = _parse_record(unparsed_strings)
                if name:
                    fasta_dict[name] = protein
                unparsed_strings = [line]
            else:
                unparsed_strings.append(line)
    # Flush the last record, which the loop above never reaches.
    name, protein = _parse_record(unparsed_strings)
    if name:
        fasta_dict[name] = protein
    return fasta_dict
# Hjælpefunktioner
def __parse_fasta(lines):
    u""" Internal helper.

    Splits the record name from the protein sequence and strips newlines.
    Returns ("", "") for an empty input list.
    """
    if not lines:
        return ("", "")
    # First char of the header is ">", last char is the newline.
    name = lines[0][1:-1]
    protein = "".join(line[:-1] for line in lines[1:])
    return (name, protein)
from django.shortcuts import render
from django.views.generic import View
# Create your views here.
class Taxation_ListView(View):
    """Render the static taxation overview page."""
    template_name = "taxation/taxation_list.html"

    def get(self, *args, **kwargs):
        return render(self.request, self.template_name)
class November_2019View(View):
    """Render the November 2019 taxation page."""
    template_name = "taxation/2019_November_taxation.html"

    def get(self, *args, **kwargs):
        return render(self.request, self.template_name)
class May_2019View(View):
    """Render the May 2019 taxation page."""
    template_name = "taxation/2019_May_taxation.html"

    def get(self, *args, **kwargs):
        return render(self.request, self.template_name)
class November_2018View(View):
    """Render the November 2018 taxation page."""
    template_name = "taxation/2018_November_taxation.html"

    def get(self, *args, **kwargs):
        return render(self.request, self.template_name)
def detect_anagrams(the_word, word_list):
    """Return the candidates in word_list that are anagrams of the_word.

    Comparison is case-insensitive, and a word is never its own anagram
    (candidates equal to the_word, ignoring case, are excluded).

    Perf fix: `sorted(the_word.lower())` was recomputed for every
    candidate; it is invariant, so hoist it out of the loop.
    """
    target = the_word.lower()
    target_key = sorted(target)
    return [word for word in word_list
            if sorted(word.lower()) == target_key
            and word.lower() != target]
|
#by 李星星
import poplib
import html
import time
import DBaction
from email.parser import Parser
from email.header import decode_header
from email.utils import parseaddr
# NOTE(security): credentials are hard-coded in source; move them to a
# config file or environment variables before publishing this module.
email='1678120695@qq.com'
password='veztvpjocggzjbdb2'   # NOTE(review): differs from password1 by a trailing '2' -- deliberately invalid? confirm
password1='veztvpjocggzjbdb'
server='pop.qq.com'
def judgePass(E, P):
    """Return True when (E, P) are valid POP3 credentials for pop.qq.com.

    Opens an SSL POP3 session, attempts the login, and closes it again.
    Any protocol or network failure counts as "invalid".
    """
    try:
        # Renamed the local (the original shadowed the module-level
        # `server` string).
        conn = poplib.POP3_SSL('pop.qq.com')
        conn.user(E)
        conn.pass_(P)
        conn.quit()
    except (poplib.error_proto, OSError):
        # Narrowed from a bare `except:`: auth failures raise error_proto,
        # connection problems raise OSError subclasses; anything else
        # (e.g. KeyboardInterrupt) now propagates.
        return False
    else:
        return True
|
from django import forms
# Listing categories as (stored value, human-readable label) pairs for the
# ChoiceField below.
CATEGORIES = [
    ("Home", "Home"),
    ("Technology", "Technology"),
    ("Sport", "Sport"),
    ("Fashion", "Fashion")
]
"""
Source: https://docs.djangoproject.com/en/3.0/topics/forms/#rendering-fields-manually
https://docs.djangoproject.com/en/3.0/ref/forms/widgets/
"""
class ListingForm(forms.Form):
    """Form for creating a new auction listing."""
    # "widget" is the element rendered in HTML: TextInput <=> <input type="text"...>
    # "attrs" are the tag's attributes and must accompany the widget.
    title = forms.CharField(min_length = 5, max_length = 64, widget=forms.TextInput (attrs={'class':'form-control'}))
    # <select> widgets take a "choices" iterable used to build the <option>s.
    category = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}), choices = CATEGORIES)
    description = forms.CharField(min_length = 10, max_length = 256, widget=forms.TextInput(attrs={'class':'form-control'}))
    starting_bid = forms.FloatField(widget=forms.NumberInput(attrs={'class':'form-control'}))
    # A default "value" is pre-filled so the form still submits when the
    # user leaves the image URL empty.
    image = forms.URLField(min_length = 0, max_length = 2048, widget=forms.URLInput
    (attrs={'class':'form-control',
    'value':'https://bom.to/79jrla'}))
class BidForm(forms.Form):
    """Single-field form for placing a bid on a listing."""
    money = forms.FloatField(widget=forms.NumberInput(attrs={'class':'form-control', 'placeholder':'Place bid'}))
# Comment ratings as (stored value, label), best (5) to worst (1).
RATINGS= [
    (5, "Excellent"),
    (4, "Good"),
    (3, "Normal"),
    (2, "Bad"),
    (1, "Terrible")
]
class CommentForm(forms.Form):
    """Form for leaving a comment plus a 1-5 rating on a listing."""
    comment_content = forms.CharField(max_length = 512, widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':'Add Comment'}))
    comment_rating = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}), choices = RATINGS)
from django.contrib import admin
from doctors.models import Specialization, Domain, Doctor, Appointment, Review, LocationDoctor, BusinessWork
# Expose every doctors-app model in the Django admin with default options.
admin.site.register(Specialization)
admin.site.register(Domain)
admin.site.register(Doctor)
admin.site.register(Appointment)
admin.site.register(Review)
admin.site.register(LocationDoctor)
admin.site.register(BusinessWork)
import numpy as np
from math import sqrt
import pandas as pd
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# sklearn
# Toy 2-D dataset: six points, only the first labelled 1, the rest 0.
X = [[2, 3], [5, 4], [8, 1], [4, 7], [7, 2], [9, 6]]
y = [1, 0, 0, 0, 0, 0]
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=1)
neigh.fit(X, y)
print(neigh.predict([[3, 4.5]]))
print(neigh.predict_proba([[3, 4.5]]))
# Same data as one array; the LAST column is the class label.
train = np.array([[2, 3, 1], [5, 4, 0], [8, 1, 0], [4, 7, 0], [7, 2, 0], [9, 6, 0]])
# NOTE(review): this loop is a no-op -- each row of `train` is already an
# ndarray; re-assigning np.array(arr) changes nothing.
for i, arr in enumerate(train):
    train[i] = np.array(arr)
test = np.array([[3, 4.5, 0]])
# iris = load_iris()
# df = pd.DataFrame(iris.data, columns=iris.feature_names)
# df['label'] = iris.target
# df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
#
#
# data = np.array(df.iloc[:100, [0, 1, -1]])
# train, test = train_test_split(data, test_size=0.4)
class Node:
    """A kd-tree node: one stored sample plus depth and two children."""
    def __init__(self, data, depth = 0, lchild = None, rchild = None):
        self.data = data        # sample vector (features followed by label)
        self.depth = depth      # depth in the tree; selects the split axis
        self.lchild = lchild    # subtree with smaller values on the axis
        self.rchild = rchild    # subtree with larger values on the axis
class KdTree:
    """Kd-tree built from labelled samples, with k-nearest-neighbour search."""
    def __init__(self):
        self.KdTree = None    # root node, set by build() at depth 0
        self.n = 0            # number of feature dimensions (label excluded)
        self.nearest = None   # search scratch: array of [distance, Node] rows
    def build(self, dataset, depth=0):
        """Recursively build the subtree for `dataset`; returns its root."""
        if len(dataset) > 0:
            m, n = np.shape(dataset)
            # The last column is the class label, so only n-1 axes split.
            self.n = n-1
            axis = depth % self.n
            mid = int(m / 2)
            # Sort along the split axis and take the median as this node.
            datasetcopy = sorted(dataset, key = lambda x: x[axis])
            node = Node(datasetcopy[mid], depth)
            if depth == 0:
                self.KdTree = node
            node.lchild = self.build(datasetcopy[ : mid], depth + 1)
            node.rchild = self.build(datasetcopy[mid+1 : ], depth + 1)
            return node
        return None
    def search(self, x, count = 1):
        """Return (nearest array, majority label) for query point x.

        NOTE(review): the distance-based pruning is commented out below,
        so every node is visited; and the vote reads self.nearest[:1]
        (one neighbour) even when count > 1 -- confirm whether [:count]
        was intended.
        """
        nearest = []
        for i in range(count):
            nearest.append([-1, None])   # distance -1 marks an unfilled slot
        self.nearest = np.array(nearest)
        def recurve(node):
            if node is not None:
                axis = node.depth % self.n
                daxis = x[axis] - node.data[axis]
                # Descend the half containing x first.
                if daxis < 0:
                    recurve(node.lchild)
                else:
                    recurve(node.rchild)
                # Euclidean distance over the feature columns only
                # (zip stops before node.data's trailing label).
                dist = sqrt(sum((p1 - p2) ** 2 for p1, p2 in zip(x, node.data)))
                # Insert in sorted position; drop the now-surplus tail row.
                for i, d in enumerate(self.nearest):
                    if d[0] < 0 or dist < d[0]:
                        self.nearest = np.insert(self.nearest, i, [dist, node], axis = 0)
                        self.nearest = self.nearest[:-1]
                        break
                # n = list(self.nearest[:, 0]).count(-1)
                # if self.nearest[-n-1, 0] > abs(daxis):
                # Pruning disabled: always visit the other half too.
                if daxis < 0:
                    recurve(node.rchild)
                else:
                    recurve(node.lchild)
        recurve(self.KdTree)
        knn = self.nearest[: 1]
        belong = []
        for i in knn:
            belong.append(i[-1].data[-1])
        # Majority vote over the selected neighbours' labels.
        b = max(set(belong), key=belong.count)
        return self.nearest, b
# Evaluate the hand-rolled kd-tree on the test rows (last column = label).
kdt = KdTree()
kdt.build(train)
score = 0
for x in test:
    near, belong = kdt.search(x[:-1], 5)
    if belong == x[-1]:
        score += 1
    print('test: ')
    print(x, 'predict:', belong)
    print('nearest:')
    for n in near:
        # Each row of `near` is [distance, Node].
        print(n[1].data, 'dist:', n[0])
|
import torch
import torch.nn as nn
from models import model_utils
from utils import eval_utils
from collections import OrderedDict
import numpy as np
def fuse_features(feats, opt):
    """Fuse a list of per-view feature tensors into one tensor.

    Args:
        feats: list of tensors of identical shape.
        opt: dict whose 'fuse_type' is 'mean' or 'max'.

    Returns:
        Tensor with the same shape as each element of feats.

    Raises:
        ValueError: for an unknown 'fuse_type'. (The original fell
        through and raised UnboundLocalError instead.)
    """
    stacked = torch.stack(feats, 1)
    if opt['fuse_type'] == 'mean':
        return stacked.mean(1)
    if opt['fuse_type'] == 'max':
        return stacked.max(1)[0]
    raise ValueError("unknown fuse_type: %r" % (opt['fuse_type'],))
def spherical_class_to_dirs(x_cls, y_cls, cls_num):
    """Map discretized (x, y) angle classes back to unit direction vectors.

    Each class index is taken at its bin centre in [-90, 90] degrees,
    clamped, converted to radians, and the two spherical angles are
    decoded into normalized (x, y, z) directions stacked along dim 1.
    """
    theta = (x_cls.float() + 0.5) / cls_num * 180 - 90
    phi = (y_cls.float() + 0.5) / cls_num * 180 - 90
    theta = theta.clamp(-90, 90) / 180.0 * np.pi
    phi = phi.clamp(-90, 90) / 180.0 * np.pi
    # Solve for (x, y, z) on the unit sphere from the two angles.
    y = torch.sin(phi)
    tan_theta_sq = torch.tan(theta) ** 2
    z = torch.sqrt((1 - y * y) / (1 + tan_theta_sq))
    x = z * torch.tan(theta)
    dirs = torch.stack([x, y, z], 1)
    # Renormalize to guard against floating-point drift.
    return dirs / dirs.norm(p=2, dim=1, keepdim=True)
def convert_dirs(l_dirs_x, l_dirs_y, opt, dirs_step):
    """Soft-argmax over per-view direction logits, decoded to unit vectors."""
    logits_x = torch.cat(l_dirs_x, 0).squeeze()
    logits_y = torch.cat(l_dirs_y, 0).squeeze()
    # Expected bin index under the softmax distribution (soft-argmax).
    x_idx = (torch.nn.functional.softmax(logits_x, dim=1) * dirs_step).sum(1)
    y_idx = (torch.nn.functional.softmax(logits_y, dim=1) * dirs_step).sum(1)
    return spherical_class_to_dirs(x_idx, y_idx, opt['dirs_cls'])
def convert_intens(l_ints, opt, ints_step):
    """Soft-argmax over intensity logits, decoded and tiled to 3 channels."""
    logits = torch.cat(l_ints, 0).view(-1, opt['ints_cls'])
    # Expected class index under the softmax distribution (soft-argmax).
    idx = (torch.nn.functional.softmax(logits, dim=1) * ints_step).sum(1)
    ints = eval_utils.class_to_light_ints(idx, opt['ints_cls'])
    return ints.view(-1, 1).repeat(1, 3)
class FeatExtractor(nn.Module):
    """Per-view CNN encoder: four stride-2 convs downsample by 16x,
    widening 32 -> 64 -> 128 -> 256 channels.

    NOTE(review): the c_out argument is unused -- the final width is
    hard-coded to 256 in conv7.
    """
    def __init__(self, opt, c_in=4, c_out=256):
        super(FeatExtractor, self).__init__()
        batchNorm = opt['use_BN']  # toggles batch norm inside conv_layer
        self.conv1 = model_utils.conv_layer(batchNorm, c_in, 32, k=3, stride=2, pad=1, afunc='LReLU')
        self.conv2 = model_utils.conv_layer(batchNorm, 32, 64, k=3, stride=2, pad=1)
        self.conv3 = model_utils.conv_layer(batchNorm, 64, 64, k=3, stride=1, pad=1)
        self.conv4 = model_utils.conv_layer(batchNorm, 64, 128, k=3, stride=2, pad=1)
        self.conv5 = model_utils.conv_layer(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.conv6 = model_utils.conv_layer(batchNorm, 128, 128, k=3, stride=2, pad=1)
        self.conv7 = model_utils.conv_layer(batchNorm, 128, 256, k=3, stride=1, pad=1)
    def forward(self, inputs):
        """Run the seven-conv stack; returns the 256-channel feature map."""
        out = self.conv1(inputs)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        out = self.conv6(out)
        out = self.conv7(out)
        return out
class Classifier(nn.Module):
    """Classification head over fused features.

    Three stride-2 convs shrink the map, then three 1x1-conv branches
    emit per-class logit maps for light direction x, direction y, and
    intensity.
    """
    def __init__(self, opt, c_in):
        super(Classifier, self).__init__()
        batchNorm = opt['use_BN']
        # Bug fix: the input width was hard-coded to 512, silently
        # ignoring the c_in parameter. Callers (L_Net) pass c_in=512,
        # so behavior is unchanged but other widths now work.
        self.conv1 = model_utils.conv_layer(batchNorm, c_in, 128, k=3, stride=1, pad=1)
        self.conv2 = model_utils.conv_layer(batchNorm, 128, 128, k=3, stride=2, pad=1)
        self.conv3 = model_utils.conv_layer(batchNorm, 128, 128, k=3, stride=2, pad=1)
        self.conv4 = model_utils.conv_layer(batchNorm, 128, 128, k=3, stride=2, pad=1)
        self.opt = opt
        self.dir_x_est = nn.Sequential(
            model_utils.conv_layer(batchNorm, 128, 64, k=1, stride=1, pad=0),
            model_utils.output_conv(64, opt['dirs_cls'], k=1, stride=1, pad=0))
        self.dir_y_est = nn.Sequential(
            model_utils.conv_layer(batchNorm, 128, 64, k=1, stride=1, pad=0),
            model_utils.output_conv(64, opt['dirs_cls'], k=1, stride=1, pad=0))
        self.int_est = nn.Sequential(
            model_utils.conv_layer(batchNorm, 128, 64, k=1, stride=1, pad=0),
            model_utils.output_conv(64, opt['ints_cls'], k=1, stride=1, pad=0))
    def forward(self, inputs):
        """Return {'dir_x', 'dir_y', 'ints'} logit maps for `inputs`."""
        out = self.conv1(inputs)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        outputs = {}
        outputs['dir_x'] = self.dir_x_est(out)
        outputs['dir_y'] = self.dir_y_est(out)
        outputs['ints'] = self.int_est(out)
        return outputs
class L_Net(nn.Module):
    """Light estimation network: per-view features are fused across views,
    then a shared classifier predicts each view's light direction and
    intensity class distributions."""
    def __init__(self, opt, c_in):
        super(L_Net, self).__init__()
        self.opt = opt
        self.featExtractor = FeatExtractor(self.opt, c_in=c_in, c_out=256)
        # Classifier input is per-view (256) + fused (256) channels.
        self.classifier = Classifier(self.opt, c_in=512)
        d_cls, i_cls = self.opt['dirs_cls'], self.opt['ints_cls']
        # Bin-index ramps [0, 1, ..., cls-1] used by the soft-argmax.
        self.register_buffer('dirs_step', torch.linspace(0, d_cls-1, d_cls))
        self.register_buffer('ints_step', torch.linspace(0, i_cls-1, i_cls))
    def forward(self, inputs):
        """inputs: list of per-view image tensors, each (B, c_in, H, W).

        Returns an OrderedDict with decoded 'dirs'/'intens' and raw
        per-view class logits 'dirs_x'/'dirs_y'/'ints'.
        """
        feats = []
        for i in range(len(inputs)):
            out_feat = self.featExtractor(inputs[i])
            feats.append(out_feat)
        feat_fused = fuse_features(feats, self.opt)
        l_dirs_x, l_dirs_y, l_ints = [], [], []
        # Each view is classified from its own features + the fused ones.
        for i in range(len(inputs)):
            net_input = torch.cat([feats[i], feat_fused], 1)
            outputs = self.classifier(net_input)
            l_dirs_x.append(outputs['dir_x'])
            l_dirs_y.append(outputs['dir_y'])
            l_ints.append(outputs['ints'])
        pred = OrderedDict()
        batch = inputs[0].shape[0]
        dirs = convert_dirs(l_dirs_x, l_dirs_y, self.opt, self.dirs_step)
        # Regroup flat (views*batch, ...) outputs into (batch, views, ...).
        pred['dirs'] = torch.stack(torch.split(dirs, batch, 0), 1)
        pred['dirs_x'] = torch.stack(l_dirs_x, 1).view(batch, len(inputs), self.opt['dirs_cls'])
        pred['dirs_y'] = torch.stack(l_dirs_y, 1).view(batch, len(inputs), self.opt['dirs_cls'])
        intens = convert_intens(l_ints, self.opt, self.ints_step)
        pred['intens'] = torch.stack(torch.split(intens, batch, 0), 1)
        pred['ints'] = torch.stack(l_ints, 1).view(batch, len(inputs), self.opt['ints_cls'])
        return pred
|
from flask import Flask,render_template,request,jsonify,redirect,send_file
from flask import request
import requests
import json
from flask_restful import Resource, Api, reqparse
import string
from flask_cors import CORS
# Flask app with permissive CORS so the API can be called cross-origin.
app = Flask(__name__)
CORS(app)
@app.errorhandler(405)
def page_not_found(e):
    # Return a JSON body with the 405 (method not allowed) status explicitly.
    return jsonify('Method not matched'), 405
@app.route('/api/v1/users',methods=['GET'])
def list_users():
    """Return all usernames as a JSON list; 204 when none are registered."""
    with open('data.txt','r') as fp:
        records = json.load(fp)
    if not records:
        return jsonify(), 204
    usernames = [record['username'] for record in records]
    return jsonify(usernames), 200
#Login if username exists and password matches
#Add user if username dose not exists
@app.route('/api/v1/users',methods=['POST'])
def add_user():
    """Register a new user; 201 on success, 400 on bad input or duplicate.

    Bug fix: the original indexed inp['username'] BEFORE the missing-data
    check, so a request without those keys raised KeyError (HTTP 500)
    instead of returning the intended 400.
    """
    inp = request.get_json()
    username = inp.get('username') if inp else None
    password = inp.get('password') if inp else None
    if username and password:
        # Passwords are expected as 40-character (SHA-1 hex) digests.
        if(len(password)!=40):
            return jsonify({'message':'Password format is wrong!'}),400
        with open('data.txt') as fp:
            info=json.load(fp)
        for i in info:
            if i['username']==username:
                return jsonify({'message':'username_exist'}),400
        info.append({'username': username, 'password': password})
        with open('data.txt','w') as fp1:
            json.dump(info,fp1)
        return jsonify({'message':'user_added'}),201
    else:
        return jsonify({'message':'missing_Data'}),400
#Remove given user
@app.route('/api/v1/users/<username>',methods=['DELETE'])
def remove(username):
    """Delete `username` from data.txt.

    NOTE(review): both the removed and the not-found branches return 200,
    while the commented-out CSV version returned 400 for a missing user --
    confirm which contract API clients expect. (The 'user_dosent_exist'
    typo is preserved because clients may match on it.)
    """
    #fields = ['username', 'password']
    #df = pd.read_csv('data.csv', skipinitialspace=True, usecols=fields)
    with open('data.txt','r') as fp:
        info=json.load(fp)
    if (username):
        for i in info:
            if i['username']==username:
                info.remove(i)
                with open('data.txt','w') as fp1:
                    json.dump(info,fp1)
                return jsonify({'message':'user_removed'}),200
        return jsonify({'message':'user_dosent_exist'}),200
        '''
        if (username in list(df.username)):
            df=df.drop(df.index[list(df.username).index(username)])
            df.to_csv('data.csv', index=False)
            return jsonify({'message':'user_removed'}),200
        else:
            return jsonify({'message':'user_dosent_exist'}),400
        '''
    else:
        return jsonify({'message':'missing_data'}),400
if __name__ == '__main__':
    # Binds on all interfaces; debug=True must be disabled in production.
    app.run(host='0.0.0.0',port='80',debug=True)
|
from keras.engine import Model
from keras.layers import Flatten, Dense, Input
from keras_vggface.vggface import VGGFace
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
import cv2
import os
from flask import Flask, request, redirect, url_for, send_from_directory, render_template
from werkzeug import secure_filename
# basedir = os.path.abspath(os.path.dirname(__file__))
# Where uploaded images are stored, and which extensions are accepted.
UPLOAD_FOLDER = 'static/upload'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def classify(img_face):
    """Run the fine-tuned VGG face classifier on a batch of face crops.

    Perf fix: the model is now loaded from disk once and cached on the
    function object; the original reloaded 'saved_model.h5' on every call
    (i.e. for every detected face of every request).
    """
    if not hasattr(classify, '_model'):
        classify._model = load_model('saved_model.h5')
    return classify._model.predict(img_face)
def predict(test_image):
    """Detect faces in the image file `test_image` and classify each one.

    Returns (annotated_image, face_present, kejriwal_present, modi_present);
    annotated_image is None when the file could not be read.
    """
    face_present = False
    modi_present = False
    kejriwal_present = False
    cl = ['arvind kejriwal', 'narendra modi']  # class index -> label
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    img = cv2.imread(test_image)
    if img is None:
        # Unreadable/missing file: report "no faces" with a None image.
        return img, face_present, kejriwal_present, modi_present
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces_dec = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces_dec:
        face_present = True
        face = img[y:y+h, x:x+w]
        # if face.shape[0] < 160:
        # The classifier expects 224x224 inputs (VGG face size).
        face = cv2.resize(face, (224,224))
        im = np.zeros((1, 224, 224, 3))
        im[0,:,:,:] = face
        pred = classify(im)
        # Draw the detection box and predicted name onto the image.
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # print(np.argmax(pred, axis=1))
        clno = np.argmax(pred, axis=1)[0]
        if clno == 0:
            kejriwal_present = True
        elif clno == 1:
            modi_present = True
        text = cl[np.argmax(pred, axis=1)[0]]
        cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
    return img, face_present, kejriwal_present, modi_present
def allowed_file(filename, allowed_extensions=None):
    """Return True when `filename` has an allowed image extension.

    Bug fix: the comparison is now case-insensitive -- 'photo.JPG' was
    rejected before. `allowed_extensions` defaults to the module-level
    ALLOWED_EXTENSIONS (parameter added for testability; existing callers
    are unaffected).
    """
    if allowed_extensions is None:
        allowed_extensions = ALLOWED_EXTENSIONS
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_extensions
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Serve the upload form; on POST, save the image and redirect to the
    result page.

    Bug fix: `flash` was used but never imported, so both error branches
    raised NameError; it is imported locally to keep the module header
    untouched.
    """
    from flask import flash
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path components and unsafe chars.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('upload_done', filename=filename))
    return render_template('home.html')
@app.route('/<filename>', methods=['GET'])
def upload_done(filename):
    """Run detection/classification on the uploaded image and render it."""
    test_image = "static/upload/" + filename
    predicted_image, face_p, kejriwal_p, modi_p = predict(test_image)
    if predicted_image is not None:
        # Persist the annotated image where the result template expects it.
        cv2.imwrite("static/outputs/"+filename, predicted_image)
    # return send_from_directory(app.config['UPLOAD_FOLDER'],
    #                            filename)
    print("static/outputs/"+filename)
    return render_template('display_result.html', dis_img="static/outputs/"+filename, face_p=face_p, kejriwal_p=kejriwal_p, modi_p=modi_p)
if __name__ == '__main__':
    app.run(debug=True)
from app import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
""" many-to-many = User to Group table """
User_Group = db.Table("User_Group",
db.Column('id', db.Integer, primary_key=True),
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('Group_id', db.Integer, db.ForeignKey('group.id'))
)
class User(UserMixin, db.Model):
    """ User table """
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50), index=True, unique=True)
    # Only the hash is stored; see set_password / valid_password below.
    password_hash = db.Column(db.String(128))
    # Many-to-many group membership through the User_Group table.
    Group = db.relationship('Group', secondary=User_Group, backref=db.backref('users', lazy='dynamic'))
    sent_messages = db.relationship('Messages', backref='sent', lazy=True)
    inbox_messages = db.relationship('Msg_Recipient', backref='user_recipient', lazy=True)
    is_active = db.Column(db.Boolean, default=True)
    def set_password(self, password):
        """Hash `password` and store the hash on this user."""
        self.password_hash = generate_password_hash(password)
    def valid_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Group(db.Model):
    """ Group table """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)
    is_active = db.Column(db.Boolean, default=True)
    # Messages addressed to the whole group.
    inbox_messages = db.relationship('Msg_Recipient', backref='group_recipient', lazy=True)
|
class ItemPage():
    """Page-object locator constants for the item page (UI automation)."""
    # here are elements' ids or x_paths in item page
    # NOTE(review): every id below is the placeholder "sg.com" -- these look
    # like redacted or unfilled locators; fill in real resource ids/xpaths
    # before running tests against this page.
    itemPageTitle_id = "sg.com"
    topItem_xp = "TextView[3]"
    topItemText_xp = "TextView[3]"
    filterBtn_id = "sg.com"
    resolutionSwitch_id = "sg.com"
    scheduleSwitch_id = "sg.com"
    meetingSwitch_id = "sg.com"
    showResultsBtn_id = "sg.com"
    searchBarBtn_id = "sg.com"
    searchBtn_id = "sg.com"
    searchTextBox_id = "sg.com"
    searchCloseBtn_id = "sg.com"
    itemTypeText_rssid = "sg.com"
|
import unittest
from six import string_types
from pandas.core.frame import DataFrame
from opengrid.library.kmi import *
class KMITest(unittest.TestCase):
    """
    Class for testing the kmi web scraper
    """
    # NOTE: these tests hit the live KMI website and need network access.
    def test_fetch_website(self):
        """
        Check if the URL works
        """
        self.assertIsInstance(fetch_website(), string_types)
    def test_get_kmi_current_month(self):
        """
        Check if the top function returns a dataframe
        """
        self.assertIsInstance(get_kmi_current_month(), DataFrame)
if __name__ == '__main__':
    unittest.main()
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils, lists
# Cached gdb type object for `struct module`.
module_type = utils.CachedType("struct module")
def module_list():
    """Yield each loaded kernel module from the global 'modules' list."""
    global module_type  # NOTE(review): only read here; the global statement is redundant
    modules = utils.gdb_eval_or_none("modules")
    if modules is None:
        # No 'modules' symbol (e.g. kernel built without CONFIG_MODULES).
        return
    module_ptr_type = module_type.get_type().pointer()
    for module in lists.list_for_each_entry(modules, module_ptr_type, "list"):
        yield module
def find_module_by_name(name):
    """Return the loaded module whose name matches, or None if not found."""
    return next(
        (module for module in module_list()
         if module['name'].string() == name),
        None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
    def __init__(self):
        super(LxModule, self).__init__("lx_module")
    def invoke(self, mod_name):
        # gdb passes the argument as a gdb.Value; unwrap to a Python str.
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            # Return the struct module itself, not the pointer to it.
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)
# Register the convenience function with gdb at import time.
LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""
    _module_use_type = utils.CachedType("struct module_use")
    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
    def invoke(self, arg, from_tty):
        # Widen the Address column on 64-bit targets.
        gdb.write(
            "Address{0}    Module                  Size  Used by\n".format(
                "        " if utils.get_long_type().sizeof == 8 else ""))
        for module in module_list():
            layout = module['core_layout']
            # refcnt counts one implicit self-reference; subtract it.
            gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
                address=str(layout['base']).split()[0],
                name=module['name'].string(),
                size=str(layout['size']),
                ref=str(module['refcnt']['counter'] - 1)))
            # Append the comma-separated list of modules using this one.
            t = self._module_use_type.get_type().pointer()
            first = True
            sources = module['source_list']
            for use in lists.list_for_each_entry(sources, t, "source_list"):
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
            gdb.write("\n")
# Register the command with gdb at import time.
LxLsmod()
|
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# Cython extension for the ASF/AMC motion-capture file parser.
ext_modules = [
    Extension(
        "asfamcparser",
        ["AMCFileReader.pyx"],
        libraries=["m"],                  # link libm for C math routines
        extra_compile_args = ["-ffast-math"]
    )
]
setup(
    name = "asfamcparser",
    include_dirs = [np.get_include()],    # NumPy C headers for the .pyx
    cmdclass = {"build_ext": build_ext},
    ext_modules = ext_modules
)
|
#oef5
# Exercise: for a digit string n, compute n + nn + nnn, where nn and nnn
# are the digits REPEATED via string concatenation (n+n, n+n+n) before
# conversion -- e.g. n="2" gives 2 + 22 + 222 = 246.
n = input("Give a number: ")
result = int(n)+int(n+n)+int(n+n+n)
print("The result is : {}".format(result))
import pickle
from flask import Flask, request, render_template
app = Flask(__name__)
# Both '/' and '/login' serve the login page.
@app.route("/", methods= ["GET","POST"])
@app.route("/login", methods=['POST','GET'])
def login():
    """Render the login page."""
    return render_template("login.html")
@app.route("/about", methods=['POST','GET'])
def about():
    """Render the about page."""
    return render_template("about.html")
@app.route("/faq", methods=['POST','GET'])
def faq():
    """Render the FAQ page."""
    return render_template("faq.html")
@app.route("/home", methods=['POST','GET'])
def content():
if request.method == 'POST':
Age = request.form['age']
BMI = request.form['bmi']
Gender = request.form['gender']
Smoker = request.form['smoker']
Location = request.form['location']
Children = request.form['children']
data = [[int(Age), float(BMI), int(Gender), int(Smoker), int(Location), int(Children)]]
with open('mainmodel.pickle','rb') as file:
model= pickle.load(file)
print(data)
predict =model.predict(data)[0]
print(predict)
return render_template('result.html', prediction = predict)
return render_template("content.html" )
if __name__ == '__main__':
app.run(debug=True)
|
from suds.transport import Reply
from http.client import HTTPMessage
import unittest.mock as mock
import soap
import re
from .http import HttpTransport
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
pass
class XMLAssertions(object):
    """Mixin adding XPath-based assertions to a unittest.TestCase."""
    def assertNodeCount(self, xml_str, xpath, num):
        """Assert that exactly `num` nodes match `xpath` in `xml_str`."""
        matches = etree.fromstring(xml_str).xpath(xpath)
        self.assertEqual(num, len(matches))
    def assertNodeText(self, xml_str, xpath, expected):
        """Assert at least one node matches and all have text `expected`."""
        matches = etree.fromstring(xml_str).xpath(xpath)
        self.assertTrue(len(matches) > 0)
        for match in matches:
            self.assertEqual(expected, match.text)
    def assertNodeAttributes(self, xml_str, xpath, attributes):
        """Assert every matching node (>=1) carries the given attributes."""
        matches = etree.fromstring(xml_str).xpath(xpath)
        self.assertTrue(len(matches) > 0)
        for match in matches:
            for attr_name, attr_value in attributes.items():
                self.assertTrue(attr_name in match.attrib)
                self.assertEqual(attr_value, match.attrib[attr_name])
class SoapTest(XMLAssertions):
    """Base class for SOAP client tests with a canned-transport helper."""
    def setUp(self):
        # Reset the module-level client cache so each test is isolated.
        soap.clients = {}
    def _build_transport_with_reply(self, body, status=200, pattern=None, test_request=None):
        """Return an HttpTransport whose send() returns a canned reply.

        When `pattern` is given, only request URLs matching it receive the
        canned reply; other requests fall through to the real transport.
        `test_request(request)`, if given, lets the test inspect the
        outgoing request before the reply is returned.
        """
        headers = HTTPMessage()
        headers.add_header('Content-Type', 'text/xml; charset=utf-8')
        reply = Reply(status, headers, body)
        transport = HttpTransport()
        def surrogate(request, *args, **kwargs):
            if pattern and not re.search(pattern, request.url):
                return HttpTransport.send(transport, *args, **kwargs)
            if test_request:
                test_request(request)
            return reply
        transport.send = mock.MagicMock()
        transport.send.side_effect = surrogate
        return transport
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 20:39:13 2021
@author: Gustavo
@mail: gustavogodoy85@gmail.com
"""
def tabla_mult(number):
number = number
header = ('0','1','2','3','4','5','6','7','8','9')
print(f'{"":>4s} {"%4s %4s %4s %4s %4s %4s %4s %4s %4s %4s" % header}')
print(f'{"":->55}')
row = 0
col = 0
while row <= number:
numbers = []
num = 0
for col in range (10):
numbers.append(str(num))
num += row
numbers = tuple(numbers)
print(f'{str(row)+":":>4s} {"%4s %4s %4s %4s %4s %4s %4s %4s %4s %4s" % numbers}')
row += 1
tabla = tabla_mult(9)
#%% version nueva
def tabla_mult(number):
for n in range (10):
print(f'{n:4d}', end=' ')
print(f'{"":->50}')
row = 0
col = 0
while row <= number:
num = 0
for col in range (10):
print(f'{num:4d}', end=" ")
num += row
print('')
row += 1
tabla = tabla_mult(9) |
#!/usr/bin/python
# coding=utf-8
"""
Author: moshed
Created on 21/12/2020
"""
from pysat.solvers import Solver
from pysat.solvers import Glucose3
ids = ["311395834", "314981259"]
F, T = False, True
status_map = {'U': 0, 'H': 1, 'S': 2, 'I': 3, 'Q': 4, '?': 5, 'SN': 6, 'R': 7, 'LQ': 8, 'EQ': 9, 'VAC': 10}
# translator: '-'9XXXX <-> 'not' (9,(i,j), t , status) 9-> to keep leading zeros
def int_plus(int_list, isNot=False):
    """Encode [i, j, t, status] as a SAT literal integer.

    The digits are concatenated after a leading '9' (which preserves any
    leading zeros of the components); a '-' prefix negates the literal
    when isNot is True.
    """
    prefix = '-9' if isNot else '9'
    return int(prefix + ''.join(str(x) for x in int_list))
def linearity_constraints(objs_count, row_count, col_count):
    """At-most-one constraints over the six base statuses per cell/step.

    For every time step and cell, and for every unordered pair of base
    statuses ('U','H','S','I','Q','?'), emit a clause forcing at least
    one of the two literals false -- a pairwise at-most-one encoding.
    """
    base_statuses = list(status_map)[:6]
    # res = list(itertools.combinations(test_dict, 2)) - check performance
    pairs = [(first, second)
             for idx, first in enumerate(base_statuses)
             for second in base_statuses[idx + 1:]]
    clauses = []
    for t in range(objs_count):
        for i in range(row_count):
            for j in range(col_count):
                for first, second in pairs:
                    clauses.append([int_plus([i, j, t, status_map[first]], isNot=True),
                                    int_plus([i, j, t, status_map[second]], isNot=True)])
    return clauses
def clauses_parse(phrase, predicates):
    """Translate a propositional CNF string into pysat clause lists.

    `phrase` separates clauses with ' ∧ ' and literals with ' ∨ ', using
    '¬' for negation; `predicates` maps each single-letter name to its
    literal integer.
    """
    clauses = []
    for clause_text in phrase.split(' ∧ '):
        literals = []
        for literal in clause_text.replace('(', '').replace(')', '').split(' ∨ '):
            if literal.startswith('¬'):
                literals.append(-predicates[literal[1]])
            else:
                literals.append(predicates[literal[0]])
        clauses.append(literals)
    return clauses
def R_Implication(i, j, t):
    """CNF for R(i,j,t): "recovers now" <-> sick at t-1, t-2 AND t-3.

    For t < 3 a full three-step sickness history cannot exist, so R is
    forced false with a unit clause.
    """
    pysat_clauses = []
    if t < 3:
        pysat_clauses.append([int_plus([i, j, t, status_map['R']], isNot=True)])
    else:
        predicates = {
            'a': int_plus([i, j, t, status_map['R']]),
            'b': int_plus([i, j, t - 1, status_map['S']]),
            'c': int_plus([i, j, t - 2, status_map['S']]),
            'd': int_plus([i, j, t - 3, status_map['S']])
        }
        # a <-> ( b && c && d)
        pysat_clauses += clauses_parse('(¬a ∨ b) ∧ (¬a ∨ c) ∧ (¬a ∨ d) ∧ (a ∨ ¬b ∨ ¬c ∨ ¬d)', predicates)
    return pysat_clauses
def SN_Implication(i, j, t, S_Neighbors):
    """CNF for SN(i,j,t): "has a sick neighbour" <-> OR of neighbour literals.

    S_Neighbors[t][i][j] lists the sick-neighbour literals for the cell;
    cells have 2, 3 or 4 neighbours depending on board position
    (corner / edge / interior), hence the three cases below.
    """
    pysat_clauses = []
    S_Neighbors_count = len(S_Neighbors[t][i][j])
    predicates = {
        'a': int_plus([i, j, t, status_map['SN']]),
        'b': S_Neighbors[t][i][j][0],
        'c': S_Neighbors[t][i][j][1],
    }
    if S_Neighbors_count == 2:
        # a <-> ( b || c)
        pysat_clauses += clauses_parse('(¬a ∨ b ∨ c) ∧ (a ∨ ¬b) ∧ (a ∨ ¬c)', predicates)
    if S_Neighbors_count == 3:
        predicates['d'] = S_Neighbors[t][i][j][2]
        # a <-> ( b || c || d)
        pysat_clauses += clauses_parse('(¬a ∨ b ∨ c ∨ d) ∧ (a ∨ ¬b) ∧ (a ∨ ¬c) ∧ (a ∨ ¬d)', predicates)
    if S_Neighbors_count == 4:
        predicates['d'] = S_Neighbors[t][i][j][2]
        predicates['e'] = S_Neighbors[t][i][j][3]
        # a <-> ( b || c || d || e)
        pysat_clauses += clauses_parse('(¬a ∨ b ∨ c ∨ d ∨ e) ∧ (a ∨ ¬b) ∧ (a ∨ ¬c) ∧ (a ∨ ¬d) ∧ (a ∨ ¬e)', predicates)
    return pysat_clauses
def U_Implication(i, j, t):
    """CNF tying the cell's status at t to its status at t-1.

    NOTE(review): despite the 'U' name, this constrains the H (healthy)
    literals, and the unit clause below asserts H(i,j,t) outright before
    adding the H(t) <-> H(t-1) biconditional -- confirm this matches the
    intended semantics at the call site.
    """
    pysat_clauses = []
    predicates = {
        'a': int_plus([i, j, t, status_map['H']]),
        'b': int_plus([i, j, t - 1, status_map['H']]),
    }
    # a <-> b
    pysat_clauses.append([predicates['a']])
    pysat_clauses += clauses_parse('(¬a ∨ b) ∧ (a ∨ ¬b)', predicates)
    return pysat_clauses
def H_Implication(i, j, t, S_Neighbors):
    """Clauses asserting H(i,j,t) together with its transition rule.

    H(t) <-> (H(t-1) ∧ ¬SN(t-1)) ∨ R(t): healthy now iff previously
    healthy with no sick neighbour, or just recovered.  Also emits the
    defining clauses for R(t) and SN(t-1), plus a unit clause forcing H(t).
    """
    predicates = {
        'a': int_plus([i, j, t, status_map['H']]),
        'b': int_plus([i, j, t - 1, status_map['H']]),
        'c': int_plus([i, j, t - 1, status_map['SN']]),
        'd': int_plus([i, j, t, status_map['R']]),
    }
    # Unit clause: the queried/observed cell is H at time t.
    clauses = [[predicates['a']]]
    # a <-> (b & ~c) || d, in CNF form.
    clauses += clauses_parse('(¬a ∨ b ∨ d) ∧ (¬a ∨ ¬c ∨ d) ∧ (a ∨ ¬b ∨ c) ∧ (a ∨ ¬d)', predicates)
    clauses += R_Implication(i, j, t)
    clauses += SN_Implication(i, j, t - 1, S_Neighbors)
    return clauses
def S_Implication(i, j, t, S_Neighbors):
    """Clauses asserting S(i,j,t) together with its transition rule.

    S(t) <-> (S(t-1) ∧ ¬R(t)) ∨ (H(t-1) ∧ SN(t-1)): still sick and not
    recovering, or a previously-healthy cell infected by a sick neighbour.
    Also emits the defining clauses for R(t) and SN(t-1), plus a unit
    clause forcing S(t).
    """
    predicates = {
        'a': int_plus([i, j, t, status_map['S']]),
        'b': int_plus([i, j, t - 1, status_map['S']]),
        'c': int_plus([i, j, t, status_map['R']]),
        'd': int_plus([i, j, t - 1, status_map['H']]),
        'e': int_plus([i, j, t - 1, status_map['SN']]),
    }
    clauses = [[predicates['a']]]  # unit clause: the cell is S at time t
    # a <-> ((b && ~c) || (d && e)), in CNF form.
    clauses += clauses_parse(
        '(¬a ∨ b ∨ d) ∧ (¬a ∨ b ∨ e) ∧ (¬a ∨ ¬c ∨ d) ∧ (¬a ∨ ¬c ∨ e) ∧ (a ∨ ¬b ∨ c) ∧ (a ∨ ¬d ∨ ¬e)', predicates)
    clauses += R_Implication(i, j, t)
    clauses += SN_Implication(i, j, t - 1, S_Neighbors)
    return clauses
def I_Implication(i, j, t):
    """Clauses for I(i,j,t) <-> I(t-1) ∨ VAC(t).

    Immunity persists, and vaccination at time t makes a cell immune.
    """
    a = int_plus([i, j, t, status_map['I']])
    b = int_plus([i, j, t - 1, status_map['I']])
    c = int_plus([i, j, t, status_map['VAC']])
    # a <-> (b ∨ c) in CNF: (¬a ∨ b ∨ c), (a ∨ ¬b), (a ∨ ¬c).
    return [[-a, b, c], [a, -b], [a, -c]]
def Q_Implication(i, j, t):
    """Clauses for Q(i,j,t) <-> (Q(t-1) ∧ ¬LQ(t)) ∨ EQ(t).

    Quarantined iff already quarantined and not leaving quarantine (LQ),
    or entering quarantine (EQ) at time t.
    """
    a = int_plus([i, j, t, status_map['Q']])
    b = int_plus([i, j, t - 1, status_map['Q']])
    c = int_plus([i, j, t, status_map['LQ']])
    d = int_plus([i, j, t, status_map['EQ']])
    # a <-> ((b ∧ ¬c) ∨ d) in CNF.
    return [[-a, b, d], [-a, -c, d], [a, -b, c], [a, -d]]
def solve_problem(input):
    """Answer grid-epidemic queries by SAT entailment.

    `input` (name kept for caller compatibility, although it shadows the
    builtin) is a dict with keys 'police', 'medics', 'observations'
    (a list of status grids over time, cells being 'U'/'H'/'S'/'I'/'Q'/'?')
    and 'queries' (tuples of ((i, j), t, status)).

    Returns {query: 'T' | 'F' | '?'}: 'F' when the knowledge base plus the
    query's defining clauses is unsatisfiable, 'T' when it is satisfiable
    and no alternative status is also satisfiable, '?' otherwise.

    Fixes over the original: `is 0` identity comparisons on ints (which
    only work via CPython small-int caching and warn on 3.8+) replaced by
    `== 0`, and the four near-identical per-regime query blocks collapsed
    into a single parameterised pass.
    """
    res = {}
    status_dict = {'U': {}, 'H': {}, 'S': {}, 'I': {}, 'Q': {}}
    police, medics, observations, queries = input['police'], input['medics'], input['observations'], input['queries']
    objs_count = len(observations)
    row_count = len(observations[0])
    col_count = len(observations[0][0])

    pysat_clauses = []
    pysat_clauses += linearity_constraints(objs_count, row_count, col_count)

    # Per-status observation grids: T where the status was observed,
    # '?' everywhere the observation itself was '?'.
    for status in status_dict:
        status_dict[status] = {o: [[F] * col_count for _ in range(row_count)] for o in range(objs_count)}
    for t, obs in enumerate(observations):
        for i, row in enumerate(obs):
            for j, cell in enumerate(row):
                if cell in status_dict:
                    status_dict[cell][t][i][j] = T
                elif cell == '?':
                    for status in status_dict:
                        status_dict[status][t][i][j] = '?'

    # S-literals of each cell's 2-4 grid neighbours, per timestep.
    S_Neighbors = {o: [[F] * col_count for _ in range(row_count)] for o in range(objs_count)}
    for t, obs in enumerate(observations):
        for i, row in enumerate(obs):
            for j, cell in enumerate(row):
                temp = []
                if i - 1 >= 0:
                    temp.append(int_plus([i - 1, j, t, status_map['S']]))
                if j - 1 >= 0:
                    temp.append(int_plus([i, j - 1, t, status_map['S']]))
                if i + 1 < len(obs):
                    temp.append(int_plus([i + 1, j, t, status_map['S']]))
                if j + 1 < len(row):
                    temp.append(int_plus([i, j + 1, t, status_map['S']]))
                S_Neighbors[t][i][j] = temp

    # Unit clauses for every status observed at t = 0 (first match wins,
    # preserving the original elif order U, H, S, I, Q).
    start_observation = 0
    for i in range(len(status_dict['U'][start_observation])):
        for j in range(len(status_dict['U'][start_observation][0])):
            for status in ('U', 'H', 'S', 'I', 'Q'):
                if status_dict[status][start_observation][i][j] == T:
                    pysat_clauses.append([int_plus([i, j, start_observation, status_map[status]])])
                    break

    # U is permanent: an observed U at t implies U at t, and U(t) -> U(t-1).
    for t in range(1, len(status_dict['U'])):
        for i, row in enumerate(status_dict['U'][t]):
            for j, cell in enumerate(row):
                if cell == T:
                    pysat_clauses.append([int_plus([i, j, t, status_map['U']])])
                    pysat_clauses.append([int_plus([i, j, t, status_map['U']], isNot=True),
                                          int_plus([i, j, t - 1, status_map['U']])])
    # Transition clauses for every observed H and S cell.
    for t in range(1, len(status_dict['H'])):
        for i, row in enumerate(status_dict['H'][t]):
            for j, cell in enumerate(row):
                if cell == T:
                    pysat_clauses += H_Implication(i, j, t, S_Neighbors)
    for t in range(1, len(status_dict['S'])):
        for i, row in enumerate(status_dict['S'][t]):
            for j, cell in enumerate(row):
                if cell == T:
                    pysat_clauses += S_Implication(i, j, t, S_Neighbors)

    knowledge_base = pysat_clauses

    def _sat(extra):
        # Satisfiability of the knowledge base plus extra clauses.
        with Glucose3(bootstrap_with=knowledge_base + extra) as g:
            return g.solve()

    def _u_chain(qi, qj, qt):
        # U(qt) asserted, and U(qt) -> U(qt-1): the only way to be U.
        return [[int_plus([qi, qj, qt, status_map['U']])],
                [int_plus([qi, qj, qt, status_map['U']], isNot=True),
                 int_plus([qi, qj, qt - 1, status_map['U']])]]

    """ Queries Parsing"""
    # Regime configuration.  t0_statuses: statuses considered as
    # alternatives at t == 0; extra_statuses: statuses (beyond U/H/S)
    # checked as alternatives at t >= 1.
    if police == 0 and medics == 0:
        t0_statuses = ['U', 'H', 'S', 'I', 'Q']
        extra_statuses = []
    elif police == 0:
        t0_statuses = ['U', 'H', 'S']
        extra_statuses = ['I']
    elif medics == 0:
        t0_statuses = ['U', 'H', 'S']
        extra_statuses = ['Q']
    else:
        t0_statuses = ['U', 'H', 'S']
        extra_statuses = ['I', 'Q']

    for query in queries:
        (qi, qj), qt, qs = query[0], query[1], query[2]

        def build(st):
            # Clauses that pin status `st` at (qi, qj, qt).
            if st == 'U':
                return _u_chain(qi, qj, qt)
            if st == 'H':
                return H_Implication(qi, qj, qt, S_Neighbors)
            if st == 'S':
                return S_Implication(qi, qj, qt, S_Neighbors)
            if st == 'I':
                return I_Implication(qi, qj, qt)
            return Q_Implication(qi, qj, qt)

        if qt == 0:
            if not _sat([[int_plus([qi, qj, qt, status_map[qs]])]]):
                res[query] = 'F'
            else:
                res[query] = 'T'
                # If any other plausible status is also consistent, the
                # answer is unknown.
                for status in status_map:
                    if status in t0_statuses and status != qs:
                        if _sat([[int_plus([qi, qj, qt, status_map[status]])]]):
                            res[query] = '?'
                            break
        elif qt >= 1:
            if qs in ('U', 'H', 'S'):
                if not _sat(build(qs)):
                    res[query] = 'F'
                else:
                    res[query] = 'T'
                    alts = [st for st in ('U', 'H', 'S') if st != qs] + extra_statuses
                    if any(_sat(build(st)) for st in alts):
                        res[query] = '?'
            elif qs == 'I' and medics >= 1:
                # Original regimes answered plain T/F for I (no alternatives).
                res[query] = 'T' if _sat(build('I')) else 'F'
            elif qs == 'Q' and police >= 1:
                # Likewise plain T/F for Q.
                res[query] = 'T' if _sat(build('Q')) else 'F'
    return res
|
import os
# How to locate the cache folder:
# In Windows search, type %appdata%.
# Open the Discord folder.
# Copy the path of the cache folder found inside it.
print("\u001b[35;1mDiscord Cache Decrypter")
print("\u001b[37;1mMert Kemal Atılgan tarafından kodlanmıştır.")
print("https://github.com/mertatilgan\n")
path = input("Discord'un Cache klasörünün konumunu girin: ")
# Rename every cache entry to a sequential .png file.  enumerate replaces
# the original manual counter variable.
# NOTE(review): this renames *every* directory entry and assumes each one
# is PNG image data -- confirm before running on anything but a cache dir.
for i, file in enumerate(os.listdir(path), start=1):
    os.rename(os.path.join(path, file), os.path.join(path, str(i)+'.png'))
print("\u001b[32;1m[!] İşlem başarılı. \u001b[33;1m"+path+"\u001b[32;1m konumunda bulunan dosyalar .png formatına çevrildi.\u001b[37;1m")
|
# SECURITY(review): hard-coded Telegram bot token committed to source.
# This credential is exposed and should be revoked and loaded from an
# environment variable or a secrets store instead.
TOKEN = '1505312478:AAHf1SaNEL4TntYbOrjS6NkSmjIHxqhhYok'
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib as mpl
from scipy.stats import multivariate_normal
from sklearn.mixture import GaussianMixture
from sklearn.metrics.pairwise import pairwise_distances_argmin
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# enable Chinese code
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False
if __name__ == '__main__':
    # Fit a 2-component 3-D Gaussian mixture either with sklearn or with a
    # hand-written EM loop, then plot the raw data and the clustering.
    style = 'myself'
    np.random.seed(0)
    # Ground truth: 400 samples from N(mu1, diag(1,2,3)) and 100 from
    # N(mu2, cov2); stacked with the minority component first.
    mu1_fact =(0,0,0)
    cov1_fact = np.diag((1,2,3))
    data1 = np.random.multivariate_normal(mu1_fact,cov1_fact,400)
    mu2_fact = (2,2,1)
    cov2_fact = np.array(((1,1,3),(1,2,1),(0,0,1)))
    data2 = np.random.multivariate_normal(mu2_fact,cov2_fact,100)
    data = np.vstack((data2,data1))
    y= np.array([True] * 400 + [False] * 100)
    if style == 'sklearn':
        # n_components: number of mixture components.
        g = GaussianMixture(n_components=2,covariance_type='full',tol=1e-6,max_iter=1000)
        g.fit(data)
        # weights_[0]: mixing proportion of the first component.
        print('类别概率:\t',g.weights_[0])
        print('均值:\t',g.means_)
        print('方差:\t',g.covariances_)
        mu1,mu2 = g.means_
        sigma1,sigma2 = g.covariances_
    else:
        num_iter = 100
        n, d = data.shape
        # Initialise the means at the data extremes, covariances at identity.
        mu1 = data.min(axis=0)
        mu2 = data.max(axis=0)
        sigma1 = np.identity(d)
        sigma2 = np.identity(d)
        pi = 0.5
        # EM solution algorithm
        for i in range(num_iter):
            # E Step: responsibility of component 1 for each sample.
            norm1 = multivariate_normal(mu1,sigma1)
            norm2 = multivariate_normal(mu2,sigma2)
            tau1 = pi * norm1.pdf(data)
            tau2 = (1-pi) * norm2.pdf(data)
            gamma = tau1 / (tau1 + tau2)
            # M Step: responsibility-weighted parameter updates.
            mu1 = np.dot(gamma,data) / np.sum(gamma)
            mu2 = np.dot((1 - gamma),data) / np.sum( 1- gamma)
            sigma1 = np.dot(gamma*(data-mu1).T,data - mu1)/ np.sum(gamma)
            # BUG FIX: this assignment was misspelled `sigam2`, so sigma2
            # was never updated and the E step always used the identity
            # covariance for component 2.
            sigma2 = np.dot((1-gamma)*(data - mu2).T,data-mu2)/np.sum(1-gamma)
            pi = np.sum(gamma) / n
            print (i, '\t:', mu1,mu2)
        print('类别概率:\t',pi)
        print('均值:\t', mu1,mu2)
        print('方差:\t', sigma1,'\n',sigma2)
        # Predict cluster membership with the fitted components.
        norm1 = multivariate_normal(mu1,sigma1)
        norm2 = multivariate_normal(mu2, sigma2)
        tau1 =norm1.pdf(data)
        tau2= norm2.pdf(data)
        fig =plt.figure(figsize=(13,7),facecolor='w')
        ax =fig.add_subplot(121,projection='3d')
        ax.scatter(data[:,0],data[:,1],data[:,2],c='b',s=30,marker='o',depthshade=True)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_title(u'原始数据',fontsize=18)
        ax = fig.add_subplot(122,projection='3d')
        # Match fitted components to the ground-truth components by
        # nearest mean, so the accuracy comparison is label-invariant.
        order = pairwise_distances_argmin([mu1_fact,mu2_fact],[mu1,mu2],metric='euclidean')
        print (order)
        if order[0] == 0:
            c1 = tau1>tau2
        else:
            c1 = tau1<tau2
        c2 = ~c1
        acc =np.mean(y == c1)
        print (u'准确率: %.2f%%' % (100*acc))
        ax.scatter(data[c1,0], data[c1,1],data[c1,2],c='r',s=30,marker='o',depthshade=True)
        ax.scatter(data[c2, 0], data[c2, 1], data[c2, 2], c='g', s=30, marker='^', depthshade=True)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_title(u'EM算法分类',fontsize=18)
        plt.title(u'EM算法的实现',fontsize=21)
        plt.subplots_adjust(top=0.90)
        plt.tight_layout()
        plt.show()
|
class Solution(object):
    def isPalindrome(self, head):
        """Check whether a singly linked list reads the same both ways.

        :type head: ListNode
        :rtype: bool

        Finds the middle with a fast/slow scan, reverses the back half in
        place, then walks both halves comparing values.  O(n) time, O(1)
        extra space; the input list is left split and half-reversed.
        """
        if not head or not head.next:
            return True
        # `runner` advances two nodes per step, `mid` one, so `mid` stops
        # on the last node of the front half.
        runner, mid = head.next, head
        while runner and runner.next:
            runner = runner.next.next
            mid = mid.next
        # Detach and reverse everything after `mid`.
        node = mid.next
        mid.next = None
        rev = None
        while node:
            node.next, rev, node = rev, node, node.next
        # Compare the front half with the reversed back half.
        while head and rev:
            if head.val != rev.val:
                return False
            head, rev = head.next, rev.next
        return True
'''
Created on Oct 18, 2011
@author: Rob
'''
import morpher.pydbg.pydbg as pydbg
import morpher.pydbg.defines as defines
import struct
def sprintf_handler(dbg):
    # Breakpoint callback invoked when msvcrt's sprintf is hit (Python 2).
    # ESP+0xC is the third dword on the stack at the breakpoint: return
    # address, destination buffer, then the argument read here.
    # NOTE(review): described as a "counter" below, but for sprintf this
    # stack slot is the first vararg -- confirm the intended argument.
    addr = dbg.context.Esp + 0xC
    count = dbg.read_process_memory(addr, 4)
    # "L": native unsigned long -- 4 bytes on 32-bit Windows.
    count = int(struct.unpack("L",count)[0])
    print "Caught myself a sprintf with a counter of %d!" % count
    return defines.DBG_CONTINUE
if __name__ == '__main__':
    # Attach the pydbg debugger to a running process (by PID) and break on
    # every call to msvcrt's sprintf (Python 2 / 32-bit Windows only).
    dbg = pydbg.pydbg()
    pid = int(raw_input("Enter PID of process: "))
    dbg.attach(pid)
    print "Running...."
    # Resolve sprintf's address inside msvcrt.dll and install the handler.
    sprintf_address = dbg.func_resolve("msvcrt.dll", "sprintf")
    dbg.bp_set(sprintf_address, description="sprintf_address", handler=sprintf_handler)
    dbg.run()
|
from protorpc import messages
class IngredientMessage(messages.Message):
    """One recipe ingredient: what it is, how much, and in what unit."""
    ingredient = messages.StringField(1, required=True)
    quantity = messages.FloatField(2, required=True)
    unit = messages.StringField(3, required=True)
class RecipeMessage(messages.Message):
    """A recipe with its source metadata and nested ingredient list."""
    title = messages.StringField(1, required=True)
    author = messages.StringField(2, required=True)
    cookbook = messages.StringField(3, required=True)
    photo_url = messages.StringField(4, required=True)
    # Zero or more nested IngredientMessage entries.
    ingredients = messages.MessageField(IngredientMessage, 5, repeated=True)
class GetRecipesRequest(messages.Message):
    """Request for the recipes belonging to one user."""
    user_id = messages.StringField(1, required=True)
    # NOTE(review): declared as a StringField rather than BooleanField --
    # presumably a textual flag; confirm what values callers actually send.
    include_ingredients = messages.StringField(2, required=False)
class GetRecipesResponse(messages.Message):
    """Response carrying zero or more recipes."""
    recipes = messages.MessageField(RecipeMessage, 1, repeated=True)
|
import numpy as np
def xavier_initializer(shape):
    """Xavier/Glorot-scaled normal init for a (fan_in, fan_out) weight matrix."""
    fan_in, fan_out = shape[0], shape[1]
    scale = np.sqrt(2 / (fan_in + fan_out))
    return scale * normal_initializer(shape)
def normal_initializer(shape):
    """Standard-normal samples with the given 2-D shape."""
    rows, cols = shape[0], shape[1]
    return np.random.randn(rows, cols)
def get_initializer(name):
    """Look up an initializer function by name ('xavier' or 'normal').

    Raises KeyError for an unknown name.
    """
    initializers = {
        'xavier': xavier_initializer,
        'normal': normal_initializer,
    }
    return initializers[name]
|
__author__ = 'Dell'
import csv
from datetime import datetime
# import matplotlib
# matplotlib.use('ps')
import matplotlib.pyplot as plt
import numpy as np
# Preferential-attachment analysis (Python 2 script): relate each user's
# favorite indegree at the start of the observation window to the average
# rate of new favorites received per day over the window, on a log-log plot.
startreader = csv.reader(open("start-fav-indegree.csv", "r"), delimiter='\t')
endreader = csv.reader(open("end-fav-indegree.csv", "r"), delimiter='\t')
# Observation window: 2006-11-02 .. 2007-05-18 inclusive.
base = datetime.strptime('2006-11-02', "%Y-%m-%d")
end = datetime.strptime('2007-05-18', "%Y-%m-%d")
num = (end-base).days+1
startdegrees = []
# user id -> indegree at the end of the window
enddegree = dict((int(row[0]), int(row[1])) for row in endreader)
y = []
for row in startreader:
    startdegrees.append(int(row[1]))
    # Growth rate: (end indegree - start indegree) / days in window.
    y.append(float(enddegree[int(row[0])]-int(row[1]))/float(num))
# One unit-width bin per possible start degree.
numbins = np.array(startdegrees).max()
print numbins
# n: users per bin; sy: summed growth rates per bin.
n, binlist = np.histogram(startdegrees, bins=numbins)
sy, _ = np.histogram(startdegrees, bins=numbins, weights=y)
plotx = []
mean = []
for i in xrange(len(n)):
    if n[i] == 0 or sy[i] == 0:
        continue
    else:
        meanval = float(sy[i])/float(n[i])
        # Hand-tuned outlier windows excluded per figure; the alternative
        # ranges for the other figures are kept below for reference.
        # pref_fav1
        # if i+1 >= 699 and i+1 <= 35000 and meanval >= 0.001 and meanval <= 0.32:
        # continue
        # else:
        # pref_fav2
        # if i+1 >= 370 and i+1 <= 10000 and meanval >= 0.001 and meanval <= 0.16:
        # continue
        # else:
        # pref_rec2
        if i+1 >= 300 and i+1 <= 1000 and meanval >= 0.001 and meanval <= 0.16:
            continue
        else:
            mean.append(float(sy[i])/float(n[i]))
            plotx.append(i+1)
plt.grid()
plt.xlabel('Favorite Indegree (bin)')
plt.ylabel('Initiating Favorites Received (new user favorites/user/day)')
plt.loglog(plotx, mean, '+')
plt.show()
# plt.savefig('pref_rec2.eps', format='eps', dpi=1000)
|
#!/usr/bin/env python
import argparse
from datetime import datetime
from neomodel import config
from runner import HdfsToNeo4j
if __name__ == "__main__":
    # CLI front-end: parse arguments, point neomodel at the database, and
    # run the HDFS-to-Neo4j import.
    cli = argparse.ArgumentParser(description='Import HDFS Directory to Neo4j.')
    cli.add_argument('--neo4j-url', type=str, dest='neo4j_url',
                     default='bolt://neo4j:neo4j@localhost:7687',
                     help="Bolt Scheme URL (default is 'bolt://neo4j:neo4j@localhost:7687')")
    cli.add_argument('--timestamp', type=str, dest='timestamp',
                     default=datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
                     help='Date and time for this version (default is now)')
    cli.add_argument('name', type=str,
                     help='Symbolic import name (all nodes will be it\'s children)')
    cli.add_argument('directory', type=str,
                     help='HDFS Directory to import')
    options = cli.parse_args()
    config.DATABASE_URL = options.neo4j_url
    HdfsToNeo4j(options.name, options.directory, options.timestamp).update()
|
#!/usr/bin/env python3
import argparse
import os
import sys
from mpi4py import MPI
import numpy as np
import adios2
import plxr
from PIL import Image
## viewer.py
# NOTE(review): this usage text is never printed -- argparse generates its
# own help -- it is kept only as a human-readable summary of the subcommands.
usage_msg = """Usage: plxr <operation> <op_args>
Where <operation> is one of the following:
extract
insert
list
"""
def commandline(argv):
    """Build the plxr argument parser and parse argv[1:].

    Subcommands: list (bpfile), insert (bpfile, image_file, image_name),
    extract (bpfile, image_name, optional --filename prefix).
    """
    parser = argparse.ArgumentParser(prog='plxr')
    subparsers = parser.add_subparsers(help='sub help', dest="subcommand")

    sub = subparsers.add_parser('list', help="List images in a bp file")
    sub.add_argument('bpfile')

    sub = subparsers.add_parser('insert', help="Add image to a bp file. Create bp file if necessary.")
    sub.add_argument('bpfile')
    sub.add_argument('image_file')
    sub.add_argument('image_name')

    sub = subparsers.add_parser('extract', help="Extract image from a bp file.")
    sub.add_argument('bpfile')
    sub.add_argument('image_name')
    sub.add_argument('--filename', required=False)

    # argv[0] is the program name; argparse only wants the arguments.
    return parser.parse_args(argv[1:])
def main(argv):
    """Parse the command line and dispatch to the chosen subcommand."""
    config = commandline(argv)
    handlers = {'list': do_list, 'extract': do_extract, 'insert': do_insert}
    handler = handlers.get(config.subcommand)
    if handler is None:
        print ("unknown command, exiting")
    else:
        handler(config)
def do_list(config):
    """Print the name of every image stored in the bp file."""
    with adios2.open(config.bpfile, "r", MPI.COMM_SELF) as fh:
        # Query available images and print one name per line.
        for name in plxr.get_image_names_hl(fh):
            print(name)
def do_insert(config):
    """Store an image into the bp file, creating the file if needed."""
    # Append when the file already exists, otherwise create it.
    mode_char = 'a' if os.path.isfile(config.bpfile) else 'w'
    with adios2.open(config.bpfile, mode_char, MPI.COMM_SELF) as fh:
        rgb = Image.open(config.image_file).convert("RGB")
        plxr.write_png_image_hl(fh, rgb, config.image_name, end_step=True)
# Assumes all steps have this image, need to revisit if not true...
def do_extract(config):
    """Save the named image from every step as <prefix>_<step>.png."""
    with adios2.open(config.bpfile, "r", MPI.COMM_SELF) as fh:
        for step, ad_step in enumerate(fh):
            pimg = plxr.read_image_hl(ad_step, config.image_name)
            # --filename overrides the default prefix (the image name).
            prefix = config.filename if config.filename else config.image_name
            pimg.save("%s_%i.png" % (prefix, step))
if __name__ == "__main__":
    # Script entry point: pass raw argv through to main().
    main(sys.argv)
|
#!/usr/bin/python
#\file concat_imgs.py
#\brief certain python script
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Aug.26, 2021
import cv2
import numpy as np
if __name__=='__main__':
    # Load the sample render and build a vertically mirrored copy.
    img1= cv2.imread('../cpp/sample/rtrace1.png')
    img2= cv2.flip(img1, 0)
    # Stack original + mirror along rows (vertical) and columns (horizontal).
    cat_v= np.concatenate((img1,img2), axis=0)
    cat_h= np.concatenate((img1,img2), axis=1)
    cv2.imshow('concatenate vertically', cat_v)
    cv2.imshow('concatenate horizontally', cat_h)
    # Block until space or 'q' is pressed in an image window.
    while cv2.waitKey() not in map(ord,[' ','q']): pass
|
# Demonstrate four basic sklearn feature-scaling transformers on the iris data.
from sklearn.datasets import load_iris
iris = load_iris()
# print(iris.data)
# print(iris.target)
from sklearn.preprocessing import StandardScaler
print("standard scaler:")
# Zero mean / unit variance per feature column.
print(StandardScaler().fit_transform(iris.data))
from sklearn.preprocessing import MinMaxScaler
print("min max scaler")
# Rescale each feature column to the [0, 1] range.
print(MinMaxScaler().fit_transform(iris.data))
from sklearn.preprocessing import Normalizer
print("normalizer")
# Scale each sample (row) to unit norm.
print(Normalizer().fit_transform(iris.data))
from sklearn.preprocessing import Binarizer
print("binarizer")
# Map values > 3 to 1, everything else to 0.
print(Binarizer(threshold=3).fit_transform(iris.data))
import pandas as pd  # NOTE(review): unused here — presumably for a later section
|
#!/usr/local/bin/python3
# Decided to try mkaing my own interpretation of a deck just to see how it would compare to the books.
import collections
class MyDeck:
    """A simple playing-card deck model: 13 ranks crossed with 4 suits.

    Bug fix: the rank list previously ran 1-9 plus J/Q/K/A, which both
    omitted 10 and double-counted the ace (as 1 and as 'A').  A standard
    deck ranks 2-10 followed by J, Q, K, A.
    """
    numbers = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A']
    suits = ['Spades', 'Diamonds', 'Hearts', 'Clubs']
    # One card is a (rank, suit) pair; field names mirror the class attributes.
    Card = collections.namedtuple('Card', ['numbers', 'suits'])
|
'''
Given an int n, return True if it is within 10 of 100 or 200.
Note: abs(num) computes the absolute value of a number.
near_hundred(93) → True
near_hundred(90) → True
near_hundred(89) → False
'''
def near_hundred(n):
    """Return True if n is within 10 (inclusive) of 100 or of 200.

    Fix: use logical ``or`` (short-circuiting, intent-revealing) instead of
    the bitwise ``|``, and ``abs`` as the module docstring suggests.
    """
    return abs(n - 100) <= 10 or abs(n - 200) <= 10
# --- MNIST-letter classification: data prep for a ResNet-18-style model ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split, StratifiedKFold
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.layers import BatchNormalization, ZeroPadding2D, Activation, Add, GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
train_data = pd.read_csv('../data/mnist_data/train.csv', index_col=0, header=0)
print(train_data)
# Single-image experiment: dilate, median-blur, threshold low intensities.
# NOTE(review): the dilate result is immediately overwritten — medianBlur is
# applied to the ORIGINAL img, so the dilation has no effect.  Confirm intent.
img = train_data.iloc[0,2:].values.reshape(28,28).astype(np.uint8)
img_2 = cv2.dilate(img, kernel=np.ones((2,2), np.uint8), iterations=1)
img_2 = cv2.medianBlur(src=img, ksize=5)
img_2 = np.where(img_2>=10, img_2, 0)
print(img_2.shape)
print(img_2)
'''
# 그림 확인
cv2.imshow('before',img)
cv2.imshow('after',img_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
plt.imshow(img_2)
plt.show()
'''
# Augmentation: random rotation over the full circle.
datagen = ImageDataGenerator(
    rotation_range=360
)
train_letter = train_data['letter'].values
# Keep pixel columns only; normalize to [0, 1] and add the channel axis.
x_train = train_data.drop(['digit', 'letter'], axis=1).values
x_train = x_train.reshape(-1, 28, 28, 1)
x_train = x_train/255
print(x_train.shape) # (2048, 28, 28, 1)
# One-hot encode the letter labels A-Z.
y = train_data['letter']
alpha_2_num = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10,
               'L':11, 'M':12, 'N':13, 'O':14, 'P':15, 'Q':16, 'R':17, 'S':18, 'T':19, 'U':20,
               'V':21, 'W':22, 'X':23, 'Y':24, 'Z':25}
y = y.map(alpha_2_num)
y_train = np.zeros((len(y), len(y.unique())))
for i, letter in enumerate(y):
    y_train[i, letter] = 1
print(y_train)
print(y_train.shape)
# Stratified 80/20 train/validation split, fixed seed for reproducibility.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42, stratify=y_train)
# Model
input_tensor = Input(shape=x_train.shape[1:], dtype='float32', name='input')
def conv1_layer(x):
    """Stem: 7x7 conv (64 filters) with BN + ReLU, padded for the next stage."""
    padded = ZeroPadding2D(padding=(3, 3))(x)
    features = Conv2D(64, (7, 7), strides=(1, 1))(padded)
    features = BatchNormalization()(features)
    features = Activation('relu')(features)
    return ZeroPadding2D(padding=(1, 1))(features)
def conv2_layer(x):
    """Stage 2: 3x3/2 max-pool, then two 64-filter residual blocks.

    The first block uses a projected (conv + BN) shortcut, the second an
    identity shortcut.  Loop unrolled for readability.
    """
    x = MaxPooling2D((3, 3), 2)(x)

    # Residual block 1 - projection shortcut.
    identity = x
    out = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    identity = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(identity)
    identity = BatchNormalization()(identity)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    # Residual block 2 - identity shortcut.
    identity = x
    out = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    return x
def conv3_layer(x):
    """Stage 3: two 128-filter residual blocks (projected then identity shortcut)."""
    # Residual block 1 - projection shortcut.
    identity = x
    out = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    identity = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(identity)
    identity = BatchNormalization()(identity)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    # Residual block 2 - identity shortcut.
    identity = x
    out = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    return x
def conv4_layer(x):
    """Stage 4: two 256-filter residual blocks (projected then identity shortcut)."""
    # Residual block 1 - projection shortcut.
    identity = x
    out = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    identity = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(identity)
    identity = BatchNormalization()(identity)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    # Residual block 2 - identity shortcut.
    identity = x
    out = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    return x
def conv5_layer(x):
    """Stage 5: two 512-filter residual blocks; the first downsamples by 2.

    NOTE(review): unlike canonical ResNet-18, only this stage uses a
    strided conv — earlier stages keep stride 1.
    """
    # Residual block 1 - strided projection shortcut (spatial /2).
    identity = x
    out = Conv2D(512, (3, 3), strides=(2, 2), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    identity = Conv2D(512, (3, 3), strides=(2, 2), padding='same')(identity)
    identity = BatchNormalization()(identity)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    # Residual block 2 - identity shortcut.
    identity = x
    out = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    out = Add()([out, identity])
    x = Activation('relu')(out)

    return x
# Assemble the full network: stem + four residual stages + classifier head.
x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)
x = GlobalAveragePooling2D()(x)
# 26-way softmax: one class per letter A-Z.
output_tensor = Dense(26, activation='softmax')(x)
resnet18 = Model(input_tensor, output_tensor)
resnet18.summary()
model = resnet18
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Checkpoint the best model by validation accuracy; stop after a long plateau.
file_path = './dacon3/data/mnist_alpha_resnet_test.hdf5'
es = EarlyStopping(monitor='val_accuracy', patience=80)
cp = ModelCheckpoint(filepath=file_path, monitor='val_accuracy', save_best_only=True)
lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, patience=30)
# history = model.fit(x_train, y_train, epochs=5000, batch_size=32, validation_data=(x_val, y_val), verbose=2, callbacks=[es,cp,lr])
# NOTE(review): fit_generator is deprecated in TF2 — model.fit accepts generators directly.
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16), epochs=2000,
                           validation_data=(datagen.flow(x_val, y_val)), verbose=2, callbacks=[es, cp, lr])
from back_machine.config.parser import get_config_from_json
import argparse
import time
from math import ceil
import zmq
def collector(addressReceive, addressSend, numTerminate, is_test=False):
    """
    Relay binary images from the otsu node to the contours_node.

    Args:
        addressReceive: ip:port endpoint bound to receive from ostu_node.
        addressSend: ip:port endpoint bound to push to contours_node.
        numTerminate: number of termination sentinels expected and forwarded.
        is_test: when True, return immediately after the relay loop.
    """
    context = zmq.Context()
    # Upstream: binary images arrive here.
    collector_receiver = context.socket(zmq.PULL)
    collector_receiver.bind(addressReceive)
    # Downstream: forward the images to contours_node.
    collector_sender = context.socket(zmq.PUSH)
    collector_sender.bind(addressSend)

    # Relay until the expected number of termination sentinels has arrived.
    termination_seen = 0
    while termination_seen != numTerminate:
        work = collector_receiver.recv_pyobj()
        # An empty 'binary' payload is the upstream termination sentinel.
        if len(work['binary']) == 0:
            termination_seen += 1
            continue
        collector_sender.send_pyobj(work)

    # Propagate one sentinel per downstream consumer.
    for _ in range(numTerminate):
        collector_sender.send_pyobj({'binary': []})

    # return if the caller is a test
    if is_test:
        return
    # wait for the other processes to finish
    # time.sleep(10)
def main():
    """Main driver of collector node"""
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument('-id', '--node_id', type=int, help='id for the currently running node')
    argparser.add_argument('-n', '--total_num', type=int, help='total number of consumer nodes')
    args = argparser.parse_args()

    # Every node expects two terminations, except the middle node of an
    # odd-sized pool, which expects exactly one.
    is_middle_of_odd = (args.total_num % 2 != 0) and (args.node_id == ceil(args.total_num / 2.0))
    num_terminate = 1 if is_middle_of_odd else 2

    config = get_config_from_json("back_machine/config/server.json")  # get other nodes addresses from json config
    recv_address = config.collector_sockets[args.node_id - 1]  # receive address indexed by node id
    send_address = config.remote_sockets[args.node_id - 1]  # send address indexed by node id
    collector(recv_address, send_address, num_terminate)  # call the OTSU collector process
# Script entry point.
if __name__=='__main__':
    main()
from django.contrib import admin
# Register your models here.
from .models import ZooSpamForm
# Expose ZooSpamForm in the Django admin with the default ModelAdmin options.
admin.site.register(ZooSpamForm)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_m3u_dump
----------------------------------
Tests for `m3u_dump` module.
"""
import os
import pytest
from click.testing import CliRunner
from m3u_dump import cli
from m3u_dump.m3u_dump import M3uDump
@pytest.fixture(scope='session')
def music_dir(tmpdir_factory):
    """Session-scoped temp directory that holds the fake music library."""
    return tmpdir_factory.mktemp('music')
@pytest.fixture(scope='session')
def music_files(music_dir):
    """Populate the music dir with dummy mp3s at several nesting depths.

    Includes duplicate basenames and non-ASCII / space-containing names to
    exercise the search-path matching.
    """
    d = music_dir
    d.join('dummy001.mp3').write('dummy')
    d.mkdir('sub').join('dummy002.mp3').write('dummy')
    d.mkdir('sub4').join('dummy002.mp3').write('dummy')
    d.mkdir('sub2').mkdir('sub3').join('あいう えお.mp3').write('dummy')
    d.mkdir('sub3').mkdir('かきく けこ').join('あいう えお.mp3').write('dummy')
    return d
# noinspection PyShadowingNames
@pytest.fixture(scope='session')
def multi_playlist_music_files(music_dir):
    """Add a second batch of dummy files (multi-* names) plus a stray .m3u."""
    d = music_dir
    d.join('aaaa.m3u').write('dummy')
    d.mkdir('sub7').join('multi-dummy001.mp3').write('dummy')
    d.mkdir('sub8').mkdir('sub2').join('multi-dummy002.mp3').write('dummy')
    d.mkdir('sub9').join('multi-あいう えお.mp3').write('dummy')
    d.mkdir('sub10').join('multi-あいう えお.mp3').write('dummy')
    d.mkdir('sub11').join('multi-dummy004.mp3').write('dummy')
    d.mkdir('sub12').join('hello hello.mp3').write('dummy')
    return d
@pytest.fixture
def playlist_dir(tmpdir_factory):
    """Fresh temp directory (per test) that holds the playlist files."""
    return tmpdir_factory.mktemp('playlist')
# noinspection PyShadowingNames
@pytest.fixture
def playlist_current(playlist_dir):
    """m3u playlist mixing absolute, relative, non-ASCII, and unresolvable entries."""
    f = playlist_dir.join('playlist.m3u')
    f.write("""#EXTM3U
#EXTINF:409,artist - music_name
/full/path/dummy001.mp3
#EXTINF:281,artist - music_name
/full/path/dummy002.mp3
#EXTINF:275,artist - music_name
music/あいう えお.mp3
#EXTINF:263,artist - music_name
/full/path/music/あいう えお.mp3
#EXTINF:288,artist - music_name
/full/path/aaa/dummy002.mp3
#EXTINF:222,artist = music_name
../../hello.mp3""")
    return f
@pytest.fixture(scope='session')
def already_exists_playlist(tmpdir_factory):
    """Directory with music/already_path.mp3 and a playlist referencing it
    by absolute path, so the entry already resolves without fixing.

    Fix: py.path's ``.write()`` returns None, so the previous
    ``str(....write('dummy'))`` bound the string "None" to the path
    variables; create the file and derive paths separately instead.
    """
    d = tmpdir_factory.mktemp('already-dir')
    music_file = d.mkdir('music').join('already_path.mp3')
    music_file.write('dummy')
    playlist_content = """#EXTM3U
#EXTINF:409,artist - music_name
{}""".format(os.path.join(str(d), 'music', 'already_path.mp3'))
    d.join('playlist.m3u').write(playlist_content)
    return d
# noinspection PyShadowingNames
@pytest.fixture
def playlist_current2(playlist_dir):
    """Second playlist (m3u8) with multi-* entries, including a duplicate."""
    f = playlist_dir.join('playlist2.m3u8')
    f.write("""#EXTM3U
#EXTINF:409,artist - music_name
/full/path/multi-dummy001.mp3
#EXTINF:282,artist - music_name
/full/path/multi-dummy001.mp3
#EXTINF:281,artist - music_name
/full/path/multi-dummy002.mp3
#EXTINF:275,artist - music_name
music/multi-あいう えお.mp3
#EXTINF:263,artist - music_name
/full/path/music/multi-あいう えお.mp3
#EXTINF:288,artist - music_name
/full/path/aaa/multi-dummy004.mp3
#EXTINF:222,artist = music_name
../../multi-hello.mp3""")
    return f
@pytest.fixture(scope='session')
def dump_music_path(tmpdir_factory):
    """Session-wide destination directory for copy_music tests (as a str)."""
    d = tmpdir_factory.mktemp('dst')
    return str(d)
def test_command_line_interface():
    """Invoking without arguments fails; --help succeeds and shows usage."""
    cli_runner = CliRunner()
    no_args = cli_runner.invoke(cli.main)
    assert no_args.exit_code == 2  # must arguments
    assert 'Error: Missing argument' in no_args.output
    with_help = cli_runner.invoke(cli.main, ['--help'])
    assert with_help.exit_code == 0
    assert '--help' in with_help.output
    assert 'Show this message and exit.' in with_help.output
# noinspection PyShadowingNames
def test_command_line_dryrun(playlist_current, tmpdir_factory, music_files):
    """--dry-run must report success but copy nothing to the destination."""
    dst_dir = str(tmpdir_factory.mktemp('no-dump-music'))
    runner = CliRunner()
    result = runner.invoke(cli.main, ['--dry-run', str(playlist_current),
                                      dst_dir, '--fix-search-path',
                                      str(music_files)])
    assert 'Welcome m3u-dump' in result.output
    assert 'copy was completed(successful' in result.output
    assert result.exit_code == 0  # must arguments
    # Nothing may actually have been copied
    assert os.path.exists(os.path.join(dst_dir, 'dummy001.mp3')) is False
    assert os.path.exists(os.path.join(dst_dir, 'dummy002.mp3')) is False
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is False
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is False
    assert os.path.exists(os.path.join(dst_dir, 'hello.mp3')) is False
    playlist_name = os.path.basename(str(playlist_current))
    playlist_path = os.path.join(dst_dir, playlist_name)
    assert os.path.exists(playlist_path) is False
# noinspection PyShadowingNames
def test_command_line_start(playlist_current, tmpdir_factory, music_files):
    """Full run: files are copied and the rewritten playlist uses flat names."""
    dst_dir = str(tmpdir_factory.mktemp('dump-music'))
    runner = CliRunner()
    result = runner.invoke(cli.main, [str(playlist_current), dst_dir,
                                      '--fix-search-path', str(music_files)])
    for line in result.output.split('\n'):
        print(line)
    assert 'Welcome m3u-dump' in result.output
    assert 'copy was completed(successful' in result.output
    assert result.exit_code == 0  # must arguments
    # Verify the files were copied
    assert os.path.exists(os.path.join(dst_dir, 'dummy001.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'dummy002.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'hello.mp3')) is False
    playlist_name = os.path.basename(str(playlist_current))
    playlist_path = os.path.join(dst_dir, playlist_name)
    assert os.path.exists(playlist_path) is True
    # The copied playlist must list entries (now basenames) in original order.
    with open(playlist_path, 'r') as f:
        assert '#EXTM3U' == f.readline().rstrip('\n')
        assert '#EXTINF:409,artist - music_name' == f.readline().rstrip('\n')
        assert 'dummy001.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:281,artist - music_name' == f.readline().rstrip('\n')
        assert 'dummy002.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:275,artist - music_name' == f.readline().rstrip('\n')
        assert 'あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:263,artist - music_name' == f.readline().rstrip('\n')
        assert 'あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:288,artist - music_name' == f.readline().rstrip('\n')
        assert 'dummy002.mp3' == f.readline().rstrip('\n')
        assert '' == f.readline().rstrip('\n')
# noinspection PyShadowingNames
def test_command_line_no_fix_start(playlist_current, tmpdir_factory, music_files):
    """The CLI must still succeed when --fix-search-path is omitted."""
    out_dir = str(tmpdir_factory.mktemp('dump-music'))
    outcome = CliRunner().invoke(cli.main, [str(playlist_current), out_dir])
    for text_line in outcome.output.split('\n'):
        print(text_line)
    assert 'Welcome m3u-dump' in outcome.output
    assert 'copy was completed(successful' in outcome.output
    assert outcome.exit_code == 0  # must arguments
# noinspection PyShadowingNames
def test_command_line_already_playlist(already_exists_playlist):
    """Entries that already resolve must still be copied and rewritten."""
    music_path = os.path.join(str(already_exists_playlist), 'music')
    dst_dir = os.path.join(str(already_exists_playlist), 'dst')
    os.mkdir(dst_dir)
    playlist_path = os.path.join(str(already_exists_playlist), 'playlist.m3u')
    runner = CliRunner()
    result = runner.invoke(cli.main, [playlist_path, dst_dir,
                                      '--fix-search-path', str(music_path)])
    for line in result.output.split('\n'):
        print(line)
    assert 'Welcome m3u-dump' in result.output
    assert 'copy was completed(successful' in result.output
    assert result.exit_code == 0  # must arguments
    # Verify the file was copied
    assert os.path.exists(os.path.join(dst_dir, 'already_path.mp3')) is True
    playlist_path = os.path.join(dst_dir, 'playlist.m3u')
    assert os.path.exists(playlist_path) is True
    with open(playlist_path, 'r') as f:
        assert '#EXTM3U' == f.readline().rstrip('\n')
        assert '#EXTINF:409,artist - music_name' == f.readline().rstrip('\n')
        assert 'already_path.mp3' == f.readline().rstrip('\n')
# noinspection PyShadowingNames
def test_command_line_multi_playlist(playlist_current, playlist_current2,
                                     tmpdir_factory, music_files, multi_playlist_music_files):
    """Passing a DIRECTORY processes every playlist inside it in one run."""
    playlist_dir = os.path.dirname(str(playlist_current))
    dst_dir = str(tmpdir_factory.mktemp('dump-music'))
    runner = CliRunner()
    result = runner.invoke(cli.main, [playlist_dir, dst_dir,
                                      '--fix-search-path',
                                      str(music_files)])
    for line in result.output.split('\n'):
        print(line)
    assert 'Welcome m3u-dump' in result.output
    assert 'copy was completed(successful' in result.output
    assert result.exit_code == 0  # must arguments
    # Verify the files from BOTH playlists were copied
    assert os.path.exists(os.path.join(dst_dir, 'dummy001.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'dummy002.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'hello.mp3')) is False
    assert os.path.exists(os.path.join(dst_dir, 'multi-dummy001.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'multi-dummy002.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'multi-あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'multi-あいう えお.mp3')) is True
    assert os.path.exists(os.path.join(dst_dir, 'multi-dummy004.mp3')) is True
    # First rewritten playlist keeps its entry order.
    playlist_name = os.path.basename(str(playlist_current))
    playlist_path = os.path.join(dst_dir, playlist_name)
    assert os.path.exists(playlist_path) is True
    with open(playlist_path, 'r') as f:
        assert '#EXTM3U' == f.readline().rstrip('\n')
        assert '#EXTINF:409,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'dummy001.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:281,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'dummy002.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:275,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:263,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:288,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'dummy002.mp3' == f.readline().rstrip('\n')
        assert '' == f.readline().rstrip('\n')
    # Second rewritten playlist (m3u8) keeps its entry order too.
    playlist_name = os.path.basename(str(playlist_current2))
    playlist_path = os.path.join(dst_dir, playlist_name)
    assert os.path.exists(playlist_path) is True
    with open(playlist_path, 'r') as f:
        assert '#EXTM3U' == f.readline().rstrip('\n')
        assert '#EXTINF:409,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-dummy001.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:282,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-dummy001.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:281,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-dummy002.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:275,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:263,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-あいう えお.mp3' == f.readline().rstrip('\n')
        assert '#EXTINF:288,artist - music_name' == f.readline().rstrip(
            '\n')
        assert 'multi-dummy004.mp3' == f.readline().rstrip('\n')
        assert '' == f.readline().rstrip('\n')
# noinspection PyShadowingNames
def test_parse_playlist(playlist_current):
    """parse_playlist must yield EXTINF lines and file paths in file order."""
    entries = list(M3uDump.parse_playlist(str(playlist_current)))
    # Odd-looking indices: even positions (after the header) are paths.
    expected_paths = {
        2: '/full/path/dummy001.mp3',
        4: '/full/path/dummy002.mp3',
        6: 'music/あいう えお.mp3',
        8: '/full/path/music/あいう えお.mp3',
    }
    for position, path in expected_paths.items():
        assert entries[position] == path
    assert len(entries) == 13
# noinspection PyShadowingNames
def test_get_search_path_files(music_files):
    """get_search_path_files maps each basename to its candidate directories."""
    search_path_files = M3uDump.get_search_path_files(str(music_files))
    assert 'tmp/music0' in search_path_files['dummy001.mp3'][0]
    assert 'tmp/music0/sub' in search_path_files['dummy002.mp3'][0]
    assert 'tmp/music0/sub2/sub3' in search_path_files['あいう えお.mp3'][0]
    assert 'tmp/music0/sub3/かきく けこ' in search_path_files['あいう えお.mp3'][0]
    # NOTE(review): the last two asserts use the same key/index — presumably
    # the second was meant to check index [1]; confirm.
    assert len(search_path_files.keys()) == 11
# noinspection PyShadowingNames
def test_fix_playlist(playlist_current, music_files):
    """fix_playlist resolves playlist entries against the search-path index."""
    playlist_path = str(playlist_current)
    files = list(M3uDump.parse_playlist(playlist_path))
    search_path_files = M3uDump.get_search_path_files(str(music_files))
    p = M3uDump.fix_playlist(search_path_files, files)
    assert 'tmp/music0/dummy001.mp3' in p[2]
    assert 'tmp/music0/sub/dummy002.mp3' in p[4]
    assert 'tmp/music0/sub2/sub3/あいう えお.mp3' in p[6]
    assert 'tmp/music0/sub3/かきく けこ/あいう えお.mp3' in p[8]
    # 13 parsed entries shrink to 11: unresolvable ones are dropped.
    assert len(p) == 11
# noinspection PyShadowingNames
def test_copy_music_dryrun(playlist_current, music_files, dump_music_path):
    """copy_music with dry_run=True must not create any destination files."""
    playlist_path = str(playlist_current)
    files = list(M3uDump.parse_playlist(playlist_path))
    search_path_files = M3uDump.get_search_path_files(str(music_files))
    playlist = M3uDump.fix_playlist(search_path_files, files)
    M3uDump.copy_music(playlist, dump_music_path, True)
    assert os.path.exists(
        os.path.join(dump_music_path, 'dummy001.mp3')) is False
    assert os.path.exists(
        os.path.join(dump_music_path, 'dummy002.mp3')) is False
    assert os.path.exists(os.path.join(dump_music_path, 'あいう えお.mp3')) is False
    assert os.path.exists(os.path.join(dump_music_path, 'あいう えお.mp3')) is False
# noinspection PyShadowingNames
def test_copy_music_nodryrun(playlist_current, music_files, dump_music_path):
    """copy_music with dry_run=False must actually copy the resolved files."""
    resolved = M3uDump.fix_playlist(
        M3uDump.get_search_path_files(str(music_files)),
        list(M3uDump.parse_playlist(str(playlist_current))))
    M3uDump.copy_music(resolved, dump_music_path, False)
    for expected_name in ('dummy001.mp3', 'dummy002.mp3', 'あいう えお.mp3'):
        assert os.path.exists(os.path.join(dump_music_path, expected_name))
# noinspection PyShadowingNames
def test_copy_music_override(playlist_current, music_files, dump_music_path):
    """Copying twice into the same destination must not raise (overwrite ok)."""
    playlist_path = str(playlist_current)
    files = list(M3uDump.parse_playlist(playlist_path))
    search_path_files = M3uDump.get_search_path_files(str(music_files))
    playlist = M3uDump.fix_playlist(search_path_files, files)
    M3uDump.copy_music(playlist, dump_music_path, False)
    M3uDump.copy_music(playlist, dump_music_path, False)
    assert os.path.exists(os.path.join(dump_music_path, 'dummy001.mp3'))
    assert os.path.exists(os.path.join(dump_music_path, 'dummy002.mp3'))
    assert os.path.exists(os.path.join(dump_music_path, 'あいう えお.mp3'))
    assert os.path.exists(os.path.join(dump_music_path, 'あいう えお.mp3'))
# noinspection PyShadowingNames
def test_load_from_playlist_path(playlist_dir, playlist_current2, playlist_current):
    """Both .m3u and .m3u8 files in the directory must be discovered."""
    playlist_path = str(playlist_dir)
    allowed_pattern = ['*.m3u', '*.m3u8']
    path_list = M3uDump.load_from_playlist_path(playlist_path, allowed_pattern)
    # os.walk order is not guaranteed
    # NOTE(review): the indexed asserts below contradict the comment above —
    # if os.walk order varies, this test is flaky; consider sorting.
    assert 'playlist.m3u' in path_list[0]
    assert 'playlist2.m3u8' in path_list[1]
    assert len(path_list) == 2
|
import os
from utils import load_as_dictionary
# Load application config; the CONFIG_PATH env var overrides the default file.
config = load_as_dictionary(os.environ.get("CONFIG_PATH", "config/config.yaml"))
|
from flask import Flask, render_template, request, jsonify
from evaluate import tweetscore
import evaluate
import emoticonTranslator
from text2speech import synthesize_text_file
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def result():
    """GET renders the form; POST scores the phrase and returns JSON."""
    # Guard clause: anything but POST just serves the page.
    if request.method != 'POST':
        return render_template("index.html")

    phrase = request.form['phrase']
    # Map the raw model score from [-1, 1] to a 0-100 percentage, 2 decimals.
    raw_score = tweetscore(phrase)
    score = float("{0:.2f}".format((raw_score + 1) / 2 * 100))
    isSarcastic = score > 50
    if isSarcastic:
        print("Yep sarcastic.")
    newAudio = synthesize_text_file(phrase, isSarcastic)
    print(newAudio)
    return jsonify({"score": str(score), "newAudio": str(newAudio)})
# Run the Flask development server only when executed directly.
if __name__ == '__main__':
    app.run(debug = True)
|
def matrix_multiple(first, second, mod=1000000007):
    """Multiply two square matrices element-wise-summed modulo ``mod``.

    Generalized from a hard-coded 8x8 to any n x n (n taken from
    ``len(first)``) and the modulus made a defaulted parameter; existing
    8x8 call sites behave exactly as before.
    """
    n = len(first)
    result = [[0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            acc = 0
            for k in range(n):
                acc += first[i][k] * second[k][j]
            # Reduce once per cell, matching the original's timing of the mod.
            result[i][j] = acc % mod
    return result
# matrix[i] will hold matrix[0] raised to the power 2**i (mod 1e9+7),
# computed by repeated squaring below.
matrix = [0 for i in range(32)]
matrix[0] = [[0,1,1,0,0,0,0,0],
             [1,0,1,1,0,0,0,0],
             [1,1,0,1,1,0,0,0],
             [0,1,1,0,1,1,0,0],
             [0,0,1,1,0,1,1,0],
             [0,0,0,1,1,0,0,1],
             [0,0,0,0,1,0,0,1],
             [0,0,0,0,0,1,1,0]]
# s_matrix starts as the 8x8 identity and accumulates the selected powers.
s_matrix = [[1,0,0,0,0,0,0,0],
            [0,1,0,0,0,0,0,0],
            [0,0,1,0,0,0,0,0],
            [0,0,0,1,0,0,0,0],
            [0,0,0,0,1,0,0,0],
            [0,0,0,0,0,1,0,0],
            [0,0,0,0,0,0,1,0],
            [0,0,0,0,0,0,0,1]]
# Precompute the squares: matrix[i] = matrix[i-1] ** 2.
for i in list(range(1,32)):
    matrix[i] = matrix_multiple(matrix[i-1], matrix[i-1])
# Binary decomposition of the exponent D read from stdin: multiply
# s_matrix by matrix[i] whenever bit i of D is set (scanning from bit 31).
D = int(input())
s = int(pow(2, 31))
i = 31
while s != 0:
    if D >= s:
        D = D - s
        s_matrix = matrix_multiple(s_matrix, matrix[i])
    s = s // 2
    i -= 1
# After the loop s_matrix == matrix[0] ** D (mod 1e9+7); print entry (0, 0).
print (s_matrix[0][0])
|
# coding: utf-8
# In[13]:
import psycopg2 as pg
import csv
import os
import sys
def csv2db(dbname, schema, host, user, password, csvfile):
    """Load a pipe-delimited CSV into a freshly (re)created Postgres table.

    The table name is the CSV basename (optionally schema-qualified); the
    first row supplies the column names, every column is typed varchar, and
    an index is created for each column whose name ends in ``_id``.

    Fixes over the original:
    - The bare ``except`` printed a message and then fell through to use an
      undefined ``conn``; now the connection error is re-raised.
    - Row values were interpolated into the INSERT string (broke on quotes,
      SQL-injectable); now passed as query parameters.
    - The documented "skip lines that don't have the right number of
      columns" behavior is actually implemented.

    Raises:
        psycopg2.OperationalError: if the connection cannot be established.
    """
    connect_cmd = ('dbname="' + dbname + '" user="' + user + '" host="'
                   + host + '" password="' + password + '"')
    try:
        conn = pg.connect(connect_cmd)
    except pg.OperationalError:
        print("Unable to connect to the database")
        raise  # fail loudly instead of continuing with an undefined conn
    cur = conn.cursor()
    # remove the path and extension and use what's left as a table name
    tablename = os.path.splitext(os.path.basename(csvfile))[0]
    if schema != '':
        tablename = schema + '.' + tablename
    # NOTE(review): table/column identifiers cannot be parameterized and are
    # interpolated below — only load CSVs from trusted sources.
    with open(csvfile, "r") as f:
        reader = csv.reader(f, delimiter='|', quotechar='"')
        header = True
        num_columns = 0
        for row in reader:
            if header:
                # gather column names from the first row of the csv
                header = False
                num_columns = len(row)
                cur.execute("DROP TABLE IF EXISTS %s;" % tablename)
                sql = "CREATE TABLE %s (%s)" % (
                    tablename,
                    ", ".join(["%s varchar" % column for column in row]))
                cur.execute(sql)
                conn.commit()
                for column in row:
                    if column.lower().endswith("_id"):
                        index = "%s__%s" % (tablename, column)
                        cur.execute("CREATE INDEX %s on %s (%s)"
                                    % (index, tablename, column))
                        conn.commit()
            else:
                # skip lines that don't have the right number of columns
                if len(row) != num_columns:
                    continue
                # Parameterized values: psycopg2 handles quoting/escaping.
                placeholders = ", ".join(["%s"] * num_columns)
                cur.execute("INSERT INTO " + tablename + " VALUES ("
                            + placeholders + ")", row)
                conn.commit()
    cur.close()
    conn.close()
|
import csv
import glob
import logging
import os
import re
from datetime import datetime
from random import Random
import global_constants
import function_library as func_lib
import consecutive_words_format
import word_list_format
from tqdm import tqdm
# Logging
logs_folder = 'logs'
os.makedirs(logs_folder, exist_ok=True)
# Attempt to make the basic config instantiation global.
process_logs_folder = 'process_logs'
os.makedirs(process_logs_folder, exist_ok=True)
# One info-level log file per CAPTCHA type, message-only format.
logging.basicConfig(filename=os.path.join(process_logs_folder, 'info_' + global_constants.CAPTCHA_TYPE + '.log'), format='%(message)s',
                    level=logging.INFO)
def produce_clips_for_user_study(study_input_folder, audio_type, process_time, output_file_tag, study_output_folder,
                                 file_ending=".wav", global_csv_writer=None, selected_csv_writer=None):
    """Read the files
    Allocate to different Captcha Types
    Execute CAPTCHA generation for each file
    log eligible CAPTCHA files
    """
    file_list = glob.glob(study_input_folder + os.path.sep + "*" + file_ending)
    # Set to a constant seed - Pick a number you like
    Random(2018).shuffle(file_list)
    # Each supported CAPTCHA type processes a disjoint third of the shuffled list.
    partition_length = int(len(file_list) / 3)
    if global_constants.CAPTCHA_TYPE == "3b":
        file_list = file_list[partition_length * 0: partition_length * 1]
    elif global_constants.CAPTCHA_TYPE == "2":
        file_list = file_list[partition_length * 1: partition_length * 2]
    elif global_constants.CAPTCHA_TYPE == "4":
        file_list = file_list[partition_length * 2: partition_length * 3]
    else:
        raise Exception("Captcha Type not supported " + global_constants.CAPTCHA_TYPE)
    # Stores a list of all the clips sent to the IBM network for clip verification
    global_clip_rows = []
    # Stores a list of only those clips which beat the IBM system.
    selected_rows = []
    # NOTE(review): source_regex is never used below — confirm it is dead code.
    source_regex = r"(?<=" + re.escape(study_input_folder) + r").+?(?=.wav)"
    for file_index, file_path in tqdm(enumerate(file_list)):
        try:
            func_lib.check_and_clip_loud_volume(file_path)
            _, extract_name = os.path.split(file_path)
            extract_name, _ = os.path.splitext(extract_name)
            if extract_name == "":
                continue
            # Timestamp the clip name so reruns do not collide.
            extract_name = extract_name + "_" + process_time
            # Type "4" uses the word-list format; other types use consecutive words.
            if global_constants.CAPTCHA_TYPE == "4":
                word_list_format.user_study_function(file_path, study_output_folder, extract_name, audio_type,
                                                     global_clip_rows, selected_rows)
            else:
                consecutive_words_format.user_study_function(file_path, study_output_folder, extract_name,
                                                             audio_type, global_clip_rows, selected_rows)
            logging.info("Done for : " + file_path + " output : " + str(global_clip_rows) + str(selected_rows))
            # Flush accumulated rows to CSV, creating the writers on first use.
            if len(global_clip_rows) > 0:
                # Lazy load because the system other wise creates loads of empty excel files for test procedures.
                if global_csv_writer is None:
                    file_layout = open(os.path.join("logs", "detail_" + process_time + "_" +
                                                    output_file_tag + ".csv"), "w", newline='')
                    global_csv_writer = csv.writer(file_layout)
                global_csv_writer.writerows(global_clip_rows)
                global_clip_rows = []
            if len(selected_rows) > 0:
                # Lazy load because the system other wise creates loads of empty excel files for test procedures.
                if selected_csv_writer is None:
                    selected_strings_layout = open(os.path.join("logs", "selected_" + process_time + "_" +
                                                                output_file_tag + ".csv"), "w", newline='')
                    selected_csv_writer = csv.writer(selected_strings_layout)
                selected_csv_writer.writerows(selected_rows)
                selected_rows = []
        except TimeoutError as timeOut:
            # IBM quota exhaustion is unrecoverable for this run: log and stop.
            logging.exception("Probably reached the limit on the IBM resources. Processed till - " + str(file_index))
            print("TimeoutError!\nProbably reached the limit on the IBM resources. Processed till - ", file_index, file_path, global_constants.CAPTCHA_TYPE, timeOut)
            return
        except Exception as fileException:
            # Any other per-file failure is logged; the loop continues.
            logging.exception(str(fileException))
def debug():
    """Chunk input files. Required because 30 min files don't return.
    The execute prepare_for_user_study for each audio source type.
    """
    try:
        # Per-source configuration: folder mapping and whether chunking is needed.
        audio_property_list = [
            {"type": "indian_lecture", "output": "indian_lecture/", "input": "indian_lecture/",
             "chunk_required": False},
            {"type": "podcast_lecture", "output": "podcast_lecture/", "input": "podcast_lecture/",
             "chunk_required": False},
            {"type": "YT_lecture", "output": "lecture/", "input": "lecture/", "chunk_required": False},
            {"type": "movie", "output": "movie/", "input": "movie/", "chunk_required": False},
            {"type": "song", "output": "song/", "input": "song/", "chunk_required": False},
            {"type": "radio", "output": "radio/", "input": "philip_marlowe/", "chunk_required": False}]
        for type_entry in audio_property_list:
            # NOTE: this filter means only YT_lecture is actually processed.
            if type_entry['type'] != 'YT_lecture':
                continue
            chunk_location = os.path.join(global_constants.INPUT_CHUNK_STAGE, type_entry["output"])
            if type_entry["chunk_required"]:
                func_lib.save_to_chunks(global_constants.INPUT_DATA_STAGE, chunk_location, type_entry["input"])
            # Filesystem-safe timestamp used to tag this run's output files.
            main_process_start_time = str(datetime.now()).replace(" ", "_").replace(":", "_").replace(".", "_")
            output_file_tag = "_".join(["REBOOT", type_entry['type'], global_constants.CAPTCHA_TYPE])
            produce_clips_for_user_study(chunk_location, type_entry["type"], main_process_start_time, output_file_tag, global_constants.OUTPUT_DATA_DETAILS_STAGE,
                                         file_ending=".wav")
    except Exception as e:
        # Top-level boundary: log and report, never crash the batch driver.
        logging.exception(str(e))
        print(str(e))
# Script entry point.
if __name__ == '__main__':
    debug()
|
# Access a bundled dataset
# Visualize the dataset
# Load the local dataset
# Print the number of samples in the training and test sets
import tensorflow as tf
boston_housing = tf.keras.datasets.boston_housing
(train_x,train_y),(test_x,test_y) = boston_housing.load_data()
# print("Training set:",len(train_x))
# print("Testing set:",len(test_x))
# Change the train/test split ratio
(train_x,train_y),(test_x,test_y) = boston_housing.load_data(test_split=0) # put everything into the training set
# print("Training set:",len(train_x))
# print("Testing set:",len(test_x))
# Access the data inside the dataset
type(train_x)
type(train_y)
print("Dim of train_x:",train_x.ndim) # number of dimensions (rank)
print("Shape of train_x:",train_x.shape) # shape
print("Dim of train_y:",train_y.ndim)
print("Shape of train_y:",train_y.shape)
# Access the first 5 rows
print(train_x[0:5])
# Print column 6 (index 5)
print(train_x[:,5])
# Print all of train_y
print(train_y)
# Dataset visualization:
# relationship between number of rooms and house price
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
boston_housing = tf.keras.datasets.boston_housing
(train_x,train_y),(_,_) = boston_housing.load_data(test_split=0)
# Plot
plt.figure(figsize=(5,5))
plt.scatter(train_x[:,5],train_y)
plt.xlabel("RM")
plt.ylabel("Price($1000's)")
plt.title("5. RM-Price")
plt.show()
|
import math
def iszhishu(num):
    """Return True if *num* is prime.

    Trial division by every integer from 2 up to sqrt(num): any composite
    number must have a divisor no larger than its square root.
    """
    if num <= 3:
        # 2 and 3 are prime; 1, 0 and negatives are not.
        return num > 1
    # Bug fix: the original iterated over the two-element tuple
    # (2, sqrt_num + 1) instead of range(2, sqrt_num + 1), so e.g. 9 and 15
    # were misreported as prime.
    sqrt_num = int(math.sqrt(num))
    for i in range(2, sqrt_num + 1):
        if num % i == 0:
            return False
    return True
def iszhishu_best(num):
    """Return True if *num* is prime, using the 6k±1 optimisation.

    Every prime greater than 3 has the form 6x-1 or 6x+1: 6x, 6x+2 and 6x+4
    are even and 6x+3 is divisible by 3, leaving only 6x+1 and 6x+5 (i.e.
    6x-1) as candidates.  After the cheap modulo-6 filter we trial-divide by
    i and i+2 with i stepping 5, 11, 17, ... up to sqrt(num).
    """
    if num <= 3:
        return num > 1
    if num % 6 != 1 and num % 6 != 5:
        return False
    # Bug fix: the original iterated over the tuple (5, sqrt_num + 1, 6)
    # instead of range(5, sqrt_num + 1, 6), so e.g. 121 was reported prime.
    sqrt_num = int(math.sqrt(num))
    for i in range(5, sqrt_num + 1, 6):
        if num % i == 0 or num % (i + 2) == 0:
            return False
    return True
|
from challenges.hashtable.hashtable import HashTable
def test_create():
    # A freshly constructed table should be truthy.
    table = HashTable()
    assert table
def test_predictable_hash():
    # Hashing the same key twice must yield the same slot.
    table = HashTable()
    first = table._hash('spam')
    second = table._hash('spam')
    assert first == second
def test_in_range_hash():
    # A hash must land inside the table's backing storage.
    table = HashTable()
    slot = table._hash('spam')
    assert 0 <= slot < table.size
def test_same_hash():
    # Anagrams are expected to hash identically.
    table = HashTable()
    assert table._hash('listen') == table._hash('silent')
def test_different_hash():
    # Non-anagram strings should land in different slots.
    table = HashTable()
    assert table._hash('glisten') != table._hash('silent')
def test_get_add():
    # A stored value must be retrievable by its key.
    table = HashTable()
    table.add('cat','dog')
    assert table.get('cat') == 'dog'
def test_contains():
    # contains() reports membership of a previously added key.
    table = HashTable()
    table.add('cat','dog')
    assert table.contains('cat') == True
import torch
class Polynom(torch.nn.Module):
    """Degree-2 polynomial in 5 features: y = sum_j(w_j*x_j + w_{j+5}*x_j^2) + b.

    The 5 inputs are duplicated and raised to exponents [1]*5 + [2]*5, so
    ``w`` holds the linear coefficients first and the quadratic ones second.
    """
    def __init__(self):
        super().__init__()
        self.w = torch.nn.Parameter(torch.zeros(10, dtype=torch.float64))
        self.b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float64))
        # Exponent vector [1,1,1,1,1,2,2,2,2,2] matching the duplicated inputs.
        self.power = torch.concat([torch.ones(5, dtype=torch.float64),
                                   torch.ones(5, dtype=torch.float64) + 1])

    def forward(self, x):
        """Map x of shape (batch, 5) to polynomial values of shape (batch,).

        Fixed: defined as ``forward`` instead of overriding ``__call__``, so
        nn.Module's call machinery (hooks etc.) keeps working; callers still
        invoke the module as ``model(x)``.
        """
        xx = torch.concat([x, x], dim=1)
        return torch.mul(self.w, torch.pow(xx, self.power)).sum(dim=1) + self.b
# torch.set_printoptions(precision=6, sci_mode=False)
# Read 1000 training rows (5 features + 1 target) followed by 1000 test rows
# (features only) from input.txt.  The context manager guarantees the file is
# closed (the original leaked the handle).
train_x = []
train_y = []
test_x = []
with open("input.txt", "r") as file_input:
    for _ in range(1000):
        row = file_input.readline().split()
        row = list(map(lambda x: float(x), row))
        train_x.append(row[:-1])
        train_y.append(row[-1])
    for _ in range(1000):
        row = file_input.readline().split()
        row = list(map(lambda x: float(x), row))
        test_x.append(row)
train_x, train_y, test_x = torch.tensor(train_x, dtype=torch.float64), \
                           torch.tensor(train_y, dtype=torch.float64), \
                           torch.tensor(test_x, dtype=torch.float64)
# Standardise the features with training-set statistics.
mean_x = torch.mean(train_x, dim=0)
std_x = torch.std(train_x, dim=0)
train_x = (train_x - mean_x) / std_x
model = Polynom()
learning_rate = 1
batch_size = 250
step_count = 500 + 1
loss_fn = torch.nn.MSELoss(reduction='sum')
optim = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Mini-batch training: each outer step is one full pass over the 1000
# samples in batches of 250.
for t in range(step_count):
    for id_batch in range(batch_size, len(train_x) + 1, batch_size):
        y_pred = model(train_x[id_batch - batch_size:id_batch])
        loss = loss_fn(y_pred, train_y[id_batch - batch_size:id_batch])
        optim.zero_grad()
        loss.backward()
        optim.step()
# Apply the training normalisation to the test features before predicting,
# then print one prediction per line.
predict_y = model((test_x - mean_x) / std_x)
for el in predict_y:
    print(el.item())
|
#!/bin/python
import sys
import re
def valid_byr(value):
    """Birth year: a four-digit number in [1920, 2002]."""
    match = re.search(r"\d{4}", value)
    if match is None:
        return False
    year = int(match.group())
    return 1920 <= year <= 2002
def valid_iyr(value):
    """Issue year: a four-digit number in [2010, 2020]."""
    match = re.search(r"\d{4}", value)
    if match is None:
        return False
    year = int(match.group())
    return 2010 <= year <= 2020
def valid_eyr(value):
    """Expiration year: a four-digit number in [2020, 2030]."""
    match = re.search(r"\d{4}", value)
    if match is None:
        return False
    year = int(match.group())
    return 2020 <= year <= 2030
def valid_hgt(value):
    """Height: '<n>cm' in [150, 193] or '<n>in' in [59, 76]."""
    match = re.search(r"(\d+)(cm|in)", value)
    if match is None:
        return False
    size = int(match.group(1))
    unit = match.group(2)
    if unit == 'cm':
        return 150 <= size <= 193
    if unit == 'in':
        return 59 <= size <= 76
    return False
def valid_hcl(value):
    """Hair colour: '#' followed by exactly six lowercase hex digits."""
    return re.search(r"^#[0-9a-f]{6}$", value) is not None
def valid_ecl(value):
    """Eye colour: must be one of the seven allowed codes."""
    allowed = ('amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth')
    return value in allowed
def valid_pid(value):
    """Passport id: exactly nine digits (leading zeros allowed)."""
    return re.search(r"^[0-9]{9}$", value) is not None
def validate(passport):
    """Return 1 when *passport* (dict of field -> raw string) passes every
    required-field validator, else 0.  "cid" is deliberately not required.

    Python 2 code: uses dict.has_key() and print statements.
    """
    # Map each required field name to its validator function.
    fields = {
        "byr" : valid_byr,
        "iyr" : valid_iyr,
        "eyr" : valid_eyr,
        "hgt" : valid_hgt,
        "hcl" : valid_hcl,
        "ecl" : valid_ecl,
        "pid" : valid_pid
        }
    # Cheap reject: fewer than 7 keys can never satisfy all required fields.
    if len(passport) < 7:
        return 0
    for field in fields.keys():
        # Missing field -> invalid ("badf" = bad field).
        if not passport.has_key(field):
            print "badf", passport, field
            return 0
        # Present but failing its validator -> invalid ("badv" = bad value).
        if not fields[field](passport[field]):
            print "badv", passport, field, passport[field]
            return 0
    return 1
infile = open(sys.argv[1], "r")
current = {}
valid = 0
# Input format: passports are blank-line separated blocks of space-delimited
# key:value pairs, possibly spread over several lines.
for line in infile:
    if line == "\n":
        # End of one passport record: score it and start collecting the next.
        valid += validate(current)
        current = {}
    pairs = line.rstrip().split(" ")
    for p in pairs:
        if p == "":
            break
        pp = p.split(":")
        current[pp[0]] = pp[1]
# NOTE(review): the final passport is only counted when the input ends with a
# blank line -- confirm the input format before relying on this.
print valid
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
import re
import os
# Root directory holding the Stanford segmenter/CoreNLP distributions.
stanford_corenlp_path = r'/media/mcislab3d/Seagate Backup Plus Drive/zwt/stanford corenlp'
def segment_sentences_char(sentence_list):
    """Character-level segmentation: join each sentence's characters with spaces."""
    segmented = []
    for sentence in sentence_list:
        segmented.append(' '.join(sentence))
    return segmented
def segment_sentences(sentence_list):
    """Word-segment Chinese sentences with the Stanford segmenter (PKU model).

    Returns one segmented string per input sentence and asserts the
    segmenter produced exactly one output line per input sentence.
    """
    segmenter = StanfordSegmenter(
        java_class=r"edu.stanford.nlp.ie.crf.CRFClassifier",
        path_to_jar=os.path.join(stanford_corenlp_path, 'stanford-segmenter-2018-02-27', 'stanford-segmenter-3.9.1.jar'),
        path_to_slf4j=os.path.join(stanford_corenlp_path, 'slf4j-api-1.7.25.jar'),
        path_to_sihan_corpora_dict=os.path.join(stanford_corenlp_path, 'stanford-segmenter-2018-02-27', 'data'),
        path_to_model=os.path.join(stanford_corenlp_path, 'stanford-segmenter-2018-02-27', 'data', 'pku.gz'),
        path_to_dict=os.path.join(stanford_corenlp_path, 'stanford-segmenter-2018-02-27', 'data', 'dict-chris6.ser.gz'),
        sihan_post_processing='true'
    )
    result = segmenter.segment_sents(sentence_list)
    result = result.strip()
    # NOTE(review): splitting on os.linesep assumes the segmenter emits
    # platform-native line endings; on Windows a '\n'-only output would not
    # split -- confirm if this ever runs off Linux.
    segmented_list = re.split(os.linesep, result)
    # Drop a trailing empty line left over from the final newline.
    if len(segmented_list[-1]) == 0:
        segmented_list = segmented_list[:-1]
    if len(segmented_list) != len(sentence_list):
        # Diagnostic dump: show sentences whose characters fail to round-trip.
        for i in range(len(segmented_list)):
            ss = ''.join(segmented_list[i].split())
            if ss != sentence_list[i]:
                print(i, '|', segmented_list[i], '|', sentence_list[i])
                # break
        print(len(segmented_list), len(sentence_list))
    assert len(segmented_list) == len(sentence_list)
    return segmented_list
def function(*values):
    """Show that positional varargs arrive as a tuple."""
    print(type(values))


function(1,2,3,5,6,7,7)
"""sum =0
def function1(*args): #variable length argument
for each in args:
sum += each
"""
#function1(1,2,3,5,6,7,7)
def function3(**options):
    """Show that keyword varargs arrive as a dict."""
    print(type(options))


function3(a=1,b=2,c=4)
def function4(**kwargs):
    """Print the sum of all keyword-argument values.

    Fixed: the original used the Python 2 statement ``print sum`` in an
    otherwise Python 3 file (every other call uses print(...)), and shadowed
    the builtin ``sum`` with a local variable.
    """
    total = 0
    for k, v in kwargs.items():
        total += v
    print(total)
function4(a=1,b=2,c=4) |
# You can use this file to execute any code to be run when importing a module
# in the package for the first time
# (the print below is a deliberate side effect confirming __init__ executed)
print("Hello from the init.py")
#!/usr/bin/env python
"""
Delete CouchDB requests.
Delete requests in CouchDB specified by names (CouchDB IDs) in the input
file. Needs to have credentials for accessing CMS web ready in
$X509_USER_CERT $X509_USER_KEY, or proxy stored in /tmp/x509up_u<ID>
CMSCouch.Database only sets _deleted=True flag (all fields remain in
the database), using DELETE HTTP verb, the document stays in the database
too, however, only id, rev, and _deleted flag, everything else is wiped.
"""
from __future__ import print_function
# Production CMS web host; main() strips the scheme before connecting.
couch_host = "https://cmsweb.cern.ch"
# ReqMgr workload cache database path within CouchDB.
couch_uri = "couchdb/reqmgr_workload_cache"
import sys
import os
import httplib
import json
def main():
    """Delete the CouchDB requests named (one per line) in the input file.

    For each request: GET the document to obtain its current ``_rev``, then
    DELETE with that revision appended -- CouchDB answers 409
    "Document update conflict" when the revision is missing or stale.
    """
    # couch_uri is only read, so it needs no global declaration; couch_host
    # is rewritten below, hence the global statement.
    global couch_host
    if len(sys.argv) < 2:
        print("Requires 1 input argument: file with a list of requests to "
              "delete.")
        sys.exit(1)
    if couch_host.startswith("https://"):
        couch_host = couch_host.replace("https://", '')
    # Grid credentials from the environment, falling back to the VOMS proxy.
    key_file = os.getenv("X509_USER_KEY", None) or "/tmp/x509up_u%s" % os.getuid()
    cert_file = os.getenv("X509_USER_CERT", None) or "/tmp/x509up_u%s" % os.getuid()
    conn = httplib.HTTPSConnection(couch_host, key_file=key_file, cert_file=cert_file)
    input_file = sys.argv[1]
    # 'with' guarantees the handle is closed even on the early sys.exit()
    # path below (the original leaked it there).
    with open(input_file, 'r') as f:
        for request_name in f:
            request_name = request_name.strip()
            print("Deleting request: '%s' ... " % request_name)
            uri = "/%s/%s" % (couch_uri, request_name)
            print("Getting document revision _rev ...")
            # Fetch the document to learn its current revision.
            conn.request("GET", uri, None)
            resp = conn.getresponse()
            print("Response: %s" % resp.status)
            try:
                data = json.loads(resp.read())
            except Exception as ex:
                print("Reason: %s, %s" % (resp.reason, ex))
                sys.exit(1)
            if resp.status != 200:
                print(data)
                print("Skipping ...")
                continue
            rev = data["_rev"]
            print("Delete request itself ...")
            uri += "?rev=%s" % rev
            conn.request("DELETE", uri, None)
            resp = conn.getresponse()
            # Must consume the body, otherwise httplib raises
            # ResponseNotReady on the next request over this connection.
            data = resp.read()
            print("Response: %s\n" % resp.status)
if __name__ == "__main__":
main() |
#!/usr/bin/env python
from dsx import *
# Declaration of all MWMR fifos -- Mwmr(name, width, depth); widths follow
# each pipeline stage's payload (presumably in words -- confirm in dsx docs).
tg_demux = Mwmr('tg_demux', 32, 2)
demux_vld = Mwmr('demux_vld', 32, 2)
vld_iqzz = Mwmr('vld_iqzz', 128, 2)
iqzz_idct = Mwmr('iqzz_idct', 256, 2)
idct_libu = Mwmr('idct_libu', 64, 2)
libu_ramdac = Mwmr('libu_ramdac', 8 * 48, 2)
huffman = Mwmr('huffman', 32, 2)
quanti = Mwmr('quanti', 64, 2)

# MJPEG pipeline task graph:
#   tg -> demux -> vld -> iqzz -> idct -> libu -> ramdac
# demux additionally feeds Huffman tables to vld and quantisation tables
# to iqzz.  All frame-sized stages work on 48x48 images.
tcg = Tcg(
    Task('tg', "tg",
         {'output': tg_demux},
         defines={'FILE_NAME': '"plan.mjpg"'}),
    Task('demux', "demux",
         {'input': tg_demux,
          'output': demux_vld,
          'huffman': huffman,
          'quanti': quanti},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
    Task('vld', 'vld',
         {'input': demux_vld,
          'output': vld_iqzz,
          'huffman': huffman},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
    Task('iqzz', 'iqzz',
         {'input': vld_iqzz,
          'output': iqzz_idct,
          'quanti': quanti},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
    Task('idct', 'idct',
         {'input': iqzz_idct,
          'output': idct_libu},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
    Task('libu', 'libu',
         {'input': idct_libu,
          'output': libu_ramdac},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
    Task('ramdac', "ramdac",
         {'input': libu_ramdac},
         defines={'WIDTH': "48", 'HEIGHT': "48"}),
)
p = Posix()
tcg.generate(p)
|
#The Game of choosing a number between 0 and 100
# Python 2 script: the computer picks a random number; the player narrows it
# down while the shrinking [Down, UP] interval is shown each turn.
# NOTE(review): "guss" (guess) is a typo pervading both identifiers and the
# user-facing strings; left untouched so output is unchanged.
import random
Q = (random.randint(0, 100))
print Q
UP = int(100)
Down = int(0)
I = int(0)
# Loop until the player hits Q exactly (I flips to 1).
while (I==0):
    print "your guss should be between" , (Down,UP)
    guss=raw_input ("Enter your guss:\n")
    guss=int(guss)
    if guss==Q:
        I=int(1)
    if guss<Q:
        Down=guss
    if guss>Q:
        UP=guss
print "Finally you find the computer No. which is: %d" % guss
|
#!/usr/bin/python
#\file slider4.py
#\brief New QWidget slider class
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Apr.15, 2021
import sys
from PyQt4 import QtCore,QtGui
class TSlider(QtGui.QWidget):
  """Horizontal slider over a numeric range with a fixed step.

  range_step is (min, max, step); the wrapped QSlider runs over integer
  positions 0..slider_max and convert_from/convert_to translate between
  positions and user-range values.  A text label echoes the current value
  and optional tick labels annotate the range.
  NOTE(review): the position arithmetic relies on Python-2 integer division
  when min/max/step are ints -- confirm before porting to Python 3."""
  def __init__(self, *args, **kwargs):
    super(TSlider, self).__init__(*args, **kwargs)
  def convert_from(self, slider_value):
    # Slider position -> user value, clamped to the range maximum.
    return min(self.range_step[1], self.range_step[0] + self.range_step[2]*slider_value)
  def convert_to(self, value):
    # User value -> slider position, clamped to [0, slider_max].
    return max(0,min(self.slider_max,(value-self.range_step[0])/self.range_step[2]))
  def value(self):
    # Current value in user units.
    return self.convert_from(self.slider.value())
  def setValue(self, value):
    # Move the slider to *value* and refresh the text label.
    slider_value= self.convert_to(value)
    self.slider.setValue(slider_value)
    self.setLabel(value)
  def setLabel(self, value):
    # Right-justify so the label is as wide as the largest possible value.
    self.label.setText(str(value).rjust(len(str(self.range_step[1]))))
  #style: 0:Default, 1:Variable handle size.
  def Construct(self, range_step, n_labels, slider_style, onvaluechange):
    """Build the widget: slider, value label and n_labels tick captions.

    onvaluechange is an optional callback invoked (after the internal label
    update) on every value change."""
    self.range_step= range_step
    self.slider_max= (self.range_step[1]-self.range_step[0])/self.range_step[2]
    self.slider_style= slider_style
    self.layout= QtGui.QGridLayout()
    # Vertical spacers above/below keep the slider row vertically centred.
    vspacer1= QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
    self.layout.addItem(vspacer1, 0, 0, 1, n_labels+1)
    self.slider= QtGui.QSlider(QtCore.Qt.Horizontal, self)
    #self.slider.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
    self.slider.setTickPosition(QtGui.QSlider.TicksBothSides)
    self.slider.setRange(0, self.slider_max)
    self.slider.setTickInterval(1)
    self.slider.setSingleStep(1)
    #self.slider.move(10, 60)
    #self.slider.resize(100, 20)
    # Update the label first, then forward to the user callback; the [-1]
    # trick evaluates both expressions and returns the callback's result.
    self.slider.valueChanged.connect(lambda *args,**kwargs:(self.setLabel(self.value()), onvaluechange(*args,**kwargs) if onvaluechange else None)[-1])
    self.layout.addWidget(self.slider, 1, 0, 1, n_labels)
    self.label= QtGui.QLabel('0',self)
    self.layout.addWidget(self.label, 1, n_labels, 1, 1, QtCore.Qt.AlignLeft)
    #hspacer1= QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
    #self.layout.addItem(hspacer1, 1, n_labels+1)
    self.tick_labels= []
    if n_labels>1:
      #tick_font= QtGui.QFont(self.label.font().family(), self.label.font().pointSize()*0.6)
      # Evenly spaced tick captions; alignment depends on position so the
      # outermost captions hug the slider's ends.
      label_step= (range_step[1]-range_step[0])/(n_labels-1)
      for i_label in range(n_labels):
        label= str(range_step[0]+i_label*label_step)
        tick_label= QtGui.QLabel(label,self)
        #tick_label.setFont(tick_font)
        if i_label<(n_labels-1)/2: align= QtCore.Qt.AlignLeft
        elif i_label==(n_labels-1)/2: align= QtCore.Qt.AlignCenter
        else: align= QtCore.Qt.AlignRight
        self.layout.addWidget(tick_label, 2, i_label, 1, 1, align)
        self.tick_labels.append(tick_label)
    vspacer2= QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
    self.layout.addItem(vspacer2, 3, 0, 1, n_labels+1)
    self.setValue(range_step[0])
    self.setLayout(self.layout)
    self.setStyleForFont(self.label.font())
  def setStyleForFont(self, f):
    # Scale tick labels to 60% of the main font and restyle the slider.
    tick_f= QtGui.QFont(f.family(), f.pointSize()*0.6)
    for tick_label in self.tick_labels:
      tick_label.setFont(tick_f)
    if self.slider_style==0:
      self.slider.setStyleSheet('')
    elif self.slider_style==1:
      # Style 1: groove/handle sized proportionally to the font size.
      h0= f.pointSize()*2
      h1= h0+8
      self.slider.setStyleSheet('''
        QSlider {{
          height: {1}px;
        }}
        QSlider::groove:horizontal {{
          background: transparent;
          border: 2px solid #aaa;
          height: {0}px;
          margin: 0 0;
        }}
        QSlider::handle:horizontal {{
          background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f);
          border: 1px solid #5c5c5c;
          width: {0}px;
          margin: 0 0;
          border-radius: 3px;
        }}
        '''.format(h0,h1))
  def setFont(self, f):
    # Apply *f* to the value label and re-derive the dependent styles.
    self.label.setFont(f)
    self.setStyleForFont(f)
def Print(*s):
  # Python-2 style helper: print all arguments on one line, then a newline.
  for ss in s: print ss,
  print ''
class TSliderTest(QtGui.QWidget):
  """Demo window: one TSlider plus an Exit button with font auto-resizing."""
  def __init__(self):
    QtGui.QWidget.__init__(self)
    self.InitUI()
  def InitUI(self):
    """Create the slider and button and wire up their callbacks."""
    # Set window size.
    self.resize(320, 120)
    # Set window title
    self.setWindowTitle("SliderTest")
    mainlayout= QtGui.QVBoxLayout()
    self.setLayout(mainlayout)
    slider1= TSlider(self)
    slider1.Construct([1000,1800,100], n_labels=5, slider_style=1, onvaluechange=lambda _:Print(slider1.value()))
    slider1.setValue(1600)
    # (min, max) font sizes consumed by ResizeText below.
    slider1.font_size= (10,30)
    slider1.setFont(QtGui.QFont('', slider1.font_size[0]))
    self.slider1= slider1
    mainlayout.addWidget(slider1)
    # Add a button
    btn1= QtGui.QPushButton('_________Exit?_________', self)
    #btn1.setFlat(True)
    btn1.setToolTip('Click to make something happen')
    # Close only when the slider reads below 1500; otherwise print a hint.
    btn1.clicked.connect(lambda:self.close() if self.slider1.value()<1500 else Print('Hint: Set value less than 1500 to exit'))
    btn1.resize(btn1.sizeHint())
    #btn1.move(100, 150)
    btn1.font_size= (10,30)
    btn1.setFont(QtGui.QFont('', btn1.font_size[0]))
    #btn1.resizeEvent= lambda event,obj=btn1: self.ResizeText(obj,event)
    self.btn1= btn1
    mainlayout.addWidget(btn1)
    # Resizing the button rescales the fonts of both widgets.
    btn1.resizeEvent= lambda event,objs=(btn1,slider1): ([self.ResizeText(obj,event) for obj in objs]+[None])[-1]
    # Show window
    self.show()
  def ResizeText(self, obj, event):
    """Scale obj's font with window height, clamped to obj.font_size bounds."""
    font_size= min(obj.font_size[1],max(obj.font_size[0],int(self.rect().height()/100.*obj.font_size[0])))
    f= QtGui.QFont('', font_size)
    if isinstance(obj,QtGui.QRadioButton):
      obj.setStyleSheet('QRadioButton::indicator {{width:{0}px;height:{0}px;}};'.format(1.3*font_size))
    obj.setFont(f)
# Create an PyQT4 application object.
a = QtGui.QApplication(sys.argv)
# The QWidget widget is the base class of all user interface objects in PyQt4.
w = TSliderTest()
# Hand control to Qt's event loop and exit with its return code.
sys.exit(a.exec_())
|
from solvent import config
from solvent import run
from solvent import label
from upseto import gitwrapper
import logging
import os
class Submit:
    """Check a build product directory into osmosis object stores.

    The label encodes repository basename, product, git hash and build state
    (official/clean/dirty); official and clean builds are first verified
    with upseto to come from a pristine work tree.
    """
    def __init__(self, product, directory):
        self._product = product
        self._directory = directory
        git = gitwrapper.GitWrapper(os.getcwd())
        self._basename = git.originURLBasename()
        # The build state determines how the resulting label is tagged.
        if config.OFFICIAL_BUILD:
            self._state = 'officialcandidate'
        elif config.CLEAN:
            self._state = 'cleancandidate'
        else:
            self._state = 'dirty'
        self._label = label.label(
            basename=self._basename, product=self._product, hash=git.hash(), state=self._state)
        # Official/clean builds must satisfy upseto's cleanliness checks.
        if config.OFFICIAL_BUILD or config.CLEAN:
            run.run([
                "python", "-m", "upseto.main", "checkRequirements",
                "--allowNoManifest", "--unsullied", "--gitClean"])
    def go(self):
        """Submit to the local store and, when configured, the official one.

        Label collisions are resolved (or rejected) before any checkin."""
        self._handleCollision(config.LOCAL_OSMOSIS)
        if config.WITH_OFFICIAL_OBJECT_STORE:
            self._handleCollision(config.OFFICIAL_OSMOSIS)
        logging.info("Submitting locally as '%(label)s'", dict(label=self._label))
        self._checkin(config.LOCAL_OSMOSIS)
        if config.WITH_OFFICIAL_OBJECT_STORE:
            logging.info("Submitting to official store as '%(label)s'", dict(label=self._label))
            self._checkin(config.OFFICIAL_OSMOSIS)
        logging.info("Submitted as '%(label)s'", dict(label=self._label))
    def _hasLabel(self, objectStore):
        # listlabels is queried with an anchored exact-match pattern; an empty
        # result splits into one element, any match yields more than one line.
        output = run.run([
            "osmosis", "listlabels", '^' + self._label + '$', "--objectStores", objectStore])
        return len(output.split('\n')) > 1
    def _checkin(self, objectStore):
        # Upload the directory under the computed label with MD5 verification.
        run.run([
            "osmosis", "checkin", self._directory, self._label,
            "--MD5",
            "--objectStores", objectStore])
    def _eraseLabel(self, objectStore):
        run.run([
            "osmosis", "eraselabel", self._label,
            "--objectStores", objectStore])
    def _handleCollision(self, objectStore):
        # FORCE overwrites an existing label; otherwise fail loudly.
        if self._hasLabel(objectStore):
            if config.FORCE:
                self._eraseLabel(objectStore)
            else:
                raise Exception("Object store '%s' already has a label '%s'" % (
                    objectStore, self._label))
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# make the figure 3 from Hajo and Marks paper.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('agg')
    from matplotlib import pyplot as plt
    import xarray as xr
    import pandas as pd
    import geopandas as gpd
    from affine import Affine
    import numpy as np
    import rasterio, os, calendar, datetime
    import argparse

    # Command-line interface: only the base data directory is required.
    parser = argparse.ArgumentParser( description='plot fig 3 paper' )
    parser.add_argument( "-b", "--base_path", action='store', dest='base_path', type=str, help="input hourly directory containing the NSIDC_0051 data converted to GTiff" )
    args = parser.parse_args()
    base_path = args.base_path

    # Hann-smoothed sea-ice concentration series for Alaska.
    netcdf_fn = os.path.join( base_path, 'NetCDF','nsidc_0051_sic_nasateam_1978-2017_Alaska_hann_smoothed.nc' )
    ds = xr.open_dataset( netcdf_fn )
    # Affine transform used for world -> array coordinate lookups.
    a = Affine(*eval( ds.affine_transform )[:6])

    # [HARDWIRED] Barrow selection points; invert the affine to (col, row).
    points_fn = os.path.join( base_path,'selection_points','barrow_points.shp' )
    points = gpd.read_file( points_fn ).geometry.apply(lambda x: (x.x, x.y)).tolist()
    colrows = [ ~a*pt for pt in points ]
    colrows = [ (int(c),int(r)) for c,r in colrows ]
    cols = [c for c,r in colrows]
    rows = [r for c,r in colrows]

    # Read the pre-computed climatology, restricted to day-of-year 121..366,
    # and average the series over all selection points.
    clim_fn = netcdf_fn.replace( '.nc', '_climatology.nc' )
    clim = xr.open_dataset( clim_fn )
    clim_sel = clim.sel( dayofyear=slice(121, 366) )
    clim_hold = [ clim_sel.sic[:,r,c].values for c,r in colrows ]
    clim_mean = pd.Series( np.mean( clim_hold, axis=0 ), index=clim_sel.dayofyear.to_index() )

    # Plot the averaged climatology and write the PNG.
    plt.figure(figsize=(10, 4))
    clim_mean.plot( kind='line' )
    plt.tight_layout()
    plt.savefig(os.path.join(base_path, 'png','barrow_avg_hann_smoothed_fig3.png'), figsize=(20,2), dpi=300)
    plt.cla()
    plt.close()
|
from flask import Blueprint, request, jsonify
from regression_model.predict import make_prediction
from flask_cors import CORS
from api.config import get_logger
#from api.validation import validate_inputs
_logger = get_logger(logger_name=__name__)
prediction_app = Blueprint('prediction_app',__name__)
CORS(prediction_app)
@prediction_app.route('/', methods=['GET'])
def index():
    """Landing endpoint: return the app title and description as JSON."""
    title = "Full-Stack Data Science for House Price Prediction"
    heading = "This is the web app as final step for Full-Stack Data Science project. Code for the full life cycle of the project can be found from the link below."
    payload = {'title': title,
               'heading': heading
               }
    return jsonify(payload)
@prediction_app.route('/health',methods=['GET'])
def health():
    """Liveness probe: log and answer 'ok' on GET."""
    if request.method == 'GET':
        _logger.info('health status OK')
        return 'ok'
@prediction_app.route('/v1/predict/regression',methods=['POST'])
def predict():
    """Run the regression model on the POSTed JSON payload.

    Responds with {'prediction': <first prediction rounded to 2 decimals>}
    and HTTP 200.
    """
    if request.method == 'POST':
        # Step 1: extract the POST body as JSON.
        json_data = request.get_json()
        print(f'User input from UI: {json_data}')
        _logger.info(f'Inputs: {json_data}')
        # Step 2: marshmallow input validation is currently disabled.
        # Step 3: model prediction.
        result = make_prediction(input_data=json_data)
        _logger.info(f'Outputs: {result}')
        # Step 4: take the first prediction and round it for display.
        predictions = round(result.get('prediction')[0],2)
        return jsonify({'prediction': predictions}), 200
|
# Run a DoisOuUm ("two or one") game with log persistence disabled
# (6000 is presumably the round count -- confirm in the DoisOuUm class).
from doisouum import DoisOuUm
x = DoisOuUm(6000)
x.salvar_log(False)
x.executar()
|
#!/usr/bin/python3
""" 101-main """
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square
if __name__ == "__main__":
list_rectangles = [
Rectangle(2**i, 2**i) for i in range(1, 5)
]
list_squares = [
Square(2**i) for i in range(5, 9)
]
Base.draw(list_rectangles, list_squares)
|
# Hand-written trilepton flavour channel names (kept for compatibility).
channels = ['3mu', '2mu1e', '2e1mu', '3e']
allChannels = ['all'] + channels
# The channel class below is the more versatile replacement: it avoids long
# hand-written lists of flavour combinations like the one above.
class channel:
    """A lepton channel described by its electron and muon multiplicities."""

    def __init__(self, nElectrons=-1, nMuons=-1):
        # -1 means "any number" of that flavour.
        self.nE = nElectrons
        self.nM = nMuons
        # Concrete channels get names like "MuMuE"; inclusive ones are "all".
        if self.nE > -1 and self.nM > -1:
            self.name = "Mu" * self.nM + "E" * self.nE
        else:
            self.name = "all"
# Pre-built channel collections per lepton multiplicity; the leading
# channel(-1,-1) entry is the inclusive "all" channel.
singlelepChannels = [channel(1,0), channel(0,1)]
allSinglelepChannels= [channel(-1,-1)] + singlelepChannels
trilepChannels = [channel(3,0), channel(2,1), channel(1,2), channel(0,3)]
allTrilepChannels = [channel(-1,-1)] + trilepChannels
quadlepChannels = [channel(4,0), channel(3,1), channel(2,2), channel(1,3), channel(0,4)]
allQuadlepChannels = [channel(-1,-1)] + quadlepChannels
from TopEFT.Tools.helpers import mZ
def getZCut(mode, var, zMassRange=15):
    """Return a cut string selecting on-Z / off-Z events for variable *var*.

    Unknown modes yield the always-true cut "(1)".
    """
    distance = "abs(%s - %s)" % (var, mZ)
    selected = mode.lower()
    if selected == "onz":
        return distance + "<=" + str(zMassRange)
    if selected == "offz":
        return distance + ">" + str(zMassRange)
    return "(1)"
|
import sys
import glob
from multiprocessing import Process,Manager
from threading import Thread
import serial
import time
import os
import json
import hashlib
class bm:
    """Serial bridge to the "Banma Mama" hub: JSON request/response frames
    over a UART, with background send/receive daemon processes and
    snap-id-based matching of replies to requests.

    NOTE(review): rec_arr/send_arr/serial_enable are created at class
    definition time via multiprocessing.Manager(), so they are shared
    across all instances -- confirm single-instance usage.
    """
    msg=""
    rec_arr=Manager().list()
    send_arr=Manager().list()
    serial_enable=Manager().dict()
    ser=None
    def __init__(self,serial_port='/dev/ttyS1',bandurate=115200):
        # "bandurate" (sic) is the baud rate; the name is kept for API
        # stability.  Opens the port, then starts both daemon processes.
        try:
            self.ser=serial.Serial(serial_port,bandurate,timeout=30)
        except Exception as e:
            print(str(e))
            self.ser.close()
        self.serial_enable[0]=True
        self.ser_read_proc=Process(target=self.__serial_read)
        self.ser_send_proc=Process(target=self.__serial_send)
        self.ser_read_proc.start()
        self.ser_send_proc.start()
        #time.sleep(1)
        print("start")
    def __del__(self):
        # Replacing the shared dict flips the enable flag, which makes both
        # daemon loops return.
        enable={0:False}
        self.serial_enable=enable
        print("end")
    def __serial_send(self):# serial-send daemon loop
        while True:
            try:
                enable=self.serial_enable[0]
                if not enable:
                    return
            except Exception:
                return
            send_arr=self.send_arr
            try:
                if len(send_arr)>0:
                    # Write the oldest queued frame, then drop it.
                    self.ser.write(send_arr[0])
                    time.sleep(0.1)
                    send_arr.remove(send_arr[0])
                    self.send_arr=send_arr
            except Exception:
                pass
    def __serial_read(self,timeout=5):# serial-receive daemon loop
        #print("serial_begin")
        while True:
            try:
                enable=self.serial_enable[0]
                if not enable:
                    return
            except Exception:
                return
            tmp_arr=self.rec_arr
            rec_str=self.ser.readline()
            if rec_str:
                try:
                    # Normalise line endings and single quotes so the frame
                    # parses as JSON, then buffer it with its arrival time.
                    rec_str=rec_str.decode('utf-8').strip('\n').strip('\r').replace("'","\"")
                    tmp_arr.append([rec_str,time.time()])
                except Exception:
                    pass
            try:
                # Expire the oldest buffered message after *timeout* seconds.
                now_cmd=tmp_arr[0]
                if time.time()-now_cmd[1]>timeout:
                    tmp_arr.remove(now_cmd)
            except Exception:
                pass
            self.rec_arr=tmp_arr
    def __callback_th_func(self,callback,snap,timeout=5):
        # Poll for the matching response until *timeout*, then hand the
        # result (or None) to *callback*.
        start_time=time.time()
        while time.time()-start_time<=timeout:
            rec=self.__find_rec_by_snap(snap)
            if not rec is None:
                break
        callback(rec)
    def __find_rec_by_snap(self,snap):
        # Scan buffered messages for one whose 'snap' id matches; pop it and
        # return its 'data' payload, or None when absent/unparsable.
        msg_arr=[x[0] for x in self.rec_arr]
        for i in range(len(msg_arr)):
            try:
                m=json.loads(msg_arr[i])
                if snap==m['snap']:
                    self.rec_arr.remove(self.rec_arr[i])
                    return m['data']
            except Exception :
                return None
        return None
    def send_data(self,data,timeout=5,callback_func=None):
        """Send *data* as a snap-tagged JSON frame and collect the reply.

        Synchronous when callback_func is None (returns the reply, or the
        last poll result on timeout); otherwise returns None immediately and
        delivers the reply to callback_func on a daemon thread.
        """
        # Short unique request id: middle 8 hex chars of an MD5 of the time.
        snap=hashlib.md5()
        snap.update(str(time.time()).encode('utf-8'))
        snap=snap.hexdigest()[12:-12]
        print(snap)
        body={}
        body['snap']=snap
        body['data']=data
        body=json.dumps(body,ensure_ascii=False)
        body=body+'\r\n'
        #print(body)
        #self.ser.write(body.encode('utf-8'))
        self.send_arr.append(body.encode('utf-8'))
        if callback_func is None:
            start_time=time.time()
            while time.time()-start_time<=timeout:
                rec=self.__find_rec_by_snap(snap)
                if not rec is None:
                    break
            return rec
        else:
            callback_th=Thread(target=self.__callback_th_func,args=(callback_func,snap,timeout))
            callback_th.setDaemon(True)
            callback_th.start()
            return None
    def __json_loads(self,res):
        # Best-effort JSON decode; fall back to the raw value on failure.
        try:
            res=json.loads(res)
        except Exception:
            pass
        return res
    #---------------API towards the Banma Mama hub--------------
    #-------device management--------
    def get_clients(self,timeout=5,callback_func=None):# list devices: returns device ids and battery levels
        send={'type':'get','key':'client_list'}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    #-------basic input-------
    def get_digital(self,client,timeout=5,callback_func=None):# read the GPIO digital value of the given client
        send={'type':'get','client':client,'key':'digital'}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    def get_analog(self,client,timeout=5,callback_func=None):# read the GPIO analog voltage of the given client
        send={'type':'get','client':client,'key':'analog'}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    #-------basic output-------
    def set_digital(self,client,value,timeout=5,callback_func=None):# set the GPIO digital value of the given client
        send={'type':'set','client':client,'key':'digital','value':value}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    def set_analog(self,client,value,timeout=5,callback_func=None):# set the GPIO analog voltage of the given client
        send={'type':'set','client':client,'key':'analog','value':value}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    #-------UART operations-------
    def read_uart(self,client,timeout=5,callback_func=None):# read data received on the client's UART
        send={'type':'get','client':client,'key':'uart'}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    def write_uart(self,client,value,timeout=5,callback_func=None):# send data out of the client's UART
        send={'type':'set','client':client,'key':'uart','value':value}
        return self.__json_loads(self.send_data(data=send,timeout=timeout,callback_func=callback_func))
    #-------I2C operations--------
    def read_i2c(self,client,addr,timeout=5,callback_func=None):# read an I2C value from the client (not implemented)
        pass
    def write_i2c(self,client,addr,value,timeout=5,callback_func=None):# write an I2C value at *addr* (not implemented)
        pass
    #-------servo operations--------
    def set_servo(self,client,angle,timeout=5,callback_func=None):# set the servo angle on the client (not implemented)
        pass
    #-------stepper-motor operations-----
    def set_motor_step(self,client,step,speed,timeout=5,callback_func=None):# set the stepper step count (not implemented)
        pass
    def set_motor_speed(self,client,speed,timeout=5,callback_func=None):# set the stepper speed (not implemented)
        pass
    #-------DC-motor control (planned)-----
    # set the DC motor output power
    #-------WS2812B LED-strip operations (planned)-----
    # set the colour of a single LED
    # set the whole strip's colours from an array
    #-------12864 display operations (planned)-----
    # set the LCD display contents
    # clear the LCD display contents
    #-------ultrasonic-sensor operations (planned)-----
    # read the ultrasonic distance value
    #-------speed-sensor operations (planned)-----
    # read the speed sensor via a callback
    #-------DHT11 temperature/humidity sensor (planned)-----
    # read temperature and humidity
    #-------DS18B20 temperature probe (planned)-----
    # read the probe temperature
    #-------BMP280 barometric-pressure sensor (planned)-----
    # read the air pressure
    # read the ambient temperature
    #-------attitude (IMU) sensor (planned)-----
    # read the current accelerometer values
    # read the current gyroscope values
    #-------GPIO frequency capture (planned)-----
    # measure the toggle frequency of a GPIO pin
    # fetch the frequency-domain list of an ADC pin
    #-------client work-mode control (planned)-----
    # enable black-box recording mode
    # disable black-box recording mode
    # enter low-power state
    # exit low-power state
    # fetch black-box records
    # clear black-box records
def get_clients_dummy():
    """Stub: pretend exactly two clients, '1' and '2', are connected."""
    return ['1', '2']
def get_power_dummy(client):
    """Stub battery level: always 50, regardless of *client*."""
    return 50
def get_serial_ports_dummy():
    """Stub: report two fixed serial ports."""
    return ['COM1', 'COM2']
if __name__ == '__main__':
    # Minimal CLI used by a host application: command name in argv[1],
    # results as JSON on stdout.
    if sys.argv[1] == 'get_clients':
        clients = get_clients_dummy()
        print(json.dumps(clients))
    elif sys.argv[1] == 'get_clients_info':
        # argv[2] is a JSON list of client ids; each answer row is
        # [id, 1, battery] (the 1 is presumably an online flag -- confirm).
        clients = json.loads(sys.argv[2])
        info = []
        for client in clients:
            info.append([client, 1, get_power_dummy(client)]);
        print(json.dumps(info))
    elif sys.argv[1] == 'get_serial_ports':
        print(json.dumps(get_serial_ports_dummy()))
    else:
        pass
|
class Gato:
    """Model a cat and report on its weight (messages in Portuguese)."""

    def __init__(self, nome):
        self.nome = nome
        print('Seu gato se chama', self.nome)

    def peso_gato(self, peso):
        """Store *peso* (kg) and print a weight assessment."""
        self.peso = peso
        if self.peso > 5.0:
            print('Seu gato está acima do peso')
        elif self.peso > 3.5:
            print('Peso parece normal')
        else:
            print('Seu gato está abaixo do peso')

    # Leading-underscore methods are private by convention and not meant to
    # be called from outside this class.
    def _dieta_especial_gato(self):
        """Return a feeding recommendation based on the stored weight."""
        self.msg = 'Tudo OK'
        if self.peso < 3.5:
            self.msg = 'Aumente a ração do gato'
        if self.peso >= 5.0:
            self.msg = 'Diminua a ração do gato'
        return self.msg

    def dados_gato(self):
        """Print the cat's data plus the private diet recommendation."""
        print('\nO gato', self.nome,'está com', self.peso, 'kg')
        print(self._dieta_especial_gato())
#Fim do escopo da classe
# Interactive demo: build a Gato from user input and print its report.
nome_gato = input('Digite o nome do seu gato : ')
g1 = Gato(nome_gato)
peso = float(input('Digite o peso do gato : '))  # weight in kg
g1.peso_gato(peso)
g1.dados_gato()
|
# Scratch assignments; note `a` is immediately re-bound from 1 to 2 below.
a = 1
b = 2
c =2
a = 2
class info():
    """Tiny container whose instances start with a default color."""
    def __init__(self):
        # Default color; callers may overwrite the attribute freely.
        self.color="red"
|
import speech_recognition as sr
from textblob import TextBlob
from playsound import playsound
from gtts import gTTS
import argparse
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# Service-account JSON for Google Cloud Speech.
# BUGFIX: the original line was a bare `GOOGLE_CLOUD_SPEECH_CREDENTIALS =`
# (a SyntaxError) -- the secret was presumably stripped from the file.
# None keeps the module importable; supply real credentials before calling
# listener().
GOOGLE_CLOUD_SPEECH_CREDENTIALS = None
# Suffix used by speaker() to generate unique mp3 file names.
counter = 0
def speaker(toTalk):
    """Synthesize *toTalk* to an mp3 (named with the global counter) and play it."""
    mp3_name = 'speaking' + str(counter) + '.mp3'
    gTTS(toTalk).save(mp3_name)
    playsound(mp3_name)
# speaker("Hello world.")
# counter+=1
# speaker("My name is hal9000")
# counter+=1
# speaker("I am here to listen")
# counter+=1
# speaker("Tell me about your feelings ")
# counter+=1
# obtain audio from the microphone
def listener():
    """Record one utterance from the microphone and transcribe it.

    Returns the transcription string, or None when recognition fails
    (NOTE(review): the caller below concatenates the result into a string,
    which raises TypeError on the None path -- confirm intended).
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("listening")
        audio = r.listen(source)
    try:
        print("speech detected")
        # Google Cloud speech-to-text using the module-level credentials.
        speech = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS)
        return speech
    except sr.UnknownValueError:
        print("Google Cloud Speech could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Cloud Speech service; {0}".format(e))
# Script entry: capture speech, then run Google Cloud sentiment analysis on it.
speech = listener()
print("You said: " + speech)
content = speech
client = language.LanguageServiceClient()
# Wrap the transcription as a plain-text document for the Natural Language API.
document = types.Document(
    content=content,
    type=enums.Document.Type.PLAIN_TEXT)
annotations = client.analyze_sentiment(document=document)
# Magnitude = overall strength of sentiment across the document.
magnitude = annotations.document_sentiment.magnitude
print(magnitude)
# blob = TextBlob(speech)
# for sentence in blob.sentences:
# print(sentence.sentiment.polarity)
# happyness = sentence.sentiment.polarity
# if happyness > 0:
# counter +=1
# speaker("Seems like your day has been pretty good! Keep it up!")
# else:
# counter +=2
# speaker("Cheer up, its not too bad. There's always tomorrow!") |
#!/usr/bin/env python
import os
import jinja2
import webapp2
# Jinja2 environment rooted at ./templates next to this file.
# NOTE(review): autoescape=False means template variables are NOT HTML-escaped;
# confirm no user-supplied data is rendered, otherwise this is an XSS risk.
template_dir = os.path.join(os.path.dirname(__file__), "templates")
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=False)
class BaseHandler(webapp2.RequestHandler):
    """Common rendering helpers shared by all request handlers."""

    def write(self, *a, **kw):
        # Thin wrapper over the webapp2 response stream.
        return self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Render *template* to a string using keyword params as context.
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        # Render and write in one step.
        return self.write(self.render_str(template, **kw))

    def render_template(self, view_filename, params=None):
        # Same as render() but takes the context as a dict; defaults params
        # to None to avoid the mutable-default-argument pitfall.
        # NOTE(review): overlaps with render()/render_str() above.
        if params is None:
            params = {}
        template = jinja_env.get_template(view_filename)
        return self.response.out.write(template.render(params))
class MainHandler(BaseHandler):
    def get(self):
        """Serve the landing page with its placeholder paragraph."""
        besedilo = "Lorem Ipsum is simply dummy text of the printing and typesetting industry."
        return self.render_template("hello.html", params={"tekst": besedilo})
class OMeniHandler(BaseHandler):
    def get(self):
        """Serve the "about me" page with its placeholder paragraph."""
        besedilo1 = "It has been proven that comprehensible content, while scanning the design solution of a particular page, undesirable redirects the reader's attention. Since Lorem Ipsum has a relatively even distribution of characters, it successfully replaces temporary, substantively meaningful texts. Many desktop publishing programs and online editors use Lorem Ipsum as the default blank text. Therefore, a web search with the keywords lorem ipsum returns many hits to unfinished websites. Over the years, many versions of this blind text have been created, either unplanned or deliberately, with various humorous and other inputs."
        return self.render_template("omeni.html", params={"tekst1": besedilo1})
class MojiProjektiHandler(BaseHandler):
    def get(self):
        """Serve the "my projects" page with its placeholder paragraph."""
        besedilo2 = "It has been proven that comprehensible content, while scanning the design solution of a particular page, undesirable redirects the reader's attention. Since Lorem Ipsum has a relatively even distribution of characters, it successfully replaces temporary, substantively meaningful texts. Many desktop publishing programs and online editors use Lorem Ipsum as the default blank text. Therefore, a web search with the keywords lorem ipsum returns many hits to unfinished websites. Over the years, many versions of this blind text have been created, either unplanned or deliberately, with various humorous and other inputs."
        return self.render_template("projekti.html", params={"tekst2": besedilo2})
class BlogHandler(BaseHandler):
    def get(self):
        """List the hard-coded blog posts."""
        sporocilo = "Na tej strani se nahajajo moji blogi."
        blog_posts = [
            {"title": "Prvi blog", "text": "test, pa da vidimo"},
            {"title": "Drugi blog", "text": "test, pa da vidimo drugic"},
        ]
        context = {"sporocilo2": sporocilo, "blogs": blog_posts}
        return self.render_template("blog.html", params=context)
class KontaktHandler(BaseHandler):
    def get(self):
        """Serve the contact page with the hard-coded e-mail address."""
        podatki = "email: ime@gmail.com"
        return self.render_template("kontakt.html", params={"pod": podatki})
# URL routing table; debug=True shows tracebacks in the browser.
app = webapp2.WSGIApplication([
    webapp2.Route('/', MainHandler),
    webapp2.Route('/omeni', OMeniHandler),
    webapp2.Route('/projekti', MojiProjektiHandler),
    webapp2.Route('/blog', BlogHandler),
    webapp2.Route('/kontakt', KontaktHandler),
], debug=True)
|
from typing import Iterable, Any
def ilen(coll: Iterable) -> int:
    """Count the elements of any iterable (consumes generators).

    >>> foo = (x for x in range(10))
    >>> ilen(foo)
    10
    """
    # BUGFIX: the original body used `counter++`, which is a SyntaxError in
    # Python. sum() over a generator of 1s counts the items at C speed
    # without materializing the iterable.
    return sum(1 for _ in coll)
def flatten(mas: Iterable[Any]) -> Iterable[Any]:
    """Lazily yield the leaves of an arbitrarily nested list/tuple/set.

    >>> list(flatten([1, [2, [3, 4]]]))
    [1, 2, 3, 4]

    Raises ValueError for any element that is neither a container
    (list/tuple/set) nor a scalar (int, str, float, bool).
    """
    for element in mas:
        if isinstance(element, (list, tuple, set)):
            # Recurse into nested containers.
            for leaf in flatten(element):
                yield leaf
        elif isinstance(element, (int, str, float, bool)):
            yield element
        else:
            raise ValueError("unexpected type token")
def distinct(coll: Iterable):
    """Lazily drop duplicates while keeping first-seen order.

    >>> list(distinct([1, 2, 0, 1, 3, 0, 2]))
    [1, 2, 0, 3]
    """
    seen = []  # a list (not a set) so unhashable elements also work
    for element in coll:
        if element in seen:
            continue
        seen.append(element)
        yield element
def groupby(coll: Iterable, key):
    """Group a sequence of mappings into a dict keyed by ``item[key]``.

    >>> users = [ {'gender': 'female', 'age': 33}, {'gender': 'male', 'age': 20}, {'gender': 'female', 'age': 21}]
    >>> groupby(users, 'gender')
    {'female': [{'gender': 'female', 'age': 33}, {'gender': 'female', 'age': 21}], 'male': [{'gender': 'male', 'age': 20}]}
    """
    grouped = {}
    for item in coll:
        # setdefault creates the bucket on first sight of a key.
        grouped.setdefault(item[key], []).append(item)
    return grouped
def chunks(coll: Iterable, size: int) -> Iterable[Any]:
    """Split a sequence into tuples of exactly *size* items, padding the last
    chunk with None.

    >>> list(chunks([0, 1, 2, 3, 4], 3))
    [(0, 1, 2), (3, 4, None)]

    Raises TypeError when *size* is not an int, ValueError when size <= 0.
    """
    if not isinstance(size, int):
        raise TypeError()
    if size <= 0:
        raise ValueError()
    buf = []
    for item in coll:
        buf.append(item)
        if len(buf) == size:
            yield tuple(buf)
            buf = []
    if buf:
        # BUGFIX: pad the trailing chunk up to *size*; the original appended
        # exactly one None, producing a short tuple whenever more than one
        # slot was empty (e.g. chunks([0,1,2,3], 3) gave (3, None)).
        buf.extend([None] * (size - len(buf)))
        yield tuple(buf)
def first(coll: Iterable) -> Any:
    """Return the first element of *coll*, or None when it is empty.

    >>> foo = (x for x in range(10))
    >>> first(foo)
    0
    >>> print(first(range(0)))
    None
    """
    for element in coll:
        return element
    return None
def last(coll: Iterable) -> Any:
    """Return the last element of *coll*, or None when it is empty.

    >>> foo = (x for x in range(10))
    >>> last(foo)
    9
    >>> print(last(range(0)))
    None
    """
    tail = None
    # Exhaust the iterable; `tail` keeps whatever came out last.
    for tail in coll:
        pass
    return tail
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from modeltranslation.admin import TranslationAdmin
from snippets.admin import BaseModelAdmin
from snippets.modeltranslation import get_model_translation_fields
from snippets.seo import models
class SEOAdminMixin(object):
    """Admin mixin: adds a separate SEO tab to the change form (django-suit)."""
    # (tab id, tab label) pairs consumed by django-suit's tabbed form layout.
    suit_form_tabs = (('general', _('Основное')), ('seo', 'SEO'))
class RobotDisallowInline(admin.TabularInline):
    """Inline editor for robots.txt Disallow directives."""
    extra = 0  # no blank extra rows by default
    fields = models.RobotDisallow().collect_fields()
    model = models.RobotDisallow
    ordering = ('ordering',)
    readonly_fields = ('created', 'updated')
class RobotAllowInline(admin.TabularInline):
    """Inline editor for robots.txt Allow directives."""
    extra = 0  # no blank extra rows by default
    fields = models.RobotAllow().collect_fields()
    model = models.RobotAllow
    ordering = ('ordering',)
    readonly_fields = ('created', 'updated')
class RobotCleanparamInline(admin.TabularInline):
    """Inline editor for robots.txt Clean-param directives."""
    extra = 0  # no blank extra rows by default
    fields = models.RobotCleanparam().collect_fields()
    model = models.RobotCleanparam
    ordering = ('ordering',)
    readonly_fields = ('created', 'updated')
class RobotSitemapInline(admin.TabularInline):
    """Inline editor for robots.txt Sitemap directives."""
    extra = 0  # no blank extra rows by default
    fields = models.RobotSitemap().collect_fields()
    model = models.RobotSitemap
    ordering = ('ordering',)
    readonly_fields = ('created', 'updated')
@admin.register(models.Robot)
class RobotAdmin(BaseModelAdmin):
    """Admin for robots.txt User-Agent sections."""
    fields = models.Robot().collect_fields()
    list_display = ('id', 'title', 'host', 'ordering', 'status')
    list_display_links = ('id', 'title')
    # One inline block per robots.txt directive type.
    inlines = (RobotDisallowInline, RobotAllowInline, RobotCleanparamInline, RobotSitemapInline)
    save_as = True  # enables "save as new" on the change form
    search_fields = ('title', 'host')
@admin.register(models.SEOPage)
class SEOPageAdmin(BaseModelAdmin, TranslationAdmin):
    """Admin for per-URL SEO parameters (with model translation support)."""
    group_fieldsets = True
    list_display = ('url', 'seo_title', 'ordering', 'status', 'created')
    ordering = ('url',)
    # Search also covers every translated variant of the model's fields.
    search_fields = ['url'] + get_model_translation_fields(models.SEOPage)
@admin.register(models.Redirect)
class RedirectAdmin(BaseModelAdmin):
    """Admin for HTTP redirects (old path -> new path with a status code)."""
    fields = models.Redirect().collect_fields()
    list_display = ('old_path', 'new_path', 'http_code', 'ordering', 'status', 'created')
    ordering = ('old_path',)
    search_fields = ('old_path', 'new_path', 'http_code')
|
import http.server
import socketserver
# Handler that serves files from the current working directory.
handler = http.server.SimpleHTTPRequestHandler
# The server listens on port 1234 on all interfaces.
with socketserver.TCPServer(("", 1234), handler) as httpd:
    # Serve requests until the process is terminated (blocks forever).
    httpd.serve_forever()
"""
LeetCode - Hard
"""
import ast
import json
"""
Serialization is the process of converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment.
Design an algorithm to serialize and deserialize an N-ary tree. An N-ary tree is a rooted tree in which each node has no more than N children. There is no restriction on how your serialization/deserialization algorithm should work. You just need to ensure that an N-ary tree can be serialized to a string and this string can be deserialized to the original tree structure.
For example, you may serialize the following 3-ary tree
as [1 [3[5 6] 2 4]]. Note that this is just an example, you do not necessarily need to follow this format.
Or you can follow LeetCode's level order traversal serialization format, where each group of children is separated by the null value.
For example, the above tree may be serialized as [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14].
You do not necessarily need to follow the above suggested formats, there are many more different formats that work so please be creative and come up with different approaches yourself.
Constraints:
The number of nodes in the tree is in the range [0, 104].
0 <= Node.val <= 104
The height of the n-ary tree is less than or equal to 1000
Do not use class member/global/static variables to store states. Your encode and decode algorithms should be stateless.
"""
# Definition for a Node.
class Node(object):
    """N-ary tree node: a value plus an optional list of child Nodes."""

    def __init__(self, val=None, children=None):
        # children is None (not []) for leaves, matching the demo below.
        self.val = val
        self.children = children
class Codec:
    """Serialize/deserialize an N-ary tree of Node objects.

    serialize() records, for every visited value, the list of its children's
    values and returns str() of that mapping; deserialize() parses the
    mapping back and rebuilds the tree.

    NOTE(review): the mapping is keyed by node value, so trees with duplicate
    values collapse; serializeTable also persists across serialize() calls,
    which violates the problem's "stateless" requirement -- both kept as-is
    to preserve the original interface.
    """

    def __init__(self):
        self.serializeTable = dict()   # val -> list of child vals
        self.deserializeTree = None    # result slot filled by deserialize()

    def serialize(self, root: 'Node') -> str:
        """Encodes a tree to a single string.

        :type root: Node
        :rtype: str
        """
        if root is None:
            return None
        if root.val not in self.serializeTable:
            self.serializeTable[root.val] = []
        if root.children:
            for child in root.children:
                self.serialize(child)
                self.serializeTable[root.val].append(child.val)
        return str(self.serializeTable)

    def deserialize(self, data: str) -> 'Node':
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: Node
        """
        if data is None:
            return None
        # SECURITY: literal_eval only parses Python literals -- unlike the
        # original eval(), it cannot execute arbitrary code from the input.
        table = ast.literal_eval(data)
        self.deserializeHelper(table)
        return self.deserializeTree

    def deserializeHelper(self, data):
        """Rebuild the tree from a {val: [child vals]} mapping.

        (The original body was truncated mid-statement in the source;
        reconstructed from the serialize() format.)
        """
        if not data:
            self.deserializeTree = None
            return
        # The root is the only value that never appears as someone's child.
        child_vals = {c for kids in data.values() for c in kids}
        root_val = next(v for v in data if v not in child_vals)
        self.deserializeTree = self._build(data, root_val)

    def _build(self, data, val):
        # Recursively construct Node objects; leaves get children=None to
        # mirror how the demo trees below are constructed.
        kids = [self._build(data, c) for c in data.get(val, [])]
        return Node(val, kids if kids else None)
if __name__ == '__main__':
    # Your Codec object will be instantiated and called as such:
    # Build a small 3-ary demo tree rooted at 1.
    NodeA = Node(3, [Node(5, [Node(50)]), Node(6)])
    NodeB = Node(2)
    NodeC = Node(4, [Node(15), Node(16)])
    root = Node(1, [NodeA, NodeB, NodeC])
    codec = Codec()
    print(codec.serialize(root))
    # Round-trip: parse the serialized mapping back into a tree.
    codec.deserialize(codec.serialize(root))
|
from fioo.fioo import * |
from shutil import copy
from os.path import join as path, dirname, abspath, expanduser
from os import remove
# Location of the template project ("tin") shipped inside the source tree.
tin = path('src', 'bakedbeans', 'tin.template')
# Grab modules from the tin and run them, using the local setup_config.
# We don't attempt to fill in the setup_config template from the tin, because
# there's too much custom stuff needed.
copy(path(tin, 'setup.py'), 'bootstrap.py')
copy(path(tin, 'ez_setup.py'), 'ez_setup.py')
# Importing bootstrap runs the copied setup machinery (ensures setuptools).
import setup_config, bootstrap
# Now setuptools is ensured to be available.
from setuptools.command.install import install as _install
# http://stackoverflow.com/a/18159969/523612
def make_shortcut(script_dir):
    """Drop a bakedbeans.bat launcher on the user's desktop.

    The batch file forwards all arguments to bakedbeans.exe in *script_dir*
    and pauses so the console window stays open afterwards.
    """
    contents = '@echo off\n"{}\\bakedbeans.exe" %*\npause'.format(script_dir)
    shortcut = path(expanduser('~'), 'desktop', 'bakedbeans.bat')
    with open(shortcut, 'w') as bat:
        bat.write(contents)
class install(_install):
    """setuptools install command that also creates a desktop shortcut."""
    def run(self):
        # Perform the normal install first.
        super().run()
        # self.execute respects --dry-run and logs *msg* before running.
        self.execute(
            make_shortcut,
            (self.install_scripts,),
            msg="Creating desktop shortcut"
        )
# Register the custom install command, run the real setup, then clean up the
# bootstrap files copied from the tin earlier.
setup_config.extra_options['cmdclass'] = {'install': install}
bootstrap.do_setup(dirname(abspath(__file__)), setup_config)
remove('ez_setup.py')
remove('bootstrap.py')
|
class GraphTraverser(object):
    """Walks an attack graph backwards, matching graph nodes against logged
    events and printing candidate attack paths."""

    def __init__(self, graph, eventSet, eventMapping, networkNodes):
        self.graph = graph                  # directed graph of 'vuln'/'state' nodes
        self.eventSet = eventSet            # queryable log of observed events
        self.eventMapping = eventMapping    # vulnerability name -> event description
        self.networkNodes = networkNodes    # hosts considered inside the network

    def dfs(self, v, reverseList, timestamp, dst, port, src=None):
        """Depth-first walk over predecessors of *v*, accumulating event strings.

        Prints a full path whenever an entry-point vulnerability reached from
        outside the network (src not in networkNodes) is found.
        """
        # print("dfs called")
        # print(v.to_string())
        # for i in self.graph.predecessors(v):
        # print(i.to_string())
        if v.type == 'vuln' and v.entry and src not in self.networkNodes:
            # reverseList.reverse()
            # print("Printing at node {}".format(v.to_string()))
            print('')
            # reverseList was built leaf-first; reverse for printing.
            return self.print_path(reverseList[::-1])
        for i in self.graph.predecessors(v):
            # print("Predecessor: {}".format(i.to_string()))
            if i.type == 'vuln':
                description = self.eventMapping[i.vulnerabilityName]
                eventList = self.eventSet.containsVulnEvent(description, dst, i.vulnerabilityPort, timestamp)
                if eventList:
                    for event in eventList:
                        event_string = event['TIMESTAMP'] + ', ' + event['SRCHOST'] + ', ' + event['DSTHOST'] + ', ' + description
                        # print("Adding event: {}".format(event_string))
                        # Push the event, recurse, then pop (backtracking).
                        reverseList.append(event_string)
                        self.dfs(i, reverseList, event['TIMESTAMP'], event['DSTHOST'], event['DSTPORT'], event['SRCHOST'])
                        reverseList.pop()
                # print("Returned from state node")
            elif i.type == 'state':
                # NOTE(review): *src* is passed in the dst slot here --
                # confirm this argument order is intentional.
                self.dfs(i, reverseList, timestamp, src, port)
        # print("Returned from vuln node")

    def start_traversal(self, timestamp, src, dst, port, description, accessLevel):
        """Seed the walk from the notable event and print any attack paths."""
        reverseList = []
        reverseList.append('Notable event: ' + str(timestamp) + ', ' + src + ', '+ dst + ', ' + description)
        notableEventNode = self.find_node(src, accessLevel)
        if notableEventNode:
            eventSequence = self.dfs(notableEventNode, reverseList, timestamp, src, port)
        else:
            print("The attacker cannot have access level {} at host {}".format(accessLevel, src))

    def find_node(self, dst, accessLevel):
        """Return the 'state' node for host *dst* at *accessLevel*, else None."""
        for i in self.graph.nodes:
            if i.type == 'state' and i.hostname == dst and i.accessLevel == accessLevel:
                return i

    def print_path(self, list):
        """Print an event sequence: the entry point, then arrow-prefixed steps.

        NOTE(review): the parameter name shadows the builtin `list`.
        """
        print("Entry: {}".format(list[0]))
        for i in list[1:]:
            print(' -> ' + i)
import glob, random, os, json

# Map every thumbnail's base file name to itself as a placeholder caption.
files = glob.glob(
    "assets/images/thumbs/*.jpg")

captions = {}
for image_file in files:
    # BUGFIX: os.path.basename is portable; the original split on "/" and
    # broke on Windows path separators. The first "." still ends the name,
    # matching the original's split(".")[0].
    name = os.path.basename(image_file).split(".")[0]
    captions[name] = name

# BUGFIX: `with` guarantees the output handle is flushed and closed even on
# error; the original left it open.
with open("data/full-captions.json", 'w') as fw:
    json.dump(captions, fw, ensure_ascii=False, indent=4,
              sort_keys=True, separators=(',', ': '))
|
import time
from openerp.osv import fields, osv
from report import report_sxw
from openerp.tools.translate import _
import logging
# Module-level logger for this report's debug output.
_logger = logging.getLogger('reportes')
class reportes_reportc(report_sxw.rml_parse):
    # RML parser for the purchase-ledger ("libro de compras") report.
    # NOTE(review): all of the accumulators below are CLASS attributes, so
    # their values persist across report runs in the same process -- confirm
    # whether they should be reset per render.
    total_exento = 0.0        # running total of exempt amounts
    total_cf = 0.0            # running total of creditable VAT ("credito fiscal")
    total_per = 0.0           # running total of "IVA Perdida"
    total_pro = 0.0           # running total of "IVA Proporcional"
    rectificador = 0.0
    rectificador_neto = 0.0
    rectificador_iva = 0.0
    documentos = 0.0          # number of invoices processed
    total_neto = 0.0
    gran_exento = 0.0         # grand totals shown at the report foot
    gran_neto = 0.0
    gran_iva = 0.0
    gran_iva_per = 0.0
    gran_iva_pro = 0.0
    gran_total = 0.0
    boolean_nota_credito = 0.0
    valor_nota = 0.0
    valor_iva_nota = 0.0
    valor_neto_nota = 0.0
    contador = 0.0            # per-journal invoice counter
    mixta = False             # set when a "factura de compra mixta" is detected
    ajustador = 1.0
def __init__(self, cr, uid, name, context):
    """Register the helper callbacks available inside the RML template."""
    super(reportes_reportc, self).__init__(cr, uid, name, context=context)
    self.localcontext.update({
        'time': time,
        '_periodos_v': self._periodos_v,
        'corto_dat_v': self.corto_dat_v,
        'get__v': self._get__v,
        'nuevo':self.nuevo,
        'detalle':self.detalle,
        'subtotales':self.subtotales,
        'totales':self.totales
    })
def _periodos_v(self, period_list):
    """Return a 'Desde <first> Hasta <last>' label from the period names.

    Queries the name of every period in *period_list*; keeps the first
    iteration's name as the range start and the last one's as the end.
    NOTE(review): SQL is built by string concatenation -- the ids come from
    the report wizard, but parameterized queries would be safer.
    """
    aux_ = 0
    feci = 0
    fecf = 0
    for period_id in period_list:
        if aux_ == 0:
            # First period: remember its name as the range start.
            self.cr.execute("select name from account_period where id=" + str(period_id) + "")
            for record in self.cr.fetchall():
                feci = record[0]
            aux_ = aux_ + 1
        # Every iteration overwrites fecf, so the last period's name wins.
        self.cr.execute("select name from account_period where id=" + str(period_id) + "")
        for record in self.cr.fetchall():
            fecf = record[0]
    return 'Desde ' + feci + ' Hasta ' + fecf
def corto_dat_v(self, arg1, largo):
    """Clip *arg1* for display: strings longer than *largo* are cut to largo-1 chars."""
    if len(arg1) <= largo:
        return arg1
    return arg1[:largo - 1]
def _get__v(self, co, pe, si, ty):
    """Build the purchase-invoice row dicts for the report body.

    *co* is (company_id, ...), *pe* the selected period ids; *si*/*ty* are
    unused here. Returns a list of row dicts; every 56 rows a SUB TOTAL row
    is inserted, and SUB TOTAL + TOTAL rows are appended at the end.
    """
    d = []
    Lds = ''
    Lds_ = ''
    cc = 0
    # cl=0
    tpOo = 0
    aeOo = 0
    aeS = 0   # subtotal accumulators: exempt, tax, untaxed, total
    txS = 0
    unS = 0
    toS = 0
    cl = 0    # row counter within the current 56-row sub-block
    aeT = 0   # grand-total accumulators
    txT = 0
    unT = 0
    toT = 0
    d.append({'auxiliar':'t', })
    # Join the period ids as "id,id,..." and strip the trailing comma.
    for p in pe:
        Lds = Lds + str(p) + ","
    while cc < len(Lds) - 1:
        Lds_ = Lds_ + Lds[cc]
        cc = cc + 1
    # BUGFIX: removed a stray bare `subtotales` expression that stood here;
    # it raised NameError at runtime (no local or global of that name).
    print("construyo la query")
    sql = "SELECT ai.reference,date_invoice,rp.rut, rp.name, aj.code, ai.amount_untaxed, ai.amount_tax, ai.amount_total, ai.fiscal_position, (select CASE WHEN sum(ait.base_amount) is null then 0 else sum(ait.base_amount) end as a from account_invoice_tax ait where UPPER(ait.name) like UPPER('%exento%') and ait.invoice_id = ai.id) base_amount FROM public.account_invoice ai, public.account_journal aj, public.res_partner rp WHERE ai.state not in ('draft', 'cancel') and ai.partner_id = rp.id AND aj.id = ai.journal_id and aj.code between '100' and '119' and ai.period_id in (" + "".join(map(str, Lds_)) + ") and ai.company_id = " + str(co[0]) + " order by date_invoice"
    print(sql)
    self.cr.execute(sql)
    for record in self.cr.fetchall():
        # print("recorro la query")
        print(record[8])
        nmOo = record[0]
        dtOo = record[1]
        rtOo = record[2]
        clOo = record[3]
        tpOo = ""
        aeOo = record[9]
        # Map journal code to a short document-type tag.
        if record[4] == "101":
            tpOo = "FN"
        elif record[4] == "102":
            tpOo = "FE"
        elif record[4] == "103":
            tpOo = "FI"
        elif record[4] == "":
            tpOo = "SC"
        txOo = record[6]  # tax
        unOo = record[5]  # untaxed
        toOo = record[7]  # total
        if cl == 56:
            # Page break: emit a SUB TOTAL row and reset the sub accumulators.
            OoO = {'auxiliar':'tT'}
            d.append(OoO)
            OoO = {
                'number': '',
                'x_tipo_doc': '',
                'date_invoice': '',
                'rut': '',
                'proveedor': 'SUB TOTAL',
                'afe_exe':self.formatLang(aeS, digits=0),
                'iva': self.formatLang(txS, digits=0),
                'neto_': self.formatLang(unS, digits=0),
                'total_': self.formatLang(toS, digits=0),
                'auxiliar':'dT'
            }
            d.append(OoO)
            aeS = 0
            txS = 0
            unS = 0
            toS = 0
            cl = 0
            d.append({'auxiliar':'t', })
        OoO = {
            'number': nmOo,
            'x_tipo_doc': tpOo,
            'date_invoice': dtOo,
            'rut': rtOo,
            'proveedor': clOo,
            'afe_exe':self.formatLang(aeOo, digits=0),
            'iva': self.formatLang(txOo, digits=0),
            'neto_': self.formatLang(unOo, digits=0),
            'total_': self.formatLang(toOo, digits=0),
            'auxiliar':'d'
        }
        # sub total
        aeS += aeOo
        txS += txOo
        unS += unOo
        toS += toOo
        d.append(OoO)
        # total final
        aeT += aeOo
        txT += txOo
        unT += unOo
        toT += toOo
        cl = cl + 1
    # Trailing SUB TOTAL row for the last (possibly short) sub-block.
    OoO = {
        'number': '',
        'x_tipo_doc': '',
        'date_invoice': '',
        'rut': '',
        'proveedor': 'SUB TOTAL',
        'afe_exe':self.formatLang(aeS, digits=0),
        'iva': self.formatLang(txS, digits=0),
        'neto_': self.formatLang(unS, digits=0),
        'total_': self.formatLang(toS, digits=0),
        'auxiliar':'dT'
    }
    d.append(OoO)
    # Grand TOTAL row.
    OoO = {
        'number': '',
        'x_tipo_doc': '',
        'date_invoice': '',
        'rut': '',
        'proveedor': 'TOTAL',
        'afe_exe':self.formatLang(aeT, digits=0),
        'iva': self.formatLang(txT, digits=0),
        'neto_': self.formatLang(unT, digits=0),
        'total_': self.formatLang(toT, digits=0),
        'auxiliar':'dT'
    }
    d.append(OoO)
    aeS = 0
    txS = 0
    unS = 0
    toS = 0
    return d
def nuevo(self, co, pe, si, ty):
    """Return the purchase journals (id, name) that have invoices in *pe*.

    Import/export journals are excluded by name. Period ids and the company
    id are interpolated into the SQL (internal wizard values).
    """
    data = []
    periodos = ",".join(map(str, pe))
    sql = """
    select id, name from account_journal aj where aj.name not in ('FACTURA DE IMPORTACION','DIARIO DE EXPORTACION')
    and id in (
    select journal_id
    from account_invoice ai
    where ai.state not in ('draft', 'cancel')
    and ai.period_id in ({0})
    and ai.company_id = {1}
    and ai.type in ('in_invoice', 'in_refund')
    )
    """.format(periodos, str(co[0]))
    self.cr.execute(sql)
    for record in self.cr.fetchall():
        data.insert(len(data) + 1, {'id':record[0],
                                    'name':record[1],
                                    })
    return data
def conversor(self, numero):
    """Strip thousands-separator dots from a formatted number string.

    BUGFIX: the accumulator was re-initialized INSIDE the loop, so only the
    last character survived (and empty input raised NameError on return).
    Initialized once before the loop, matching the duplicate `conversor`
    definition later in this class that shadows this one.
    """
    numero_total = ''
    for z in numero:
        if z != '.':
            numero_total += z
    return numero_total
def detalle(self, journal_id, co, pe, si, ty):
    """Build the per-invoice detail rows for one journal.

    Splits each invoice into exempt/net amounts, classifies its VAT line
    (creditable / "Perdida" / "Proporcional"), accumulates the class-level
    grand totals, and returns a list of row dicts for the RML template.
    Refund journals ('purchase_refund') are subtracted twice from the grand
    totals (reversing the earlier addition plus the refund itself).
    """
    data = []
    periodos = ",".join(map(str, pe))
    sql = """select
    ai.reference
    ,date_invoice
    ,rp.vat
    , rp.name
    , ai.amount_untaxed
    , ai.amount_tax
    , ai.amount_total
    , ai.fiscal_position
    , (select CASE WHEN sum(ait.base_amount) is null then 0
    else sum(ait.base_amount) end as a
    from account_invoice_tax ait
    where UPPER(ait.name) like UPPER('%test%')
    and ait.invoice_id = ai.id) base_amount
    , ai.type
    , ai.id
    FROM public.account_invoice ai
    , public.res_partner rp
    WHERE ai.state not in ('draft', 'cancel')
    and ai.partner_id = rp.id
    AND ai.journal_id = {0}
    and ai.period_id in ({1})
    and ai.company_id = {2}
    order by date_invoice;
    """.format(journal_id, periodos, str(co[0]))
    self.cr.execute(sql)
    for record in self.cr.fetchall():
        print record
        monto_exento = 0.0
        ivacf = 0.0
        ivaper = 0.0
        ivapro = 0.0
        monto_neto = 0.0
        self.documentos += 1
        # if record[10] == 'in_refund':
        # self.rectificador += record[6]
        # self.rectificador_neto += record[4]
        # self.rectificador_iva += record[5]
        #raise osv.except_osv('warning', 'Object %s ' % record[5])
        if(record[5] < 1):
            # No tax amount: the whole untaxed amount counts as exempt.
            monto_exento = record[4]
            monto_neto = 0.0
            self.total_exento += monto_exento
        else:
            monto_exento = 0.0
            monto_neto = record[4]
            self.total_neto = self.total_neto + monto_neto
            if record[4] * 1.19 > (record[6]+10): # factura de compra mixta
                # Mixed invoice: derive net from the tax at 19% and move the
                # remainder to exempt.
                self.mixta = True
                monto_neto = record[5] / 0.19
                monto_exento = record[4] - monto_neto
                self.total_neto = self.total_neto - monto_exento
                self.total_exento = self.total_exento + monto_exento
        paso = record[10]
        # raise osv.except_osv('Object Error', 'Object %s doesn\'t exist' % paso)
        sqliva = 'select name from account_invoice_tax where invoice_id=%s' % paso
        self.cr.execute(sqliva)
        registro = self.cr.fetchall()
        # raise osv.except_osv('Object Error', 'Object %s doesn\'t exist' % registro)
        if(registro):
            # Classify the invoice's VAT line by its tax name.
            if(registro[0][0] == 'None'):
                ivacf = 0.0
                ivaper = 0.0
                ivapro = 0.0
                # raise osv.except_osv('Object Error', 'none%s' % registro)
            elif(registro[0][0] == 'IVA Pro - IVA Proporcional'):
                ivacf = 0.0
                ivaper = 0.0
                ivapro = record[5]
                self.total_pro += ivapro
                # raise osv.except_osv('Object Error', '%s pro' % registro)
            elif(registro[0][0] == 'IVA Per - IVA Perdida'):
                ivacf = 0.0
                ivaper = record[5]
                ivapro = 0.0
                self.total_per += ivaper
                # raise osv.except_osv('Object Error', 'Object %s per' % registro)
            else:
                ivacf = record[5]
                ivaper = 0.0
                ivapro = 0.0
                self.total_cf += ivacf
                # raise osv.except_osv('Object Error', 'Object %s ivacf' % registro)
        else:
            ivacf = 0.0
            ivaper = 0.0
            ivapro = 0.0
        # Accumulate grand totals on the rounded display values (formatLang
        # then conversor strips the thousands separators).
        monto_exento1=str(self.formatLang(monto_exento, digits=0))
        monto_exento2=self.conversor(monto_exento1)
        self.gran_exento = self.gran_exento + int(monto_exento2)
        monto_neto1=str(self.formatLang(monto_neto, digits=0))
        monto_neto2=self.conversor(monto_neto1)
        self.gran_neto = self.gran_neto + int(monto_neto2)
        monto_total1=str(self.formatLang(record[6], digits=0))
        monto_total2=self.conversor(monto_total1)
        self.gran_total = self.gran_total + int(monto_total2)
        monto_ivacf1=str(self.formatLang(ivacf, digits=0))
        monto_ivacf2=self.conversor(monto_ivacf1)
        self.gran_iva = self.gran_iva + int(monto_ivacf2)
        monto_ivaper1=str(self.formatLang(ivaper, digits=0))
        monto_ivaper2=self.conversor(monto_ivaper1)
        self.gran_iva_per = self.gran_iva_per + int(monto_ivaper2)
        monto_ivapro1=str(self.formatLang(ivapro, digits=0))
        monto_ivapro2=self.conversor(monto_ivapro1)
        self.gran_iva_pro = self.gran_iva_pro + int(monto_ivapro2)
        sql7 = """select type from account_journal where id = {0}""".format(journal_id)
        self.cr.execute(sql7)
        for buscacode in self.cr.fetchall():
            code = buscacode[0]
            if code == 'purchase_refund':
                # Refund journal: subtract twice (undo the addition above and
                # apply the refund itself).
                print journal_id
                monto_total3=str(self.formatLang(record[6], digits=0))
                monto_total4=self.conversor(monto_total3)
                self.gran_total = self.gran_total - int(monto_total4) - int(monto_total4)
                monto_neto3=str(self.formatLang(monto_neto, digits=0))
                monto_neto4=self.conversor(monto_neto3)
                self.gran_neto = self.gran_neto - int(monto_neto4) - int(monto_neto4)
                monto_exento3=str(self.formatLang(monto_exento, digits=0))
                monto_exento4=self.conversor(monto_exento3)
                self.gran_exento = self.gran_exento - int(monto_exento4) - int(monto_exento4)
                monto_ivacf3=str(self.formatLang(ivacf, digits=0))
                monto_ivacf4=self.conversor(monto_ivacf3)
                self.gran_iva = self.gran_iva - int(monto_ivacf4) - int(monto_ivacf4)
                monto_ivaper3=str(self.formatLang(ivaper, digits=0))
                monto_ivaper4=self.conversor(monto_ivaper3)
                self.gran_iva_per = self.gran_iva_per - int(monto_ivaper4) - int(monto_ivaper4)
                monto_ivapro3=str(self.formatLang(ivapro, digits=0))
                monto_ivapro4=self.conversor(monto_ivapro3)
                self.gran_iva_pro = self.gran_iva_pro - int(monto_ivapro4) - int(monto_ivapro4)
        # Reorder the ISO date yyyy-mm-dd into dd-mm-yyyy for display.
        date = record[1]
        date = date[8] + date[9] + date[7] + date[5] + date[6] + date[4] + date[0] + date[1] + date[2] + date[3]
        data.insert(len(data) + 1,
            {
            'number': record[0],
            'x_tipo_doc': "",
            'date_invoice': date,
            'rut': record[2],
            'proveedor': record[3],
            'afe_exe':self.formatLang(monto_exento, digits=0),
            # 'afe_exe':self.formatLang(record[4], digits=0),
            # 'cc_amount_untaxed': self.formatLang(record[4], digits=0),
            'cc_amount_untaxed': self.formatLang(monto_neto, digits=0),
            'cc_amount_tax': self.formatLang(ivacf, digits=0),
            'cc_tax_pro': self.formatLang(ivapro, digits=0),
            'cc_tax_per': self.formatLang(ivaper, digits=0),
            'cc_amount_total': self.formatLang(record[6], digits=0),
            'auxiliar':'d',
            # 'gran_numero_doctos': self.documentos,
            # 'gran_exento': self.gran_exento,
            # 'gran_neto': self.gran_neto,
            # 'gran_iva' : self.gran_iva,
            # 'gran_total' : self.gran_total
            'gran_numero_doctos': self.formatLang(self.documentos, digits=0),
            'gran_exento': self.formatLang((self.gran_exento), digits=0),
            'gran_neto': self.formatLang((self.gran_neto), digits=0),
            'gran_iva': self.formatLang((self.gran_iva), digits=0),
            'gran_iva_per': self.formatLang((self.gran_iva_per), digits=0),
            'gran_iva_pro': self.formatLang((self.gran_iva_pro), digits=0),
            'gran_total': self.formatLang((self.gran_total), digits=0)
            })
    # if journal_id == 48 or journal_id == 49:
    # tata = 0
    # tata = record[6]
    # tata_neto=0
    # tata_neto=monto_neto
    # tata_exe=0
    # tata_exe=monto_exento
    # tata_iva=0
    # tata_iva=ivacf
    # self.gran_total = self.gran_total-(tata*2)
    # self.gran_neto = self.gran_neto-(tata_neto*2)
    # self.gran_exento = self.gran_exento - (tata_exe*2)
    # self.gran_iva = self.gran_iva - (tata_iva*2)
    return data
def conversor(self, numero):
    """Strip thousands-separator dots from a formatted number string."""
    return ''.join(ch for ch in numero if ch != '.')
def subtotales(self, journal_id, co, pe, si, ty):
    """Compute one subtotal row for a journal over the selected periods.

    Re-runs the same per-invoice classification as detalle() (exempt vs net,
    VAT kind) but only accumulates the journal-level sums, returning a
    single-row list for the RML template. Resets the sums and the class
    counter afterwards.
    """
    monto_total = 0.0
    total = 0.0
    tax = 0.0
    tax_per = 0.0
    tax_pro = 0.0
    exento = 0.0
    neto = 0.0
    data = []
    periodos = ",".join(map(str, pe))
    sql = """select
    ai.reference
    ,date_invoice
    ,rp.vat
    , rp.name
    , ai.amount_untaxed
    , ai.amount_tax
    , ai.amount_total
    , ai.fiscal_position
    , (select CASE WHEN sum(ait.base_amount) is null then 0
    else sum(ait.base_amount) end as a
    from account_invoice_tax ait
    where UPPER(ait.name) like UPPER('%test%')
    and ait.invoice_id = ai.id) base_amount
    , ai.type
    , ai.id
    FROM public.account_invoice ai
    , public.res_partner rp
    WHERE ai.state not in ('draft', 'cancel')
    and ai.partner_id = rp.id
    AND ai.journal_id = {0}
    and ai.period_id in ({1})
    and ai.company_id = {2}
    order by date_invoice;
    """.format(journal_id, periodos, str(co[0]))
    self.cr.execute(sql)
    for record in self.cr.fetchall():
        print record
        monto_exento = 0.0
        ivacf = 0.0
        ivaper = 0.0
        ivapro = 0.0
        monto_neto = 0.0
        #self.documentos += 1
        # if record[10] == 'in_refund':
        # self.rectificador += record[6]
        # self.rectificador_neto += record[4]
        # self.rectificador_iva += record[5]
        if(record[5] < 1):
            # No tax amount: the whole untaxed amount counts as exempt.
            monto_exento = record[4]
            monto_neto = 0.0
            self.total_exento += monto_exento
            # record[4]=0
        else:
            monto_exento = 0.0
            monto_neto = record[4]
            self.total_neto = self.total_neto + monto_neto
            if record[4] * 1.19 > (record[6]+10): # factura de compra mixta
                # Mixed invoice: derive net from the tax at 19%.
                self.mixta = True
                monto_neto = record[5] / 0.19
                monto_exento = record[4] - monto_neto
                self.total_neto = self.total_neto - monto_exento
                self.total_exento = self.total_exento + monto_exento
        # if(record[9] == 'IVA 19% Compra'):
        sqliva = 'select name from account_invoice_tax where invoice_id=%s' % record[10]
        self.cr.execute(sqliva)
        registro = self.cr.fetchall()
        if(registro):
            # Classify the invoice's VAT line by its tax name.
            if(registro[0][0] == 'None'):
                ivacf = 0.0
                ivaper = 0.0
                ivapro = 0.0
            elif(registro[0][0] == 'IVA Pro - IVA Proporcional'):
                ivacf = 0.0
                ivaper = 0.0
                ivapro = record[5]
                self.total_pro += ivapro
            elif(registro[0][0] == 'IVA Per - IVA Perdida'):
                ivacf = 0.0
                ivaper = record[5]
                ivapro = 0.0
                self.total_per += ivaper
            else:
                ivacf = record[5]
                ivaper = 0.0
                ivapro = 0.0
                self.total_cf += ivacf
        else:
            ivacf = 0.0
            ivaper = 0.0
            ivapro = 0.0
        monto_total = record[6] # acumulando para pasar al subtotal
        # exento+=monto_exento
        # Sum the rounded display values (dots stripped by conversor).
        exento += int(self.conversor(self.formatLang(monto_exento, digits=0)))
        neto += int(self.conversor(self.formatLang(monto_neto, digits=0)))
        tax += int(self.conversor(self.formatLang(ivacf, digits=0)))
        tax_per += int(self.conversor(self.formatLang(ivaper, digits=0)))
        tax_pro += int(self.conversor(self.formatLang(ivapro, digits=0)))
        total += int(self.conversor(self.formatLang(monto_total, digits=0)))
        self.contador += 1
    # One summary row per journal.
    data.insert(len(data) + 1,
        {
        'cantidad':self.contador
        , 'base_amount':self.formatLang(exento, digits=0)
        , 'amount_untaxed':self.formatLang(neto, digits=0)
        , 'amount_tax':self.formatLang(tax, digits=0)
        , 'amount_tax_per':self.formatLang(tax_per, digits=0)
        , 'amount_tax_pro':self.formatLang(tax_pro, digits=0)
        , 'amount_total':self.formatLang(total, digits=0)
        })
    # Reset accumulators for the next journal.
    exento = 0.0
    neto = 0.0
    tax = 0.0
    tax_per = 0.0
    tax_pro = 0.0
    self.contador = 0.0
    total = 0.0
    return data
def totales(self, co, pe):
    """Return the report's grand-total row.

    Runs an aggregate SQL query (invoices minus credit notes) but the row
    actually returned is built from the class-level accumulators filled by
    detalle(); the query result only drives how many rows are emitted.
    """
    periodos = ",".join(map(str, pe))
    data = []
    sql = """select sum(cantidad) cantidad, sum(amount_untaxed) amount_untaxed, sum(amount_tax) amount_tax,sum(amount_total) amount_total, sum(base_amount) base_amount
    from (
    select count(*) as cantidad
    , coalesce(sum(ai.amount_untaxed),0) amount_untaxed
    , coalesce(sum(ai.amount_tax),0) amount_tax
    , coalesce(sum(ai.amount_total),0) amount_total
    , coalesce(sum((
    select
    CASE WHEN sum(ait.base_amount) is
    null
    then 0 else sum(ait.base_amount)
    end as a
    from account_invoice_tax ait
    where UPPER(ait.name) like UPPER('%iva%')
    and ait.invoice_id = ai.id
    )),0) base_amount
    FROM public.account_invoice ai
    , public.res_partner rp
    WHERE ai.state not in ('draft', 'cancel')
    and ai.partner_id = rp.id
    AND ai.journal_id in (
    select id from account_journal aj where aj.code between '100' and '119' and not
    UPPER(name) like UPPER('%nota%') and not UPPER(name) like UPPER('%credito%')
    )
    and ai.period_id in ({0})
    and ai.company_id = {1}
    union
    select count(*)*-1 as cantidad
    , coalesce(sum(ai.amount_untaxed),0)*-1 amount_untaxed
    , coalesce(sum(ai.amount_tax),0)*-1 amount_tax
    , coalesce(sum(ai.amount_total),0)*-1 amount_total
    , coalesce(sum((
    select
    CASE WHEN sum(ait.base_amount) is
    null
    then 0 else sum(ait.base_amount)
    end as a
    from account_invoice_tax ait
    where UPPER(ait.name) like UPPER('%iva%')
    and ait.invoice_id = ai.id
    )),0)*-1 base_amount
    FROM public.account_invoice ai
    , public.res_partner rp
    WHERE ai.state not in ('draft', 'cancel')
    and ai.partner_id = rp.id
    AND ai.journal_id in (
    select id from account_journal aj where aj.code between '100' and '119' and
    UPPER(name) like UPPER('%nota%') and UPPER(name) like UPPER('%credito%')
    )
    and ai.period_id in ({0})
    and ai.company_id = {1}
    ) as a
    """.format(periodos, str(co[0]))
    print(sql)
    self.cr.execute(sql)
    for record in self.cr.fetchall():
        data.insert(len(data) + 1,
            {
            'cantidad':self.formatLang(self.documentos, digits=0)
            , 'base_amount':self.formatLang((self.gran_exento), digits=0)
            , 'amount_untaxed':self.formatLang((self.gran_neto), digits=0)
            , 'amount_tax':self.formatLang((self.gran_iva), digits=0)
            , 'amount_tax_per':self.formatLang((self.gran_iva_per), digits=0)
            , 'amount_tax_pro':self.formatLang((self.gran_iva_pro), digits=0)
            , 'amount_total':self.formatLang((self.gran_total), digits=0)
            })
    # raise osv.except_osv(_('Aviso!'),_(self.valor_nota))
    return data
# Register the parser above with the RML report engine under the service
# name 'report.reportes_print_libcom' (template: reportes_reportc.rml).
report_sxw.report_sxw('report.reportes_print_libcom', 'reportes',
    'addons/reportes/reportes_reportc.rml', parser=reportes_reportc, header=False)
|
# Demonstrate three ways of reading the same text file.
file_name = 'learning_python.txt'

# 1) The whole file as one string.
with open(file_name) as handle:
    print(handle.read())
print("-----")

# 2) Only the first line.
with open(file_name) as handle:
    print(handle.readline())
print("-----")

# 3) Every line, as a list of strings (newlines preserved).
with open(file_name) as handle:
    print(handle.readlines())
|
"""
Queries of label queries
"""
def gql_labels(fragment):
    """
    Return the GraphQL labels query.

    `fragment` is a GraphQL selection set (the fields to fetch for each
    label); it is spliced verbatim into the query body.  The query is
    paginated via the `$first`/`$skip` variables and filtered by `$where`.
    """
    return f'''
query ($where: LabelWhere!, $first: PageSize!, $skip: Int!) {{
  data: labels(where: $where, first: $first, skip: $skip) {{
    {fragment}
  }}
}}
'''
GQL_LABELS_COUNT = '''
query($where: LabelWhere!) {
data: countLabels(where: $where)
}
'''
|
import random
import itertools

# Word searched for in the random grid.
word = "SOS"
# Grid dimensions; filled from user input before searchWord() is called.
rows = None
cols = None
def searchWord(grid, i, j, word, direction):
    """Check whether `word` occurs in `grid` starting at cell (i, j).

    The caller only invokes this on cells already equal to word[0], so
    only the remaining letters are verified along the given direction:

      * "orizontia" -- horizontally, to the right
      * "ka8eta"    -- vertically, downwards
      * "diagonia1" -- diagonally up-right
      * "diagonia2" -- diagonally down-left

    Returns True when the whole word fits inside the grid and matches,
    False otherwise.
    """
    # Derive the dimensions from the grid itself instead of relying on the
    # module-level `rows`/`cols` globals (same values, but self-contained).
    n_rows = len(grid)
    n_cols = len(grid[0]) if grid else 0

    if direction == "orizontia":
        j = j + 1
        for k in range(1, len(word)):
            if j < n_cols and word[k] == grid[i][j]:
                j = j + 1
            else:
                return False
    elif direction == "ka8eta":
        i = i + 1
        for k in range(1, len(word)):
            if i < n_rows and word[k] == grid[i][j]:
                i = i + 1
            else:
                return False
    elif direction == "diagonia1":
        i = i - 1
        j = j + 1
        for k in range(1, len(word)):
            if i >= 0 and j < n_cols and word[k] == grid[i][j]:
                i = i - 1
                j = j + 1
            else:
                return False
    elif direction == "diagonia2":
        i = i + 1
        j = j - 1
        for k in range(1, len(word)):
            # BUG FIX: the original tested `i > rows`, which can never be
            # true (i only reaches rows), so the down-left diagonal could
            # never match.  The correct bound check is `i < rows`.
            if i < n_rows and j >= 0 and word[k] == grid[i][j]:
                i = i + 1
                j = j - 1
            else:
                return False
    return True
rows = int(input("Enter rows: "))
cols = int(input("Enter columns: "))
positions = rows*cols
rows_index = [k for k in range(0,rows)]
cols_index = [k for k in range(0,cols)]
count=0
for a in range(1,100):
grid=[]
for i in range(rows):
grid.append(['S' for k in range(cols)])
pickO = random.sample(set(itertools.product(rows_index,cols_index)),int(positions/2))
for i in pickO:
grid[i[0]][i[1]]='O'
for i in range(rows):
for j in range(cols):
if grid[i][j] == word[0]:
if searchWord(grid,i,j,word,"orizontia"):
count+=1
if searchWord(grid,i,j,word,"ka8eta"):
count+=1
if searchWord(grid,i,j,word,"diagonia1"):
count+=1
if searchWord(grid,i,j,word,"diagonia2"):
count+=1
print("Mesos oros 3adwn SOS: "+str(count/100)) |
"""
Quick Sort:
Time Complexity :
1) Average case : O(n log n)
2) Worst Case : O(n^2)
"""
def quicksort(arr):
    """Return a new list with the elements of `arr` in ascending order.

    Out-of-place quicksort using the first element as pivot.  Average time
    O(n log n), worst case O(n^2).  Unlike the original, the input list is
    never mutated (the old code swapped 2-element lists in place).
    """
    if len(arr) < 2:
        # Empty or single-element lists are already sorted.
        return arr
    pivot = arr[0]
    left_arr = [i for i in arr[1:] if i < pivot]
    # BUG FIX: elements equal to the pivot were silently dropped by the
    # original (both partitions used strict comparisons); keep duplicates
    # in the right partition with >=.
    right_arr = [i for i in arr[1:] if i >= pivot]
    return quicksort(left_arr) + [pivot] + quicksort(right_arr)
if __name__=="__main__":
arr = [9, 3,2 ,1, 8, 10, 4]
sorted_arr = quicksort(arr)
for i in range(len(sorted_arr)):
print(sorted_arr[i] , end=" ")
|
from flask import Flask, render_template

# WSGI application instance shared by all route handlers below.
app = Flask(__name__)
@app.route("/ptok")
def ptok():
return render_template("ptok.html")
@app.route("/goodbye")
def goodbye():
return render_template("goodbye.html")
@app.route("/listdic")
def listdic():
page="<h1>this a cool list heehee</h1>"
list = [0, 1 , 2, 3, 4, 5, 6, 7, 8, 9, 10]
dic = {"a" : 10, "b" : 11, "c" : 12, "d" : 13, "e":14, "f":15}
mathanswer = list[5] + dic["a"]
page = page + '<br><a href="/home">go home dude</a>'
page = page + '<br>5 + a in base 10 is 15. <br> list[5] + dic["a"] =' + str(mathanswer)
if ( mathanswer == 15 ):
page = page + '<br>CORRECT list[5] + dic["a] does equal 15 :) :)</br>'
else:
page = page + '<br>INCORRECT list[5] + dic["a] does NOT equal 15 :('
page = page + '<br><a href="/goodbye">bye</a>'
return page
@app.route("/")
@app.route("/home")
def home():
page="<h1>Pounds to kilo chart In the Below Link</h1>"
page = page + '<br><a href="/ptok">Pounds to Kilos</a>'
page = page + '<br><a href="/goodbye">bye</a>'
page = page + '<br><a href="/listdic">List</a>'
page = page + "<br><h2>Click above</h2>"
return page
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0',port=8000)
|
from django.test import TestCase
from util.util import UtilABNT

# NOTE(review): the entire test case below is commented out, so this module
# currently runs no tests.  If re-enabled: `assertEquals` is a deprecated
# alias of `assertEqual`, and the UtilABNT methods are invoked unbound with
# `self` passed explicitly -- both worth cleaning up then.
# class TestUtil(TestCase):
#     """
#     Testes da classe UtilABNT
#     """
#     def test_nome_comum_bem_formatado(self):
#         self.assertEquals('José', UtilABNT.nome_abnt(self, 'José Saramago'))
#
#     def test_sobrenome_comum_bem_formatado(self):
#         self.assertEquals('Saramago', UtilABNT.sobrenome_abnt(self, 'José Saramago'))
|
##################################################
# PRICING A DOWN-AND-OUT BARRIER PUT OPTION
# stock obeys GBM with r=0.1, s=0.4 (time unit = year = 252 days), current
# price 50. 60 day european put option, with strike 50, but a knock-out
# barrier (B below) - if the stock ever falls below it the option is
# knocked out, reducing risk for the seller.
# daily observation. use a "control variate": a regular european put
import numpy as np
from numpy import random as rn
import scipy.stats as ss
#Black and Scholes
def d1(S0, K, r, sigma, T):
    """Black-Scholes d1 for spot S0, strike K, rate r, vol sigma, maturity T."""
    numerator = np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T
    return numerator / (sigma * np.sqrt(T))
def d2(S0, K, r, sigma, T):
    """Black-Scholes d2 (equals d1 minus sigma*sqrt(T)), written out directly."""
    numerator = np.log(S0 / K) + (r - 0.5 * sigma ** 2) * T
    return numerator / (sigma * np.sqrt(T))
def blsprice(type, S0, K, r, sigma, T):
    """Black-Scholes price of a European option.

    `type` is "C" for a call; any other value prices a put.
    (NOTE: the parameter name shadows the builtin `type`; kept unchanged
    for interface compatibility with existing callers.)
    """
    discounted_strike = K * np.exp(-r * T)
    if type == "C":
        # Call: S0*N(d1) - K*exp(-rT)*N(d2)
        return S0 * ss.norm.cdf(d1(S0, K, r, sigma, T)) - discounted_strike * ss.norm.cdf(d2(S0, K, r, sigma, T))
    # Put: K*exp(-rT)*N(-d2) - S0*N(-d1)
    return discounted_strike * ss.norm.cdf(-d2(S0, K, r, sigma, T)) - S0 * ss.norm.cdf(-d1(S0, K, r, sigma, T))
# Model / contract parameters.
S0=50;
K=50;
r=0.1;
sigma=0.4;
T=60/252; # up to here usual parameters for a put
B=35; # knock-out barrier - see what happens when increase to 35 and 40!
N=60; # number of (daily) observations
h=T/N;  # time step between observations
V_P=blsprice('P',S0, K, r, sigma, T); #E[Y]: exact BS price of the vanilla put (known mean of the control variate)
###################################################
# part 1: pilot run to estimate the control-variate coefficient from the
# covariance between the barrier payoff (X) and the vanilla payoff (Y)
M=5*10**3;
Z=rn.randn(M,N);
S=np.ones((M,N+1));
S[:,0]=S0*np.ones(M);
for i in range(0,N):
    # Exact GBM step: S_{t+h} = S_t * exp((r - sigma^2/2)h + sigma*sqrt(h)*Z)
    S[:,i+1]=S[:,i]*np.exp((r-sigma**2/2)*h+sigma*np.sqrt(h)*Z[:,i]);
worst=np.min(S,1);  # lowest observed price along each path
payoff=np.exp(-r*T)*(worst>B)*(S[:,N]<K)*(K-S[:,N]);#X: discounted barrier-put payoff (0 if knocked out)
payoff2=np.exp(-r*T)*(S[:,N]<K)*(K-S[:,N]); #Y: discounted vanilla-put payoff
q=np.cov(payoff,payoff2)
#print(q)# show the covariance matrix
c=-q[0,1]/q[1,1];  # optimal coefficient c* = -Cov(X,Y)/Var(Y)
#print(c) # show c
###################################################
# part 2: the real simulation, on fresh independent paths
M=45*10**3; # number of replications
Z=rn.randn(M,N);
S=np.ones((M,N+1));
S[:,0]=S0*np.ones(M);
for i in range(0,N):
    S[:,i+1]=S[:,i]*np.exp( (r-sigma**2/2)*h+sigma*np.sqrt(h)*Z[:,i]);
worst=np.min(S,1);
payoff=np.exp(-r*T)*(worst>B)*(S[:,N]<K)*(K-S[:,N]);#X
payoff2=np.exp(-r*T)*(S[:,N]<K)*(K-S[:,N]); #Y
corrected=payoff+c*(payoff2-V_P);#X_C=X+c*(Y-E[Y])
# Each result below is [estimate, standard error].
controlled=[np.mean(corrected),np.std(corrected)/np.sqrt(M)];controlled # answers with control variate
uncontrolled=[np.mean(payoff),np.std(payoff)/np.sqrt(M)];uncontrolled # answers without control variate
vanilla = [np.mean(payoff2),np.std(payoff2)/np.sqrt(M)];vanilla # simulated answers for vanilla option
################################
print(controlled)
print(uncontrolled)
print(vanilla)
import cv2
import numpy as np

# Shift an image 40 px right and 40 px down with an affine warp and show
# original and result side by side until a key is pressed.
img = cv2.imread('Lenna.png')
if img is None:
    # cv2.imread returns None (instead of raising) when the file is
    # missing or unreadable; fail loudly rather than crash on .shape.
    raise FileNotFoundError('Lenna.png not found or unreadable')
rows, cols = img.shape[:2]
# 2x3 affine matrix for a pure translation by (tx, ty) = (40, 40).
M = np.float32([[1, 0, 40], [0, 1, 40]])
dst = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('Original', img)
cv2.imshow('Translation', dst)  # typo fix: window title was 'Traslation'
cv2.waitKey(0)
cv2.destroyAllWindows()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.