"""
Week 5, Day 3: Edit Distance
Given two words word1 and word2, find the minimum number of operations required to convert
word1 to word2.
You have the following 3 operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
"""
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
"""
        Levenshtein distance computed via dynamic programming.
Ref.: https://en.wikipedia.org/wiki/Edit_distance
Ref.: https://en.wikipedia.org/wiki/Levenshtein_distance
        A standard algorithm in text processing.
:param word1: a string
:param word2: another string
:return: Levenshtein distance of both words
"""
M, N = len(word1), len(word2)
mem = [[0] * (N + 1) for _ in range(M + 1)]
        for i in range(1, M + 1):  # cost of deleting the first i chars of word1
mem[i][0] = i
        for j in range(1, N + 1):  # cost of inserting the first j chars of word2
mem[0][j] = j
for j, a in enumerate(word2, 1):
for i, b in enumerate(word1, 1):
k = 0 if a == b else 1
i1, j1 = i - 1, j - 1
mem[i][j] = min(
mem[i1][j] + 1, # deletion
mem[i][j1] + 1, # insertion
mem[i1][j1] + k # substitution
)
return mem[-1][-1]
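# A space-optimized variant (a sketch, not part of the original solution): the
# DP table above only ever reads the previous row, so two rows suffice, giving
# O(N) extra memory instead of O(M*N).
def min_distance_two_rows(w1: str, w2: str) -> int:
    prev = list(range(len(w2) + 1))  # row for the empty prefix of w1
    for i, c1 in enumerate(w1, 1):
        curr = [i]  # cost of deleting the first i chars of w1
        for j, c2 in enumerate(w2, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (c1 != c2)))   # substitution
        prev = curr
    return prev[-1]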
if __name__ == '__main__':
o = Solution()
print(o.minDistance(word1='kitten', word2='sitting') == 3)
print(o.minDistance(word1="horse", word2="ros") == 3)
print(o.minDistance(word1="intention", word2="execution") == 5)
print(o.minDistance(word1='', word2='a') == 1)
print(o.minDistance(word1='a', word2='b') == 1)
print(o.minDistance(word1='a', word2='a') == 0)
print(o.minDistance(word1='a', word2='ab') == 1)
print(o.minDistance(word1='ba', word2='ab') == 2)
# last line of code
|
#-*- encoding:utf-8 -*-
from hello import AwardGrade,db
db.session.add(AwardGrade(AGname='三等奖'))  # third prize
db.session.add(AwardGrade(AGname='二等奖'))  # second prize
db.session.add(AwardGrade(AGname='一等奖'))  # first prize
db.session.add(AwardGrade(AGname='特等奖'))  # grand prize
db.session.commit()
|
# following PEP 386
__version__ = "2.1.1"
|
class Tweet(object):
__author = ""
__text = ""
__location = ""
#__longitude = ""
#__latitude = ""
__creation = ""
#def __init__(self, author, text, longitude, latitude):
def __init__(self, author, text, location, creation):
self.__author = author
self.__text = text
self.__location = location
#self.__longitude = longitude
#self.__latitude = latitude
self.__creation = creation
def get_author(self):
return self.__author
def set_author(self, author):
self.__author = author
def get_text(self):
return self.__text
def set_text(self, text):
self.__text = text
def get_location(self):
return self.__location
def set_location(self, location):
self.__location = location
def get_creation(self):
return self.__creation
    def set_creation(self, creation):
        self.__creation = creation
def to_string(self):
return "['{0}','{1}','{2}','{3}']".format(self.get_author(), self.get_text(), self.get_location(), self.get_creation())
'''
t = Tweet("Javi", "Wassup", "100,300", "5")
print(t.to_string())
'''
|
import node_preprocess
import networkx as nx
import csv
import matplotlib.pyplot as plt
def getRightCore():
result = {}
with open ("./CoreNumbers") as f:
csv_file = csv.reader(f, delimiter=':')
for row in csv_file:
result[row[0]] = row[1]
return result
def getRightPeak():
result = {}
with open ("./test") as f:
csv_file = csv.reader(f, delimiter=':')
for row in csv_file:
result[row[0]] = row[1]
return result
G = nx.Graph()
preprocess = node_preprocess.node_preprocess('./grad_edges.txt', G)
preprocess.getGraph()
"""k_core, edge_core = preprocess.k_core(16)
result = {}
for node in G.__iter__():
for i in k_core:
for j in k_core[i]:
result[j] = i
count = 0"""
result2 = getRightCore()
"""for i in result:
if int(result[i]) != int(result2[i]):
print (result[i], result2[i])
count += 1
num = nx.number_of_nodes(G)
print ("Accurate rate: ", count/num)"""
k_core = preprocess.k_core2(16, G.copy())
result = {}
for i in k_core:
for j in k_core[i]:
result[j] = i
count = 0
for i in result:
if int(result[i]) != int(result2[i]):
print (result[i], result2[i])
count += 1
num = nx.number_of_nodes(G)
print ("Accurate rate: ", 1 - count/num)
Gc = G.copy()
result2 = getRightPeak()
k_peak = preprocess.k_peak2()
result = {}
total = 0
for i in k_peak:
for j in k_peak[i]:
result[j] = i
total+=1
count = 0
for i in result:
if int(result[i]) != int(result2[i]):
print ("node: ", i, result[i], result2[i])
count += 1
num = nx.number_of_nodes(G)
print ("Accurate rate: ", 1 - count/num)
print ("Total nodes: ", total)
node = '5363'
G2 = nx.Graph()
edge_list = preprocess.getEdgeList(node)
edgeList = [(node, x) for x in edge_list]
G2.add_edges_from(edgeList)
pos = nx.spring_layout(G2)
nx.draw(G2, pos=pos)
plt.show()
|
kor_score = [49, 79, 20, 100, 80]
math_score = [43, 59, 85, 30 ,90]
eng_score = [49, 79, 48, 60, 100]
midterm_score = [kor_score, math_score, eng_score]
student_score = [0, 0, 0, 0, 0]
i = 0
for subject in midterm_score:
    for score in subject:
        print(score)
        student_score[i] += score
        i += 1
    print(student_score)
    i = 0
else:
    a, b, c, d, e = student_score
    student_average = [a/3, b/3, c/3, d/3, e/3]
    print(student_average)
|
from django.db import models
import random
from django import forms
class Wallet():
def __init__(self, request):
self.total_gold = 0
self.request = request
        self.activities = []
        if "total_gold" in request.session:
            self.total_gold = request.session['total_gold']
        if "activities" in request.session:
            self.activities = request.session['activities']
    def find_gold_in_farm(self):
        random_gold = random.randrange(10, 20)
        self.total_gold += random_gold
        self.activities.append({'text': "Earned " + str(random_gold) + " from the farm.", 'isPositive': True})
        self.save_in_session()
    def find_gold_in_cave(self):
        random_gold = random.randrange(5, 10)
        self.total_gold += random_gold
        self.activities.append({'text': "Earned " + str(random_gold) + " from the cave.", 'isPositive': True})
        self.save_in_session()
    def find_gold_in_house(self):
        random_gold = random.randrange(2, 5)
        self.total_gold += random_gold
        self.activities.append({'text': "Earned " + str(random_gold) + " from the house.", 'isPositive': True})
        self.save_in_session()
    def find_gold_in_casino(self):
        random_gold = int(random.uniform(-50, 50))
        self.total_gold += random_gold
        if random_gold > 0:
            self.activities.append({'text': "Earned " + str(random_gold) + " from the casino.", 'isPositive': True})
        else:
            self.activities.append({'text': "Lost " + str(-random_gold) + " from the casino.", 'isPositive': False})
        self.save_in_session()
    def save_in_session(self):
        self.request.session['total_gold'] = self.total_gold
        self.request.session['activities'] = self.activities
def find_gold(self, location):
if location == "farm":
self.find_gold_in_farm()
elif location == "cave":
self.find_gold_in_cave()
elif location == "house":
self.find_gold_in_house()
elif location == "casino":
self.find_gold_in_casino()
class Form(forms.Form):
ninja = forms.CharField()
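# A minimal usage sketch (hypothetical view code, not part of this file):
# from django.shortcuts import render
# def visit(request, location):
#     wallet = Wallet(request)
#     wallet.find_gold(location)  # e.g. location = "farm"
#     return render(request, 'wallet.html',
#                   {'gold': wallet.total_gold, 'activities': wallet.activities})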
|
from time import time
import kenlm
import sys
"""
usage: python language_model_test.py ../../lib/kenlm/models/ngram_lm.trie
"""
def main():
if len(sys.argv) < 2:
print("Usage: give path parameter")
else:
_, model_path = sys.argv
model = kenlm.Model(model_path)
        sentence = ''
        while sentence != 'quit':
            print("Type a sentence to get a score (type 'quit' to exit):")
            sentence = input()
            start = time()
            print("input:", sentence)
            print(model.score(sentence, bos=True, eos=True))
            time_took = time() - start
            print("time took:", time_took)
if __name__ == '__main__':
    main()
|
import fourier
import pcf
import ioutils as io
from mathutils import *
import setup
import tensorflow as tf  # needed below by lossSetup (tf.transpose); assumed TF1-style API
#============================================================================
LOGS_DIR = "../fig10b-bnot-step-jitter/"
TARGET_DIR = "../targets/"
FILE_EXT = ".pdf"
#============================================================================
def buildEnvironment():
print("Building environment...")
trainingSetup = setup.TrainingSetup(
# input
pointCount = 1024,
dimCount = 3,
batchSize = 2,
griddingDims = 0,
# architecture
convCount = 60,
kernelCount = 30,
kernelSampleCount = 128,
receptiveField = 0.5,
projectionsStrings = [ '01', '12', '02' ],
customOp = True,
# training
trainIterations = 100000,
learningRate = 10e-7,
# evaluation
displayGrid = False,
evalRealizations = 1000,
saveEvalRealizations = True,
# IO
storeNetwork = True,
backupInterval = 5000,
weightDir = None
)
histogramSetupList = []
fourierSetupList = []
fourierSetup0 = setup.FourierSetup(
resolution=64,
cancelDC=True,
mcSamplesPerShell=48)
fourierSetup0.loadTarget1D(io.joinPath(TARGET_DIR, "spectra/bnot-powspec-radialmean-d2-n1024.txt"))
fourierSetupList.append(fourierSetup0)
fourierSetup1 = setup.FourierSetup(
resolution=48,
cancelDC=True,
mcSamplesPerShell=48)
fourierSetup1.loadTarget1D(io.joinPath(TARGET_DIR, "spectra/step-powspec-radialmean-d2-n1024.txt"))
fourierSetupList.append(fourierSetup1)
fourierSetup2 = setup.FourierSetup(
resolution=64,
cancelDC=True,
mcSamplesPerShell=48)
fourierSetup2.loadTarget1D(io.joinPath(TARGET_DIR, "spectra/jitter-powspec-radialmean-d2-n1024.txt"))
fourierSetupList.append(fourierSetup2)
return setup.Environment(trainingSetup, fourierSetupList, histogramSetupList)
#============================================================================
def lossSetup(env, outputNode):
histogramNode = None
outputSpectrumNodes = []
    lossNode = None
projs = env.trainingSetup.projections
print(projs, len(env.fourierSetupList))
if len(env.fourierSetupList) > 0:
print("======== setting the Fourier lossSetup for projections")
assert len(env.fourierSetupList) == len(projs), "lossSetup() Not enough fourierSetups provided, {0} required.".format(len(projs))
outputSpectrumNodes = []
for k in range(len(projs)):
if len(projs[k]) == 2: #2D Projections
print('Fourier lossStep 2D radialSpectrumMC: ', projs[k])
ptNode = tf.transpose([outputNode[:,:,projs[k][0]],outputNode[:,:,projs[k][1]]],perm=[1,2,0])
outputSpectrumNodes.append(fourier.radialSpectrumMC(ptNode, env.fourierSetupList[k]))
else:
print('Fourier lossStep {0}D radialSpectrumMC'.format(env.trainingSetup.dimCount))
outputSpectrumNodes.append(fourier.radialSpectrumMC(outputNode, env.fourierSetupList[k]))
loss = l1Loss(outputSpectrumNodes[k], env.fourierSetupList[k].target)
lossNode = lossNode + loss if lossNode is not None else loss
return lossNode, outputSpectrumNodes, histogramNode
|
class Decoder:
def __init__(self):
self.dictionary= {
'01':'A', '1000':'B', '1010':'C', '100':'D', '0':'E',
'0010':'F', '110':'G', '0000':'H', '00':'I', '0111':'J',
'101':'K', '0100':'L', '11':'M', '10':'N', '111':'O',
'0110':'P', '1101':'Q', '010':'R', '000':'S', '1':'T',
'001':'U', '0001':'V', '011':'W', '1001':'X', '1011':'Y',
'1100':'Z', '11111':'0', '01111':'1', '00111':'2', '00011':'3',
'00001':'4', '00000':'5', '10000':'6', '11000':'7', '11100':'8',
'11110':'9', '010101':'.', '110011':',', '001100':'?'}
def __str__(self):
string=""
for i in self.dictionary: # Takes every key and its corresponding value in the dictionary
string += i + ":" + self.dictionary[i] + "\n" # and turns it into a string format of Decoder class.
return string
def decode(self,morse_code_sequence):
coded_words = morse_code_sequence.split("***") # Divides the entered sequence into words.
decoded = "" # Created to hold decoded part of the sequence
for each_word in coded_words:
coded_letters = each_word.split('*') # Divides every coded word into coded letters
for each_letter in coded_letters:
if each_letter in self.dictionary: # Checks if the coded letter is in predefined dictionary or not
decoded += self.dictionary[each_letter] # If the coded letter exist in dictionary, decodes it
# and adds it to decoded sequence holder( variable decoded)
else:
return 'Invalid input, it is not defined in morse code dictionary.'# if the coded letter is not in predefined dictionary
# returns an error message
            decoded += ' '  # End of the outer loop body: add a space after each decoded word.
        decoded = decoded.strip()  # Drop the trailing space left after the last word.
        if not decoded or decoded[-1] not in ["?", ".", ","]:  # The sequence must end with a punctuation mark.
            return 'Invalid input, it can not finish without a punctuation.'
        return decoded  # If there is no error in the previous steps, return the decoded sequence.
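# A minimal usage sketch (the sequence below is illustrative, not from the
# original file): '*' separates letters and '***' separates words.
# print(Decoder().decode('0000*00***1*0000*0*010*0*010101'))  # -> 'HI THERE.'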
|
produtos = ('Leite', 3,
'Coca', 4,
'Calabresa', 15)
print(f'{"LISTAGEM DE PREÇO":^40}')
for n in range(0, len(produtos)):
if n % 2 == 0:
print(f'{produtos[n]:.<30}', end='R$')
else:
        print(f'{produtos[n]:>6.2f}')
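# An equivalent pairing sketch (same data, using slices instead of index
# parity; not part of the original snippet):
# for nome, preco in zip(produtos[::2], produtos[1::2]):
#     print(f'{nome:.<30}R${preco:>6.2f}')
|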
def soma_hipotenusas(n):
print ("As hipotenusas são")
soma_hipotenusas = 0
a = 1
b = 1
while n > 1:
while b < n:
while n**2 != a**2 + b**2 and a < n:
a = a + 1
if n**2 == a**2 + b**2:
soma_hipotenusas = soma_hipotenusas + n
b = b + 1
a = 1
print (n, end = " ")
n = n - 1
else:
b = b + 1
a = 1
n = n - 1
b = 1
print()
print("O Valor da Soma das Hipotenusas é")
return (soma_hipotenusas)
print(soma_hipotenusas(int(input("digite o valor de h:"))))
|
def capacity_scaling(
G,
demand: str = "demand",
capacity: str = "capacity",
weight: str = "weight",
heap=...,
): ...
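# This is a type-stub signature (the ellipses are stub placeholders). A
# hypothetical call against the real networkx implementation might look like:
# flow_cost, flow_dict = capacity_scaling(G)  # G: nx.DiGraph with 'demand'/'capacity'/'weight' attributes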
|
#!/usr/local/bin/python
import datetime
import json
import os
import sys
import traceback
import twitter
import urllib2
import xml.dom.minidom
import Config
import File
import FixText
import Job
import Secret
JSON_FIELDS = {
'title': '',
'titleList': [],
'listeners': '0',
'unique': '0',
'bitrate': '128'}
RUNNING = 'running'
NOT_RUNNING = 'not running'
NO_STREAM = 'no stream'
def getRawStatusRecords(data, status):
if not data:
return NOT_RUNNING
dom = xml.dom.minidom.parseString(data)
items = dom.getElementsByTagName('item')
if not items:
return NO_STREAM
item = items[0]
for child in item.childNodes:
name = child.tagName
if child.childNodes:
text = child.childNodes[0].wholeText
if name == 'title' and text:
text = FixText.swapParts(text)
status[name] = text
return ''
def getStatusRecord(data):
statusRecord = {}
error = getRawStatusRecords(data, statusRecord)
if error:
return dict(error=error)
else:
return dict((k, statusRecord.get(k, d)) for k, d in JSON_FIELDS.iteritems())
class StatusJob(Job.Job):
API = twitter.Api(
consumer_key = Secret.consumer_key,
consumer_secret = Secret.consumer_secret,
access_token_key = Secret.access_token_key,
access_token_secret = Secret.access_token_secret)
def __init__(self):
Job.Job.__init__(self, Config.STATUS)
self.output = self.output or {}
def process(self, data):
def getTitle(out):
return (out or {}).get('title', None)
output = getStatusRecord(data)
title = getTitle(output)
if title == getTitle(self.output):
return self.output
titleList = self.output.get('titleList', [])
if title:
index = (1 + titleList[0]['index']) if titleList else 0
            time = datetime.datetime.now().strftime('%H:%M')
titleList.insert(0, {'index': index, 'title': title, 'time': time})
while len(titleList) > Config.MAX_TITLES:
titleList.pop()
output['titleList'] = titleList
return output
def onOutputChanged(self, output):
if Config.POST_TO_TWITTER and output:
try:
t = output.get('title', None)
if t and (not self.output or (self.output.get('title', None) != t)):
File.replaceJson(Config.STATUS_TITLE_FILE, title=t)
StatusJob.API.PostUpdate(FixText.fitToSize(t))
except:
traceback.print_exc(file=sys.stdout)
Job.Job.onOutputChanged(self, output)
|
from django.db import models
class CategoryManager(models.Manager):
"""
Adds number of items and number of subcategories to category objects
"""
    def get_queryset(self):
        # distinct=True is required here: combining two multi-valued Count
        # annotations in one queryset otherwise inflates both counts via the
        # joined rows.
        return super().get_queryset().annotate(
            number_of_items=models.Count('items', distinct=True)).annotate(
            number_of_subcategories=models.Count('subcategories', distinct=True))
class Category(models.Model):
title = models.CharField(max_length=30, unique=True)
description = models.TextField(blank=True, default='')
parent_category = models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL,
related_name='subcategories')
objects = CategoryManager()
def __str__(self):
return self.title
class Item(models.Model):
title = models.CharField(max_length=30)
owner = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE, related_name='items')
price = models.DecimalField(max_digits=8, decimal_places=2)
description = models.TextField(blank=True, default='')
image = models.ImageField(upload_to='images/', blank=True, null=True)
number_of_views = models.PositiveIntegerField(default=0)
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='items')
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
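# A minimal usage sketch for the annotated manager (assumes migrations are
# applied; names illustrative):
# for category in Category.objects.all():
#     print(category.title, category.number_of_items, category.number_of_subcategories)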
|
#!/usr/bin/env python
#-*-coding: utf-8 -*-
"""
@version: 0.1
@author:linyl
@file: html_parser.py
@time: 2018/9/20 21:55
"""
import re
from urllib.parse import urljoin  # Python 3; the original used the Python 2 urlparse module
from bs4 import BeautifulSoup
class HtmlParser(object):
def parse(self, page_url, html_cont):
if page_url is None or html_cont is None:
return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
return new_urls,new_data
def _get_new_urls(self, page_url, soup):
new_urls = set()
# /item/123.html
links = soup.find_all('a',href=re.compile(r'/item/\d+\.html'))
for link in links:
new_url = link['href']
            new_full_url = urljoin(page_url, new_url)
print(new_full_url)
new_urls.add(new_full_url)
return new_urls
def _get_new_data(self, page_url, soup):
res_data = {}
"""<dd class="lemmaWgt-lemmaTitle-title">
<h1>Python</h1>
<h2>(计算机程序设计语言)</h2>
<a href="javascript:;" class="edit-lemma cmn-btn-hover-blue cmn-btn-28 j-edit-link" style="display: inline-block;"><em class="cmn-icon wiki-lemma-icons wiki-lemma-icons_edit-lemma"></em>编辑</a>
<a class="lock-lemma" nslog-type="10003105" target="_blank" href="/view/10812319.htm" title="锁定"><em class="cmn-icon wiki-lemma-icons wiki-lemma-icons_lock-lemma"></em>锁定</a>
</dd>
"""
title_node = soup.find('dd',class_="lemmaWgt-lemmaTitle-title").find('h1')
res_data['title'] = title_node.get_text()
"""<div class="lemma-summary" label-module="lemmaSummary">
<div class="para" label-module="para">Python 是一个有条理的和强大的面向对象的程序设计语言,类似于Perl, Ruby, Scheme, 或 Java.</div>
</div>
"""
summary_node = soup.find('div',class_='lemma-summary')
res_data['summary'] = summary_node.get_text()
res_data['url'] = page_url
return res_data
|
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a = torch.ones(1, 2, 3, 3).to(device)
print(a)
a = a + torch.zeros(a.size()).data.normal_(0, 0.1).to(device)
print(a.size(), a)
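# An equivalent, more idiomatic noise injection (a sketch; randn_like draws
# the same-shaped N(0, 1) sample, scaled here to std 0.1):
# a = a + 0.1 * torch.randn_like(a)
|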
# Django
from django.db import models
# Local Django
from appointments.variables import APPOINTMENT_STATUSES, PENDING
class Appointment(models.Model):
status = models.PositiveSmallIntegerField(
verbose_name='Status', choices=APPOINTMENT_STATUSES, default=PENDING
)
subject = models.TextField(verbose_name='Subject')
start_date = models.DateTimeField(verbose_name='Start Date')
end_date = models.DateTimeField(verbose_name='End Date')
    created = models.ForeignKey(
        verbose_name='Created', to='users.User',
        on_delete=models.CASCADE  # on_delete is required on ForeignKey since Django 2.0
    )
    appointee = models.ForeignKey(
        verbose_name='Appointee', to='users.User', related_name='appointments',
        on_delete=models.CASCADE
    )
class Meta:
verbose_name = 'Appointment'
verbose_name_plural = 'Appointments'
def __str__(self):
return '{created} - {appointee}'.format(
created=self.created.get_full_name(),
appointee=self.appointee.get_full_name()
)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake buckets data."""
FAKE_BUCKETS_MAP = [{
'project_number': 11111,
'buckets': [{
'kind': 'storage#bucket',
'name': 'fakebucket1',
'timeCreated': '2016-07-21T12:57:04.604Z',
'updated': '2016-07-21T12:57:04.604Z',
'projectNumber': '11111',
'metageneration': '2',
'location': 'EU',
'etag': 'CAE=',
'id': 'fakebucket1',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
'storageClass': 'STANDARD',
'lifecycle': {}
}]
}]
EXPECTED_LOADABLE_BUCKETS = [{
'project_number': 11111,
'bucket_id': 'fakebucket1',
'bucket_name': 'fakebucket1',
'bucket_kind': 'storage#bucket',
'bucket_storage_class': 'STANDARD',
'bucket_location': 'EU',
'bucket_create_time': '2016-07-21 12:57:04',
'bucket_update_time': '2016-07-21 12:57:04',
'bucket_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1',
'bucket_lifecycle_raw': '{}',
'raw_bucket': '{"updated": "2016-07-21T12:57:04.604Z", "timeCreated": "2016-07-21T12:57:04.604Z", "metageneration": "2", "id": "fakebucket1", "kind": "storage#bucket", "name": "fakebucket1", "projectNumber": "11111", "etag": "CAE=", "storageClass": "STANDARD", "lifecycle": {}, "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1", "location": "EU"}'
}
]
FAKE_BUCKET_ACL_MAP = [{
'bucket_name': 'fakebucket1',
'acl': [
{
'kind': 'storage#bucketAccessControl',
'bucket': 'fakebucket1',
'entity': 'project-owners-11111',
'etag': 'CAE=',
'role': 'OWNER',
'projectTeam': {
'projectNumber': '11111',
'team': 'owners'
},
'id': 'fakebucket1/project-owners-11111',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111'
},
{
'kind': 'storage#bucketAccessControl',
'bucket': 'fakebucket1',
'entity': 'project-readers-11111',
'etag': 'CAE=',
'role': 'READER',
'projectTeam': {
'projectNumber': '11111',
'team': 'readers'},
'id': 'fakebucket1/project-readers-11111',
'selfLink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111'
}
]
}]
EXPECTED_LOADABLE_BUCKET_ACLS = [{
'acl_id': 'fakebucket1/project-owners-11111',
'bucket': 'fakebucket1',
'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111',
'domain': None,
'email': None,
'entity': 'project-owners-11111',
'entity_id': None,
'kind': 'storage#bucketAccessControl',
'project_team': '{"projectNumber": "11111", "team": "owners"}',
'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "OWNER", "projectTeam": {"projectNumber": "11111", "team": "owners"}, "bucket": "fakebucket1", "id": "fakebucket1/project-owners-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-owners-11111", "entity": "project-owners-11111"}',
'role': 'OWNER'
},
{
'acl_id': 'fakebucket1/project-readers-11111',
'bucket': 'fakebucket1',
'bucket_acl_selflink': 'https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111',
'domain': None,
'email': None,
'entity': 'project-readers-11111',
'entity_id': None,
'kind': 'storage#bucketAccessControl',
'project_team': '{"projectNumber": "11111", "team": "readers"}',
'raw_bucket_acl': '{"kind": "storage#bucketAccessControl", "etag": "CAE=", "role": "READER", "projectTeam": {"projectNumber": "11111", "team": "readers"}, "bucket": "fakebucket1", "id": "fakebucket1/project-readers-11111", "selfLink": "https://www.googleapis.com/storage/v1/b/fakebucket1/acl/project-readers-11111", "entity": "project-readers-11111"}',
'role': 'READER'
}]
FAKE_RAW_BUCKET_ROW = [
{
'bucket_id': 'bucket1',
'raw_bucket': """{
"acl": [
{"id": "bucket1/project-readers-1",
"role": "READER",
"bucket": "bucket1",
"domain": "",
"email": "",
"entity": "",
"entityId": "",
"kind": "",
"projectTeam": []
}
],
"id": "bucket1"
}"""
}
]
EXPECTED_RAW_BUCKET_JSON = [
{
'bucket_name': 'bucket1',
'acl': [
{'id': 'bucket1/project-readers-1',
'role': 'READER',
'bucket': 'bucket1',
'domain': '',
'email': '',
'entity': '',
'entityId': '',
'kind': '',
'projectTeam': [],
}
]
}
]
|
import os
import threading
import multiprocessing
from helper import *
from network import NetworkWrapper
from worker import Worker
from config import *
from time import sleep, time
load_model = False
if load_model == True:
FLAGS.experience_buffer_maxlen = 100
FLAGS.episodes = 600
#Reset the graph
tf.reset_default_graph()
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = dir_path + '/train_0'
frames_path = dir_path + '/frames'
#Create folders
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(frames_path):
os.makedirs(frames_path)
# copy tsv for embeddings
bashCommand = "cp " + dir_path + "/embedding_metadata.tsv " + model_path
os.system(bashCommand)
config = tf.ConfigProto(allow_soft_placement = True)
config.gpu_options.allow_growth = True
with tf.device(FLAGS.device),tf.Session(config = config) as sess:
global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes',
trainable=False)
trainer = tf.train.AdamOptimizer(1e-4,)
#Create master network : it will hold the gradients
#Each worker will update these gradients and sync with the master
    network_wrapper = NetworkWrapper('global', trainer, None, FLAGS)
    master_network = network_wrapper.get_network()
    # Set workers to the number of available CPU threads
num_workers = multiprocessing.cpu_count()
# num_workers = 1
workers = []
for index in range(num_workers):
# Create worker classes
worker = Worker(index, sess, trainer, dir_path, global_episodes,
master_network, FLAGS)
# Initialize associated game
worker.init_game(GAME_NAME, INPUT_SIZE)
workers.append(worker)
saver = tf.train.Saver(max_to_keep=20)
coord = tf.train.Coordinator()
if load_model == True:
print ('LOG: Loading Model... %s'%model_path)
# model_checkpoint_path = ckpt.model_checkpoint_path
# model_checkpoint_path = model_path + '/model-500.ckpt'
model_checkpoint_path = tf.train.latest_checkpoint(model_path)
saver.restore(sess, model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
start = time()
# Start the work process for each worker
worker_threads = []
for worker in workers:
worker_work = lambda: worker.work(sess, coord, saver)
t = threading.Thread(target=(worker_work))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
end = time()
minutes = (end - start)/60
print('LOG: Training for %d episodes took %f minutes' % (FLAGS.episodes,
minutes))
'''
From lab directory (where A3C directory was placed)
COMMAND :bazel run :a3c_train --define headless=osmesa
tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
'''
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode', 'msvs'])
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
no dir here
hi c
hello baz
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
test.run_built_executable('gencc_int_output_external', chdir=chdir,
stdout=expect)
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
os.path.join('a', 'b'))
# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
test.must_match('relocate/src/subdir/foo/bar/baz.path',
os.path.join('foo', 'bar', 'baz.printvars'))
test.must_match('relocate/src/subdir/a/b/c.path',
os.path.join('a', 'b', 'c.printvars'))
test.pass_test()
|
from rest_framework import serializers
from django.db.models import Q
from . import models
from artuium_server.users import serializers as users_serializers
from artuium_server.artwork import serializers as artwork_serializers
from artuium_server.exhibition import serializers as exhibition_serializers
class ReviewSerializer(serializers.ModelSerializer):
author = users_serializers.ProfileSerializer()
exhibition = exhibition_serializers.ExhibitionSerializer()
artwork = artwork_serializers.ArtworkSerializer()
is_liked = serializers.SerializerMethodField()
is_reported = serializers.SerializerMethodField()
class Meta:
model = models.Review
fields = ['id', 'author', 'time', 'content', 'exhibition', 'artwork', 'rate', 'expression', 'recommended', 'reply_count', 'like_count', 'is_liked', 'is_reported']
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
like_check = models.Like.objects.filter(user = user, review = obj)
if like_check.count() > 0:
return True
else:
return False
return False
def get_is_reported(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
reporting_check = models.Reporting.objects.filter(user = user, review = obj)
if reporting_check.count() > 0:
return True
else:
return False
return False
class NoticeSerializer(serializers.ModelSerializer):
is_new = serializers.SerializerMethodField()
class Meta:
model = models.Notice
fields = ['id', 'title', 'date', 'content', 'image', 'is_banner', 'is_new', 'image_width', 'image_height']
def get_is_new(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
if obj.date >= user.date_joined:
notice_check = models.NoticeCheck.objects.filter(user = user, notice = obj)
if notice_check.count() > 0:
return False
else:
return True
else:
return False
return False
class ReplySerializer(serializers.ModelSerializer):
author = users_serializers.ProfileSerializer()
reply_count = serializers.SerializerMethodField()
initial_replies = serializers.SerializerMethodField()
review = ReviewSerializer()
class Meta:
model = models.Reply
fields = ['id', 'review', 'author', 'time', 'content', 'reply_count', 'initial_replies']
def get_reply_count(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
blocking_reply = models.Blocking.objects.filter(user = user, reply__isnull = False).values_list('reply__id', flat = True)
blocking_user = models.Blocking.objects.filter(user = user, to_user__isnull = False).values_list('to_user__id', flat = True)
replies = obj.replies.filter(Q(deleted = False) & ~Q(id__in = blocking_reply) & ~Q(author__id__in = blocking_user))
return replies.count()
else:
return obj.replies.count()
def get_initial_replies(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
blocking_reply = models.Blocking.objects.filter(user = user, reply__isnull = False).values_list('reply__id', flat = True)
blocking_user = models.Blocking.objects.filter(user = user, to_user__isnull = False).values_list('to_user__id', flat = True)
replies = obj.replies.filter(Q(deleted = False) & ~Q(id__in = blocking_reply) & ~Q(author__id__in = blocking_user)).order_by('time')[:3]
replies_list = []
for reply in replies:
is_me = False
if user == reply.author:
is_me = True
replies_list.append({
'id': reply.id,
'time': reply.time,
'content': reply.content,
'author': {
'id': reply.author.id,
'nickname': reply.author.nickname,
'is_me': is_me
}
})
return replies_list
else:
            replies = obj.replies.filter(deleted = False).order_by('time')[:3]
            replies_list = []
            for reply in replies:
                # No request in context, so there is no authenticated user to
                # compare against; is_me is always False here.
                is_me = False
                replies_list.append({
                    'id': reply.id,
                    'time': reply.time,
                    'content': reply.content,
                    'author': {
                        'id': reply.author.id,
                        'nickname': reply.author.nickname,
                        'is_me': is_me
                    }
                })
            return replies_list
class LikeSerializer(serializers.ModelSerializer):
user = users_serializers.ProfileSerializer()
review = ReviewSerializer()
artwork = artwork_serializers.ArtworkSerializer()
exhibition = exhibition_serializers.ExhibitionSerializer()
class Meta:
model = models.Like
fields = ['id', 'user', 'review', 'artwork', 'exhibition', 'time']
class NotificationSerializer(serializers.ModelSerializer):
from_user = users_serializers.ProfileSerializer()
to_user = users_serializers.ProfileSerializer()
is_new = serializers.SerializerMethodField()
review = ReviewSerializer()
reply = ReplySerializer()
class Meta:
model = models.Notification
fields = ['id', 'from_user', 'to_user', 'type', 'review', 'reply', 'date', 'is_new']
def get_is_new(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
if obj.date >= user.date_joined:
notification_check = models.NotificationCheck.objects.filter(user = user, notification = obj)
if notification_check.count() > 0:
return False
else:
return True
else:
return False
        return False
|
# Unpacking
a = [(1, 2), (3, 4), (5, 6)]
for i, j in a:
print("%d + %d = %d"%(i, j, i + j))
filmFestival = {
"최우수 작품상":"택시운전사",
"감독상":"아이 캔 스피크",
"남우주연상":"송강호",
"여우주연상":"나문희"
}
for prize in filmFestival:
print(prize)
for winner in filmFestival.values():
print(winner)
# Iterate over key/value pairs together
for prize_winner in filmFestival.items():
print(prize_winner)
for prize, winner in filmFestival.items():
print(prize + " : " + winner)
|
import os
import shutil
import glob
# get current directory and list the files and folders
current_dir = os.getcwd()
list_of_files_and_folders = os.listdir(current_dir)
# Set of folders to be excluded
folders_list = {
'Folders',
'Image files',
'Excel Files',
'Docs and ppts',
'PDF files',
'Executables',
'Archives',
'Other files'
}
# create basic folders from folder list
for folder in folders_list:
if not os.path.exists(os.path.join(current_dir, folder)):
os.makedirs(os.path.join(current_dir, folder))
# Move all file from one diectory to another
def moveAllFilesinDir(srcDir, dstDir):
if not os.path.exists(dstDir):
os.makedirs(dstDir)
# Check if both the are directories
if os.path.isdir(srcDir) and os.path.isdir(dstDir):
# Iterate over all the files in source directory
        for filePath in glob.glob(os.path.join(srcDir, '*')):
# Move each file to destination Directory
shutil.move(filePath, dstDir)
shutil.rmtree(srcDir)
else:
print("srcDir & dstDir should be Directories")
for file in list_of_files_and_folders:
filename, extension = os.path.splitext(file)
current_folder = os.path.join(current_dir, file)
dest_folder = os.path.join(current_dir, 'Folders', file)
if file != 'folder_organizer.exe':
if not extension:
# Check if it is an organiser folder
if filename not in folders_list:
moveAllFilesinDir(current_folder, dest_folder)
else:
if extension in ('.jpg', '.png', '.gif'):
shutil.move(
os.path.join(current_dir, file),
os.path.join(current_dir, 'Image files', file))
elif extension in ('.xls', '.xlsx', '.xltx', '.xlsm'):
shutil.move(
os.path.join(current_dir, file),
                    os.path.join(current_dir, 'Excel Files', file))
elif extension in ('.doc', '.docx', '.ppt', '.pptx'):
shutil.move(
os.path.join(current_dir, file),
os.path.join(current_dir, 'Docs and ppts', file))
            elif extension in ('.pdf',):  # note the comma: ('.pdf') is a plain string, not a tuple
shutil.move(
os.path.join(current_dir, file),
os.path.join(current_dir, 'PDF files', file))
elif extension in ('.bat', '.exe'):
shutil.move(
os.path.join(current_dir, file),
os.path.join(current_dir, 'Executables', file))
            elif extension in ('.zip', '.rar', '.tar', '.iso', '.gz', '.7z'):  # splitext yields '.gz' for '.tar.gz'
shutil.move(
os.path.join(current_dir, file),
os.path.join(current_dir, 'Archives', file))
else:
shutil.move(
os.path.join(current_dir, file),
                    os.path.join(current_dir, 'Other files', file))
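# A data-driven alternative (a sketch, not part of the original script): map
# extensions to folder names once and look them up, instead of chained elifs.
# EXT_TO_FOLDER = {'.jpg': 'Image files', '.png': 'Image files', '.pdf': 'PDF files'}
# dest_name = EXT_TO_FOLDER.get(extension, 'Other files')
|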
import SteamScraperEngine
import json
import os.path
url ="https://store.steampowered.com/search/results/?query&start=0&count=50&dynamic_data=&force_infinite=1&category1=998%2C994%2C21%2C10%2C997&filter=topsellers&snr=1_7_7_7000_7&infinite=1"
urlGenre = "https://store.steampowered.com/tag/browse/#global_492"
def makeJSON(targetDirectory, accessMode, dictionary, indent):
# Procedure that converts a python dictionary into an external JSON File
with open(os.path.dirname(__file__) + targetDirectory, accessMode) as f:
json.dump(dictionary, f, indent=indent)
print("######## Scraping Data is Running ATM.... ########")
gameList, genreList, gameDevList = SteamScraperEngine.SteamScraperMain(url, urlGenre)
# Creating a dictionary that contains the list of games
# that have been successfully scraped
GameDictionary = {
"GamesData" : gameList,
}
print("######## Creating JSON files ATM.... ########")
# Converting from python dictionary to JSON object
# Creating external JSON file
makeJSON('/../data/SteamGame.json', 'w', GameDictionary, 2)
# Creating a dictionary that contains the lists of genres and developers
# that have been successfully scraped
GenreDeveloperDictionary = {
"GenreData" : genreList,
"DeveloperData" : gameDevList
}
# Converting from python dictionary to JSON object
# Creating external JSON file
makeJSON('/../data/SteamGenreDeveloper.json', 'w', GenreDeveloperDictionary, 2)
print("######## JSON Files has been cretead successfully.... ########") |
import numpy as np
import hpgeom as hpg
import numbers
from .healSparseCoverage import HealSparseCoverage
from .utils import reduce_array, check_sentinel, _get_field_and_bitval, WIDE_NBIT, WIDE_MASK
from .utils import is_integer_value, _compute_bitshift
from .io_map import _read_map, _write_map, _write_moc
import warnings
class HealSparseMap(object):
"""
Class to define a HealSparseMap
"""
def __init__(self, cov_map=None, cov_index_map=None, sparse_map=None, nside_sparse=None,
healpix_map=None, nside_coverage=None, primary=None, sentinel=None,
nest=True, metadata=None, _is_view=False):
"""
Instantiate a HealSparseMap.
Can be created with cov_index_map, sparse_map, and nside_sparse; or with
healpix_map, nside_coverage. Also see `HealSparseMap.read()`,
`HealSparseMap.make_empty()`, `HealSparseMap.make_empty_like()`.
Parameters
----------
cov_map : `HealSparseCoverage`, optional
Coverage map object
cov_index_map : `np.ndarray`, optional
Coverage index map, will be deprecated
sparse_map : `np.ndarray`, optional
Sparse map
nside_sparse : `int`, optional
Healpix nside for sparse map
healpix_map : `np.ndarray`, optional
Input healpix map to convert to a sparse map
nside_coverage : `int`, optional
Healpix nside for coverage map
primary : `str`, optional
Primary key for recarray, required if dtype has fields.
sentinel : `int` or `float`, optional
Sentinel value. Default is `UNSEEN` for floating-point types,
minimum int for int types, and False for bool types.
nest : `bool`, optional
If input healpix map is in nest format. Default is True.
metadata : `dict`-like, optional
Map metadata that can be stored in FITS header format.
_is_view : `bool`, optional
This healSparse map is a view into another healsparse map.
Not all features will be available. (Internal usage)
Returns
-------
healSparseMap : `HealSparseMap`
"""
if cov_index_map is not None and cov_map is not None:
raise RuntimeError('Cannot specify both cov_index_map and cov_map')
if cov_index_map is not None:
warnings.warn("cov_index_map deprecated", DeprecationWarning, stacklevel=2)
cov_map = HealSparseCoverage(cov_index_map, nside_sparse)
if cov_map is not None and sparse_map is not None and nside_sparse is not None:
# this is a sparse map input
self._cov_map = cov_map
self._sparse_map = sparse_map
elif healpix_map is not None and nside_coverage is not None:
# this is a healpix_map input
if sentinel is None:
sentinel = hpg.UNSEEN
if is_integer_value(healpix_map[0]) and not is_integer_value(sentinel):
raise ValueError("The sentinel must be set to an integer value with an integer healpix_map")
elif not is_integer_value(healpix_map[0]) and is_integer_value(sentinel):
raise ValueError("The sentinel must be set to an float value with an float healpix_map")
self._cov_map, self._sparse_map = self.convert_healpix_map(healpix_map,
nside_coverage=nside_coverage,
nest=nest,
sentinel=sentinel)
nside_sparse = hpg.npixel_to_nside(healpix_map.size)
else:
raise RuntimeError("Must specify either cov_map/sparse_map or healpix_map/nside_coverage")
self._nside_sparse = nside_sparse
self._is_rec_array = False
self._is_wide_mask = False
self._wide_mask_width = 0
self._primary = primary
self.metadata = metadata
self._is_view = _is_view
if self._sparse_map.dtype.fields is not None:
self._is_rec_array = True
if self._primary is None:
raise RuntimeError("Must specify `primary` field when using a recarray for the sparse_map.")
self._sentinel = check_sentinel(self._sparse_map[self._primary].dtype.type, sentinel)
else:
if ((self._sparse_map.dtype.type == WIDE_MASK) and len(self._sparse_map.shape) == 2):
self._is_wide_mask = True
self._wide_mask_width = self._sparse_map.shape[1]
self._wide_mask_maxbits = WIDE_NBIT * self._wide_mask_width
self._sentinel = check_sentinel(self._sparse_map.dtype.type, sentinel)
@classmethod
def read(cls, filename, nside_coverage=None, pixels=None, header=False,
degrade_nside=None, weightfile=None, reduction='mean',
use_threads=False):
"""
Read in a HealSparseMap.
Parameters
----------
filename : `str`
Name of the file to read. May be either a regular HEALPIX
map or a HealSparseMap
nside_coverage : `int`, optional
Nside of coverage map to generate if input file is healpix map.
pixels : `list`, optional
List of coverage map pixels to read. Only used if input file
is a HealSparseMap
header : `bool`, optional
Return the fits header metadata as well as map? Default is False.
degrade_nside : `int`, optional
Degrade map to this nside on read. None means leave as-is.
Not yet implemented for parquet files.
weightfile : `str`, optional
Floating-point map to supply weights for degrade wmean. Must
be a HealSparseMap (weighted degrade not supported for
healpix degrade-on-read).
Not yet implemented for parquet files.
reduction : `str`, optional
Reduction method with degrade-on-read.
(mean, median, std, max, min, and, or, sum, prod, wmean).
Not yet implemented for parquet files.
use_threads : `bool`, optional
Use multithreaded reading for parquet files.
Returns
-------
healSparseMap : `HealSparseMap`
HealSparseMap from file, covered by pixels
header : `fitsio.FITSHDR` or `astropy.io.fits` (if header=True)
Fits header for the map file.
"""
return _read_map(cls, filename, nside_coverage=nside_coverage, pixels=pixels,
header=header, degrade_nside=degrade_nside,
weightfile=weightfile, reduction=reduction, use_threads=use_threads)
@classmethod
def make_empty(cls, nside_coverage, nside_sparse, dtype, primary=None, sentinel=None,
wide_mask_maxbits=None, metadata=None, cov_pixels=None):
"""
Make an empty map with nothing in it.
Parameters
----------
nside_coverage : `int`
Nside for the coverage map
nside_sparse : `int`
Nside for the sparse map
dtype : `str` or `list` or `np.dtype`
Datatype, any format accepted by numpy.
primary : `str`, optional
Primary key for recarray, required if dtype has fields.
sentinel : `int` or `float`, optional
Sentinel value. Default is `UNSEEN` for floating-point types,
and minimum int for int types.
wide_mask_maxbits : `int`, optional
Create a "wide bit mask" map, with this many bits.
metadata : `dict`-like, optional
Map metadata that can be stored in FITS header format.
cov_pixels : `np.ndarray` or `list`
List of integer coverage pixels to pre-allocate
Returns
-------
healSparseMap : `HealSparseMap`
HealSparseMap filled with sentinel values.
"""
test_arr = np.zeros(1, dtype=dtype)
if wide_mask_maxbits is not None:
if test_arr.dtype != WIDE_MASK:
raise ValueError("Must use dtype=healsparse.WIDE_MASK to use a wide_mask")
if sentinel is not None:
if sentinel != 0:
raise ValueError("Sentinel must be 0 for wide_mask")
nbitfields = (wide_mask_maxbits - 1) // WIDE_NBIT + 1
if cov_pixels is None:
cov_map = HealSparseCoverage.make_empty(nside_coverage, nside_sparse)
# One pixel is the overflow pixel of a truly empty map
npix = 1
else:
cov_pixels = np.atleast_1d(cov_pixels)
cov_map = HealSparseCoverage.make_from_pixels(nside_coverage, nside_sparse,
cov_pixels)
# We need to allocate the overflow pixel
npix = cov_pixels.size + 1
if wide_mask_maxbits is not None:
# The sentinel is always zero
_sentinel = 0
sparse_map = np.zeros((cov_map.nfine_per_cov*npix, nbitfields), dtype=dtype)
elif test_arr.dtype.fields is None:
# Non-recarray
_sentinel = check_sentinel(test_arr.dtype.type, sentinel)
sparse_map = np.full(cov_map.nfine_per_cov*npix, _sentinel, dtype=dtype)
else:
# Recarray type
if primary is None:
raise RuntimeError("Must specify 'primary' field when using a recarray for the sparse_map.")
primary_found = False
for name in test_arr.dtype.names:
if name == primary:
_sentinel = check_sentinel(test_arr[name].dtype.type, sentinel)
test_arr[name] = _sentinel
primary_found = True
else:
test_arr[name] = check_sentinel(test_arr[name].dtype.type, None)
if not primary_found:
raise RuntimeError("Primary field not found in input dtype of recarray.")
sparse_map = np.full(cov_map.nfine_per_cov*npix, test_arr, dtype=dtype)
return cls(cov_map=cov_map, sparse_map=sparse_map,
nside_sparse=nside_sparse, primary=primary, sentinel=_sentinel,
metadata=metadata)
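    # A minimal usage sketch for make_empty (values illustrative):
    # >>> m = HealSparseMap.make_empty(nside_coverage=32, nside_sparse=4096, dtype=np.float64)
    # >>> m.update_values_pix(np.arange(100), np.ones(100))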
@classmethod
def make_empty_like(cls, sparsemap, nside_coverage=None, nside_sparse=None, dtype=None,
primary=None, sentinel=None, wide_mask_maxbits=None, metadata=None,
cov_pixels=None):
"""
Make an empty map with the same parameters as an existing map.
Parameters
----------
sparsemap : `HealSparseMap`
Sparse map to use as basis for new empty map.
nside_coverage : `int`, optional
Coverage nside, default to sparsemap.nside_coverage
nside_sparse : `int`, optional
Sparse map nside, default to sparsemap.nside_sparse
dtype : `str` or `list` or `np.dtype`, optional
Datatype, any format accepted by numpy. Default is sparsemap.dtype
primary : `str`, optional
Primary key for recarray. Default is sparsemap.primary
sentinel : `int` or `float`, optional
Sentinel value. Default is sparsemap._sentinel
wide_mask_maxbits : `int`, optional
Create a "wide bit mask" map, with this many bits.
metadata : `dict`-like, optional
Map metadata that can be stored in FITS header format.
cov_pixels : `np.ndarray` or `list`
List of integer coverage pixels to pre-allocate
Returns
-------
healSparseMap : `HealSparseMap`
HealSparseMap filled with sentinel values.
"""
if nside_coverage is None:
nside_coverage = sparsemap.nside_coverage
if nside_sparse is None:
nside_sparse = sparsemap.nside_sparse
if dtype is None:
dtype = sparsemap.dtype
if primary is None:
primary = sparsemap.primary
if sentinel is None:
sentinel = sparsemap._sentinel
if wide_mask_maxbits is None:
if sparsemap._is_wide_mask:
wide_mask_maxbits = sparsemap._wide_mask_maxbits
if metadata is None:
metadata = sparsemap._metadata
return cls.make_empty(nside_coverage, nside_sparse, dtype, primary=primary,
sentinel=sentinel, wide_mask_maxbits=wide_mask_maxbits,
metadata=metadata, cov_pixels=cov_pixels)
@staticmethod
def convert_healpix_map(healpix_map, nside_coverage, nest=True, sentinel=hpg.UNSEEN):
"""
Convert a healpix map to a healsparsemap.
Parameters
----------
healpix_map : `np.ndarray`
Numpy array that describes a healpix map.
nside_coverage : `int`
Nside for the coverage map to construct
nest : `bool`, optional
Is the input map in nest format? Default is True.
sentinel : `float`, optional
Sentinel value for null values in the sparse_map.
Returns
-------
cov_map : `HealSparseCoverage`
Coverage map with pixel indices
sparse_map : `np.ndarray`
Sparse map of input values.
"""
if not nest:
healpix_map = hpg.reorder(healpix_map, ring_to_nest=True)
# Compute the coverage map...
# Note that this is coming from a standard healpix map so the sentinel
# is always hpg.UNSEEN
ipnest, = np.where(healpix_map > hpg.UNSEEN)
nside_sparse = hpg.npixel_to_nside(healpix_map.size)
cov_map = HealSparseCoverage.make_empty(nside_coverage, nside_sparse)
ipnest_cov = cov_map.cov_pixels(ipnest)
cov_pix = np.unique(ipnest_cov)
cov_map.initialize_pixels(cov_pix)
sparse_map = np.full((cov_pix.size + 1)*cov_map.nfine_per_cov,
sentinel, dtype=healpix_map.dtype)
sparse_map[ipnest + cov_map[ipnest_cov]] = healpix_map[ipnest]
return cov_map, sparse_map
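    # A conversion sketch (values illustrative): wrap a full healpix array as a
    # sparse map via the healpix_map constructor path above.
    # >>> hp_map = np.full(hpg.nside_to_npixel(512), hpg.UNSEEN)
    # >>> hp_map[:1000] = 1.0
    # >>> sp = HealSparseMap(healpix_map=hp_map, nside_coverage=32)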
def write(self, filename, clobber=False, nocompress=False, format='fits', nside_io=4):
"""
Write a HealSparseMap to a file. Use the `metadata` property from
the map to persist additional information in the fits header.
Parameters
----------
filename : `str`
Name of file to save
clobber : `bool`, optional
Clobber existing file? Default is False.
nocompress : `bool`, optional
If this is False, then integer maps will be compressed losslessly.
Note that `np.int64` maps cannot be compressed in the FITS standard.
This option only applies if format=``fits``.
        format : `str`, optional
            File format. May be ``fits``, ``parquet``, or ``healpix``. Note that
            the ``healpix`` EXPLICIT format does not maintain all metadata and
            coverage information.
        nside_io : `int`, optional
            The healpix nside to partition the output map files in parquet.
            Must be less than or equal to nside_coverage, and not greater than 16.
            This option only applies if format=``parquet``.
Raises
------
NotImplementedError if file format is not supported.
ValueError if nside_io is out of range.
"""
_write_map(self, filename, clobber=clobber, nocompress=nocompress, format=format,
nside_io=nside_io)
def write_moc(self, filename, clobber=False):
"""
Write the valid pixels of a HealSparseMap to a multi-order component (MOC)
file. Note that values of the pixels are not persisted in MOC format.
Parameters
----------
filename : `str`
Name of file to save
clobber : `bool`, optional
Clobber existing file? Default is False.
"""
_write_moc(self, filename, clobber=clobber)
def _reserve_cov_pix(self, new_cov_pix):
"""
Reserve new coverage pixels. This routine does no checking, it should
be done by the caller.
Parameters
----------
new_cov_pix : `np.ndarray`
Integer array of new coverage pixels
"""
new_cov_map = self._cov_map.append_pixels(len(self._sparse_map), new_cov_pix, check=False)
self._cov_map = new_cov_map
# Use resizing
oldsize = len(self._sparse_map)
newsize = oldsize + new_cov_pix.size*self._cov_map.nfine_per_cov
if self._is_wide_mask:
self._sparse_map.resize((newsize, self._wide_mask_width), refcheck=False)
else:
self._sparse_map.resize(newsize, refcheck=False)
# Fill with blank values
self._sparse_map[oldsize:] = self._sparse_map[0]
def update_values_pos(self, ra_or_theta, dec_or_phi, values,
lonlat=True, operation='replace'):
"""
Update the values in the sparsemap for a list of positions.
Parameters
----------
ra_or_theta : `float`, array-like
Angular coordinates of points on a sphere.
dec_or_phi : `float`, array-like
Angular coordinates of points on a sphere.
values : `np.ndarray` or `None`
Value or Array of values. Must be same type as sparse_map.
If None, then the pixels will be set to the sentinel map value.
lonlat : `bool`, optional
If True, input angles are longitude and latitude in degrees.
Otherwise, they are co-latitude and longitude in radians.
operation : `str`, optional
            Operation to use to update values. May be 'replace' (default),
            'add', 'or', or 'and' (the latter two for bit masks).
Raises
------
ValueError
If positions do not resolve to unique positions and operation
is 'replace', or if values is None and operation is not 'replace'.
Notes
-----
During the 'add' operation, if the default sentinel map value is not
equal to 0, then any default values will be set to 0 prior to addition.
"""
return self.update_values_pix(hpg.angle_to_pixel(self._nside_sparse,
ra_or_theta,
dec_or_phi,
lonlat=lonlat),
values,
operation=operation)
def update_values_pix(self, pixels, values, nest=True, operation='replace'):
"""
Update the values in the sparsemap for a list of pixels.
The list of pixels must be unique if the operation is 'replace'.
Parameters
----------
pixels : `np.ndarray`
Integer array of sparse_map pixel values
values : `np.ndarray` or `None`
Value or Array of values. Must be same type as sparse_map.
If None, then the pixels will be set to the sentinel map value.
operation : `str`, optional
            Operation to use to update values. May be 'replace' (default),
            'add', 'or', or 'and' (the latter two for bit masks).
Raises
------
ValueError
Raised if pixels are not unique and operation is 'replace', or if
operation is not 'replace' on a recarray map, or if values is
None and operation is not 'replace'.
Notes
-----
During the 'add' operation, if the default sentinel map value is not
equal to 0, then any default values will be set to 0 prior to addition.
"""
# When None is specified, we use the sentinel value.
if values is None:
if operation != 'replace':
raise ValueError("Can only use 'None' with 'replace' operation.")
if self._is_wide_mask:
values = np.full(self._wide_mask_width, self._sentinel)
elif self._is_rec_array:
values = np.zeros(1, dtype=self._sparse_map.dtype)
values[self._primary] = self._sentinel
else:
values = self._sentinel
if operation != 'replace':
if operation in ['or', 'and']:
if not self.is_integer_map or self._sentinel != 0:
raise ValueError("Can only use and/or with integer map with 0 sentinel")
elif operation == 'add':
if self._is_rec_array:
raise ValueError("Cannot use 'add' operation with a recarray map.")
else:
raise ValueError("Only 'replace', 'add', 'or', and 'and' are supported operations")
if operation == 'replace':
# Check for unique pixel positions
if hasattr(pixels, "__len__"):
if len(np.unique(pixels)) < len(pixels):
raise ValueError("List of pixels must be unique if operation='replace'")
# If _not_ recarray, we can use a single int/float
is_single_value = False
_values = values
if not self._is_rec_array:
if self._is_wide_mask:
# Special for wide_mask
if not isinstance(values, np.ndarray):
raise ValueError("Wide mask must be set with a numpy ndarray")
if len(values) == self._wide_mask_width and len(values.shape) == 1:
is_single_value = True
# Reshape so we can use the 0th entry below
_values = _values.reshape((1, self._wide_mask_width))
else:
# Non wide_mask
if isinstance(values, numbers.Integral):
if not self.is_integer_map:
raise ValueError("Cannot set non-integer map with an integer")
is_single_value = True
_values = np.array([values], dtype=self.dtype)
elif isinstance(values, numbers.Real):
if self.is_integer_map:
raise ValueError("Cannot set non-floating point map with a floating point.")
is_single_value = True
_values = np.array([values], dtype=self.dtype)
elif isinstance(values, (bool, np.bool_)):
is_single_value = True
_values = np.array([values], dtype=bool)
if isinstance(values, np.ndarray) and len(values) == 1:
is_single_value = True
# First, check if these are the same type
if not is_single_value and not isinstance(_values, np.ndarray):
raise ValueError("Values are not a numpy ndarray")
if hasattr(pixels, "__len__") and len(pixels) == 0:
if len(_values) != 0:
warnings.warn("Shape mismatch: using a non-zero-length array of values "
"to set a zero-length list of pixels.",
UserWarning)
# Nothing to do
return
if not nest:
_pix = hpg.ring_to_nest(self._nside_sparse, pixels)
else:
_pix = pixels
# Check numpy data type for everything but wide_mask single value
if not self._is_wide_mask or (self._is_wide_mask and not is_single_value):
if self._is_rec_array:
if self._sparse_map.dtype != _values.dtype:
raise ValueError("Data-type mismatch between sparse_map and values")
elif self._sparse_map.dtype.type != _values.dtype.type:
raise ValueError("Data-type mismatch between sparse_map and values")
# Check array lengths
if not is_single_value and len(_values) != pixels.size:
raise ValueError("Length of values must be same length as pixels (or length 1)")
if self._is_view:
# Check that we are not setting new pixels
if np.any(self.get_values_pix(_pix) == self._sentinel):
raise RuntimeError("This API cannot be used to set new pixels in the map.")
# Compute the coverage pixels
ipnest_cov = self._cov_map.cov_pixels(_pix)
# Check which pixels are in the coverage map
cov_mask = self.coverage_mask
in_cov = cov_mask[ipnest_cov]
out_cov = ~cov_mask[ipnest_cov]
# Replace values for those pixels in the coverage map
_indices = _pix[in_cov] + self._cov_map[ipnest_cov[in_cov]]
if is_single_value:
if operation == 'replace':
self._sparse_map[_indices] = _values[0]
elif operation == 'add':
# Put in a check to reset uncovered pixels to 0
if self._sentinel != 0:
self._sparse_map[_indices[self._sparse_map[_indices] == self._sentinel]] = 0
np.add.at(self._sparse_map, _indices, _values[0])
elif operation == 'or':
np.bitwise_or.at(self._sparse_map, _indices, _values[0])
elif operation == 'and':
np.bitwise_and.at(self._sparse_map, _indices, _values[0])
else:
if operation == 'replace':
self._sparse_map[_indices] = _values[in_cov]
elif operation == 'add':
# Put in a check to reset uncovered pixels to 0
if self._sentinel != 0:
self._sparse_map[_indices[self._sparse_map[_indices] == self._sentinel]] = 0
np.add.at(self._sparse_map, _indices, _values[in_cov])
elif operation == 'or':
np.bitwise_or.at(self._sparse_map, _indices, _values[in_cov])
elif operation == 'and':
np.bitwise_and.at(self._sparse_map, _indices, _values[in_cov])
# Update the coverage map for the rest of the pixels (if necessary)
if out_cov.sum() > 0:
# New version to minimize data copying
# Faster trick for getting unique values
new_cov_temp = np.zeros(cov_mask.size, dtype=np.int8)
new_cov_temp[ipnest_cov[out_cov]] = 1
new_cov_pix, = np.where(new_cov_temp > 0)
# Reserve the memory here
oldsize = len(self._sparse_map)
self._reserve_cov_pix(new_cov_pix)
_indices = _pix[out_cov] + self._cov_map[ipnest_cov[out_cov]] - oldsize
if is_single_value:
if operation == 'replace':
self._sparse_map[oldsize:][_indices] = _values[0]
elif operation == 'add':
# Put in a check to reset uncovered pixels to 0
if self._sentinel != 0:
self._sparse_map[oldsize:][_indices[self._sparse_map[oldsize:][_indices] == self._sentinel]] = 0
np.add.at(self._sparse_map[oldsize:], _indices, _values[0])
elif operation == 'or':
np.bitwise_or.at(self._sparse_map[oldsize:], _indices, _values[0])
elif operation == 'and':
np.bitwise_and.at(self._sparse_map[oldsize:], _indices, _values[0])
else:
if operation == 'replace':
self._sparse_map[oldsize:][_indices] = _values[out_cov]
elif operation == 'add':
# Put in a check to reset uncovered pixels to 0
if self._sentinel != 0:
self._sparse_map[oldsize:][_indices[self._sparse_map[oldsize:][_indices] == self._sentinel]] = 0
np.add.at(self._sparse_map[oldsize:], _indices, _values[out_cov])
elif operation == 'or':
np.bitwise_or.at(self._sparse_map[oldsize:], _indices, _values[out_cov])
elif operation == 'and':
np.bitwise_and.at(self._sparse_map[oldsize:], _indices, _values[out_cov])
def set_bits_pix(self, pixels, bits, nest=True):
"""
Set bits of a wide_mask map.
Parameters
----------
pixels : `np.ndarray`
Integer array of sparse_map pixel values
bits : `list`
List of bits to set
nest : `bool`, optional
Are the pixels in nest scheme? Default is True.
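Examples
--------
Illustrative sketch only; it assumes ``healsparse`` and ``numpy`` are
importable and that the map parameters chosen here are reasonable.
>>> import numpy as np
>>> import healsparse as hsp
>>> m = hsp.HealSparseMap.make_empty(32, 4096, hsp.WIDE_MASK,
...                                  wide_mask_maxbits=16)
>>> m.set_bits_pix(np.arange(1000, 1010), [2, 5])
>>> m.check_bits_pix(np.array([1005]), [5])
array([ True])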
"""
if not self._is_wide_mask:
raise NotImplementedError("Can only use set_bits_pix on wide_mask map")
if np.max(bits) >= self._wide_mask_maxbits:
raise ValueError("Bit position %d too large (>= %d)" % (np.max(bits),
self._wide_mask_maxbits))
value = self._sparse_map[0].copy()
for bit in bits:
field, bitval = _get_field_and_bitval(bit)
value[field] |= bitval
self.update_values_pix(pixels, value, nest=nest, operation='or')
def clear_bits_pix(self, pixels, bits, nest=True):
"""
Clear bits of a wide_mask map.
Parameters
----------
pixels : `np.ndarray`
Integer array of sparse_map pixel values
bits : `list`
List of bits to clear
nest : `bool`, optional
Are the pixels in nest scheme? Default is True.
"""
if not self._is_wide_mask:
raise NotImplementedError("Can only use set_bits_pix on wide_mask map")
if np.max(bits) >= self._wide_mask_maxbits:
raise ValueError("Bit position %d too large (>= %d)" % (np.max(bits),
self._wide_mask_maxbits))
value = self._sparse_map[0].copy()
for bit in bits:
field, bitval = _get_field_and_bitval(bit)
value[field] |= bitval
# A bit reset is performed with &= ~(bit1 | bit2)
self.update_values_pix(pixels, ~value, nest=nest, operation='and')
def get_values_pos(self, ra_or_theta, dec_or_phi, lonlat=True, valid_mask=False):
"""
Get the map value for the position. Positions may be theta/phi
co-latitude and longitude in radians, or longitude and latitude in
degrees.
Parameters
----------
ra_or_theta : `float`, array-like
Angular coordinates of points on a sphere.
dec_or_phi : `float`, array-like
Angular coordinates of points on a sphere.
lonlat : `bool`, optional
If True, input angles are longitude and latitude in degrees.
Otherwise, they are co-latitude and longitude in radians.
valid_mask : `bool`, optional
Return mask of True/False instead of values
Returns
-------
values : `np.ndarray`
Array of values/validity from the map.
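Examples
--------
Hedged sketch; ``m`` is assumed to be an existing floating-point map
covering this (hypothetical) position.
>>> vals = m.get_values_pos(50.0, 25.0, lonlat=True)
>>> valid = m.get_values_pos(50.0, 25.0, valid_mask=True)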
"""
return self.get_values_pix(hpg.angle_to_pixel(self._nside_sparse,
ra_or_theta,
dec_or_phi,
lonlat=lonlat),
valid_mask=valid_mask)
def get_values_pix(self, pixels, nest=True, valid_mask=False, nside=None):
"""
Get the map value for a set of pixels.
This routine will optionally convert from a higher resolution nside
to the nside of the sparse map.
Parameters
----------
pixels : `np.ndarray`
Integer array of healpix pixels.
nest : `bool`, optional
Are the pixels in nest scheme? Default is True.
valid_mask : `bool`, optional
Return mask of True/False instead of values
nside : `int`, optional
nside of pixels, if different from native.
Must be greater than the native nside.
Returns
-------
values : `np.ndarray`
Array of values/validity from the map.
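Examples
--------
Sketch only; ``m`` is assumed to be an existing map and the pixel
numbers are hypothetical.
>>> pix = np.array([10000, 10001])
>>> vals = m.get_values_pix(pix)
>>> vals_hi = m.get_values_pix(pix, nside=2*m.nside_sparse)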
"""
if hasattr(pixels, "__len__") and len(pixels) == 0:
if self._is_wide_mask:
return np.zeros((0, self._wide_mask_width), dtype=self.dtype)
else:
return np.array([], dtype=self.dtype)
if not nest:
_pix = hpg.ring_to_nest(self._nside_sparse, pixels)
else:
_pix = pixels
if nside is not None:
if nside < self._nside_sparse:
raise ValueError("nside must be higher resolution than the sparse map.")
# Convert pixels to sparse map resolution
bit_shift = _compute_bitshift(self._nside_sparse, nside)
_pix = np.right_shift(_pix, np.abs(bit_shift))
ipnest_cov = self._cov_map.cov_pixels(_pix)
if self._is_wide_mask:
values = self._sparse_map[_pix + self._cov_map[ipnest_cov], :]
else:
values = self._sparse_map[_pix + self._cov_map[ipnest_cov]]
if valid_mask:
if self._is_rec_array:
return (values[self._primary] != self._sentinel)
elif self._is_wide_mask:
return (values > 0).sum(axis=1, dtype=np.bool_)
else:
return (values != self._sentinel)
else:
# Just return the values
return values
def check_bits_pos(self, ra_or_theta, dec_or_phi, bits, lonlat=True):
"""
Check the bits at the map for an array of positions. Positions may be
theta/phi co-latitude and longitude in radians, or longitude and
latitude in degrees.
Parameters
----------
ra_or_theta : `float`, array-like
Angular coordinates of points on a sphere.
dec_or_phi : `float`, array-like
Angular coordinates of points on a sphere.
lonlat : `bool`, optional
If True, input angles are longitude and latitude in degrees.
Otherwise, they are co-latitude and longitude in radians.
bits : `list`
List of bits to check
Returns
-------
bit_flags : `np.ndarray`
Array of `np.bool_` flags on whether any of the input bits were
set
"""
return self.check_bits_pix(hpg.angle_to_pixel(self._nside_sparse,
ra_or_theta,
dec_or_phi,
lonlat=lonlat),
bits)
def check_bits_pix(self, pixels, bits, nest=True):
"""
Check the bits at the map for a set of pixels.
Parameters
----------
pixels : `np.ndarray`
Integer array of healpix pixels.
nest : `bool`, optional
Are the pixels in nest scheme? Default is True.
bits : `list`
List of bits to check
Returns
-------
bit_flags : `np.ndarray`
Array of `np.bool_` flags on whether any of the input bits were
set
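Examples
--------
Sketch only; ``m`` is assumed to be an existing wide_mask map and the
pixel numbers are hypothetical.
>>> flags = m.check_bits_pix(np.array([1000, 1001]), [2, 5])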
"""
values = self.get_values_pix(np.atleast_1d(pixels), nest=nest)
bit_flags = None
for bit in bits:
field, bitval = _get_field_and_bitval(bit)
if bit_flags is None:
bit_flags = ((values[:, field] & bitval) > 0)
else:
bit_flags |= ((values[:, field] & bitval) > 0)
return bit_flags
@property
def sentinel(self):
"""
Get the sentinel of the map.
"""
return self._sentinel
@property
def dtype(self):
"""
get the dtype of the map
"""
return self._sparse_map.dtype
@property
def coverage_map(self):
"""
Get the fractional area covered by the sparse map
in the resolution of the coverage map
Returns
-------
cov_map : `np.ndarray`
Float array of fractional coverage of each pixel
"""
cov_map = np.zeros_like(self.coverage_mask, dtype=np.float64)
cov_mask = self.coverage_mask
npop_pix = np.count_nonzero(cov_mask)
if self._is_wide_mask:
shape_new = (npop_pix + 1,
self._cov_map.nfine_per_cov,
self._wide_mask_width)
sp_map_t = self._sparse_map.reshape(shape_new)
# This trickery first checks all the bits, and then sums into the
# coverage pixel
counts = np.sum(np.any(sp_map_t != self._sentinel, axis=2), axis=1)
else:
shape_new = (npop_pix + 1,
self._cov_map.nfine_per_cov)
if self._is_rec_array:
sp_map_t = self._sparse_map[self._primary].reshape(shape_new)
else:
sp_map_t = self._sparse_map.reshape(shape_new)
counts = np.sum((sp_map_t != self._sentinel), axis=1).astype(np.float64)
cov_map[cov_mask] = counts[1:]/self._cov_map.nfine_per_cov
return cov_map
@property
def coverage_mask(self):
"""
Get the boolean mask of the coverage map.
Returns
-------
cov_mask : `np.ndarray`
Boolean array of coverage mask.
"""
return self._cov_map.coverage_mask
def fracdet_map(self, nside):
"""
Get the fractional area covered by the sparse map at an arbitrary resolution.
This output fracdet_map counts the fraction of "valid" sub-pixels (those that
are not equal to the sentinel value) at the desired nside resolution.
Note: You should not compute the fracdet_map of an existing fracdet_map. To
get a fracdet_map at a lower resolution, use the degrade method with the
default "mean" reduction.
Parameters
----------
nside : `int`
Healpix nside for fracdet map. Must not be greater than sparse
resolution or less than coverage resolution.
Returns
-------
fracdet_map : `HealSparseMap`
Fractional coverage map.
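Examples
--------
Sketch only; ``m`` is assumed to be a map with nside_sparse >= 128
and nside_coverage <= 128.
>>> fracdet = m.fracdet_map(128)
>>> area = fracdet.get_valid_area(degrees=True)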
"""
if nside > self.nside_sparse:
raise ValueError("Cannot return fracdet_map at higher resolution than "
"the sparse map (nside=%d)." % (self.nside_sparse))
if nside < self.nside_coverage:
raise ValueError("Cannot return fractdet_map at lower resolution than "
"the coverage map (nside=%d)." % (self.nside_coverage))
# This code is essentially a unification of coverage_map() and degrade()
# to get the fracdet_coverage in a single step
cov_mask = self.coverage_mask
npop_pix = np.count_nonzero(cov_mask)
bit_shift = _compute_bitshift(nside, self.nside_sparse)
nfine_per_frac = 2**bit_shift
nfrac_per_cov = self._cov_map.nfine_per_cov//nfine_per_frac
if self._is_wide_mask:
shape_new = ((npop_pix + 1)*nfrac_per_cov,
nfine_per_frac,
self._wide_mask_width)
sp_map_t = self._sparse_map.reshape(shape_new)
fracdet = np.sum(np.any(sp_map_t != self._sentinel, axis=2), axis=1).astype(np.float64)
else:
shape_new = ((npop_pix + 1)*nfrac_per_cov,
nfine_per_frac)
if self._is_rec_array:
sp_map_t = self._sparse_map[self._primary].reshape(shape_new)
else:
sp_map_t = self._sparse_map.reshape(shape_new)
fracdet = np.sum(sp_map_t != self._sentinel, axis=1).astype(np.float64)
fracdet /= nfine_per_frac
fracdet_cov_map = HealSparseCoverage.make_from_pixels(self.nside_coverage,
nside,
np.where(cov_mask)[0])
# The sentinel for a fracdet_map is 0.0, no coverage.
return HealSparseMap(cov_map=fracdet_cov_map, sparse_map=fracdet,
nside_sparse=nside, primary=self._primary,
sentinel=0.0)
@property
def nside_coverage(self):
"""
Get the nside of the coverage map
Returns
-------
nside_coverage : `int`
"""
return self._cov_map.nside_coverage
@property
def nside_sparse(self):
"""
Get the nside of the sparse map
Returns
-------
nside_sparse : `int`
"""
return self._nside_sparse
@property
def primary(self):
"""
Get the primary field
Returns
-------
primary : `str`
"""
return self._primary
@property
def is_integer_map(self):
"""
Check that the map is an integer map
Returns
-------
is_integer_map : `bool`
"""
if self._is_rec_array:
return False
return issubclass(self._sparse_map.dtype.type, (np.integer, np.bool_))
@property
def is_unsigned_map(self):
"""
Check that the map is an unsigned integer map
Returns
-------
is_unsigned_map : `bool`
"""
if self._is_rec_array:
return False
return issubclass(self._sparse_map.dtype.type, np.unsignedinteger)
@property
def is_wide_mask_map(self):
"""
Check that the map is a wide mask
Returns
-------
is_wide_mask_map : `bool`
"""
return self._is_wide_mask
@property
def wide_mask_width(self):
"""
Get the width of the wide mask
Returns
-------
wide_mask_width : `int`
Width of wide mask array. 0 if not wide mask.
"""
return self._wide_mask_width
@property
def wide_mask_maxbits(self):
"""
Get the maximum number of bits stored in the wide mask.
Returns
-------
wide_mask_maxbits : `int`
Maximum number of bits. 0 if not wide mask.
"""
if self._is_wide_mask:
return self._wide_mask_maxbits
else:
return 0
@property
def is_rec_array(self):
"""
Check that the map is a recArray map.
Returns
-------
is_rec_array : `bool`
"""
return self._is_rec_array
@property
def metadata(self):
"""
Return the metadata dict.
Returns
-------
metadata : `dict`
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Set the metadata dict.
This ensures that the keys conform to FITS standard (<=8 char string,
all caps.)
Parameters
----------
metadata : `dict`
"""
if metadata is None:
self._metadata = metadata
else:
if not isinstance(metadata, dict):
try:
metadata = dict(metadata)
except ValueError:
raise ValueError("Could not convert metadata to dict")
for key in metadata:
if not isinstance(key, str):
raise ValueError("metadata key %s must be a string" % (str(key)))
if not key.isupper():
raise ValueError("metadata key %s must be all upper case" % (key))
self._metadata = metadata
def generate_healpix_map(self, nside=None, reduction='mean', key=None, nest=True):
"""
Generate the associated healpix map
if nside is specified, then reduce to that nside
Parameters
----------
nside : `int`
Output nside resolution parameter (must be a power of 2). If
not specified the output resolution will be equal to the parent's
sparsemap nside_sparse
reduction : `str`
If a change in resolution is requested, this controls the method to
reduce the map computing the "mean", "median", "std", "max", "min",
"sum" or "prod" (product) of the neighboring pixels to compute the
"degraded" map.
key : `str`
If the parent HealSparseMap contains recarrays, key selects the
field that will be transformed into a HEALPix map.
nest : `bool`, optional
Output healpix map should be in nest format?
Returns
-------
hp_map : `np.ndarray`
Output HEALPix map with the requested resolution.
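Examples
--------
Sketch only; ``m`` is assumed to be a floating-point map with
nside_sparse >= 512.
>>> hp_map = m.generate_healpix_map(nside=512, reduction='mean')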
"""
# If no nside is passed, we generate a map with the same resolution as the original
if nside is None:
nside = self._nside_sparse
if self._is_rec_array:
if key is None:
raise ValueError('key must be specified for a HealSparseMap containing a recarray')
else:
# This is memory inefficient in that we are copying the memory
# to ensure that we get a unique healpix map. To not get a copy,
# you can do map['column'][:]
single_map = self.get_single(key, copy=True)
elif self._is_wide_mask:
raise NotImplementedError("Cannot make healpix map out of wide_mask")
else:
single_map = self
# If we're degrading, let that code do the datatyping
if nside < self._nside_sparse:
# degrade to new resolution
single_map = single_map.degrade(nside, reduction=reduction)
elif nside > self._nside_sparse:
raise ValueError("Cannot generate HEALPix map with higher resolution than the original.")
# Check to see if we have an integer map.
if issubclass(single_map._sparse_map.dtype.type, np.integer):
dtypeOut = np.float64
else:
dtypeOut = single_map._sparse_map.dtype
# Create an empty HEALPix map, filled with UNSEEN values
hp_map = np.full(hpg.nside_to_npixel(nside), hpg.UNSEEN, dtype=dtypeOut)
valid_pixels = single_map.valid_pixels
if not nest:
valid_pixels = hpg.nest_to_ring(nside, valid_pixels)
hp_map[valid_pixels] = single_map.get_values_pix(valid_pixels, nest=nest)
return hp_map
@property
def valid_pixels(self):
"""
Get an array of valid pixels in the sparse map.
Returns
-------
valid_pixels : `np.ndarray`
"""
if self._is_rec_array:
valid_pixel_inds, = np.where(self._sparse_map[self._primary] != self._sentinel)
elif self._is_wide_mask:
valid_pixel_inds, = np.where(np.any(self._sparse_map != self._sentinel, axis=1))
else:
valid_pixel_inds, = np.where(self._sparse_map != self._sentinel)
return valid_pixel_inds - self._cov_map[self._cov_map.cov_pixels_from_index(valid_pixel_inds)]
def valid_pixels_pos(self, lonlat=True, return_pixels=False):
"""
Get an array with the position of valid pixels in the sparse map.
Parameters
----------
lonlat: `bool`, optional
If True, input angles are longitude and latitude in degrees.
Otherwise, they are co-latitude and longitude in radians.
return_pixels: `bool`, optional
If True, also return the valid pixels, i.e. a tuple of
(valid_pixels, lon, lat) or (valid_pixels, theta, phi).
Returns
-------
positions : `tuple`
A tuple of (`ra`, `dec`) in degrees if `lonlat=True` (the default),
otherwise (`theta`, `phi`) in radians.
If `return_pixels = True`, valid_pixels will be returned as first element
in tuple.
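Examples
--------
Sketch only; ``m`` is assumed to be an existing map.
>>> ra, dec = m.valid_pixels_pos()
>>> pix, ra, dec = m.valid_pixels_pos(return_pixels=True)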
"""
if return_pixels:
valid_pixels = self.valid_pixels
lon, lat = hpg.pixel_to_angle(self.nside_sparse, valid_pixels, lonlat=lonlat)
return (valid_pixels, lon, lat)
else:
return hpg.pixel_to_angle(self.nside_sparse, self.valid_pixels, lonlat=lonlat)
@property
def n_valid(self):
"""
Get the number of valid pixels in the map.
Returns
-------
n_valid : `int`
"""
# This is more memory efficient to work with bits rather than
# integer indices.
if self._is_rec_array:
n_valid = np.sum(self._sparse_map[self._primary] != self._sentinel)
elif self._is_wide_mask:
n_valid = np.sum(np.any(self._sparse_map != self._sentinel, axis=1))
else:
n_valid = np.sum(self._sparse_map != self._sentinel)
return n_valid
def get_valid_area(self, degrees=True):
"""
Get the area covered by valid pixels
Parameters
----------
degrees : `bool` If True (default) returns the area in square degrees,
if False it returns the area in steradians
Returns
-------
valid_area : `float`
"""
return self.n_valid*hpg.nside_to_pixel_area(self._nside_sparse, degrees=degrees)
def _degrade(self, nside_out, reduction='mean', weights=None):
"""
Auxiliary method to reduce the resolution, i.e., increase the pixel size
of a given sparse map (which is called by `degrade`).
Parameters
----------
nside_out : `int`
Output Nside resolution parameter.
reduction : `str`
Reduction method (mean, median, std, max, min, and, or, sum, prod, wmean).
weights : `healSparseMap`
If the reduction is `wmean` this is the map with the weights to use.
It should have the same characteristics as the original map.
Returns
-------
healSparseMap : `HealSparseMap`
New map, at the desired resolution.
"""
if self._nside_sparse < nside_out:
raise ValueError('nside_out should be smaller than nside for the sparse_map.')
# Count the number of filled pixels in the coverage mask
npop_pix = np.count_nonzero(self.coverage_mask)
# We need the new bit_shifts and we have to build a new CovIndexMap
bit_shift = _compute_bitshift(self.nside_coverage, nside_out)
nfine_per_cov = 2**bit_shift
# Check weights and add guards
weight_values = None
if weights is not None:
if reduction != 'wmean':
warnings.warn('Weights only used with wmean reduction. Ignoring weights.',
UserWarning)
else:
# Check format/size of weight-map here.
if not isinstance(weights, HealSparseMap):
raise ValueError("weights must be a HealSparseMap.")
if weights.is_rec_array or weights.is_wide_mask_map or weights.is_integer_map:
raise ValueError("weights must be a floating-point map.")
bad_map = ((weights.nside_sparse != self.nside_sparse) or
(weights.nside_coverage != self.nside_coverage) or
(not np.array_equal(weights.valid_pixels, self.valid_pixels)))
if bad_map:
raise ValueError('weights dimensions must be the same as this map.')
weight_values = weights._sparse_map
# Set to zero weight those pixels that are not observed
# This is valid for all types of maps because they share the same valid_pixels.
weight_values[weight_values == weights._sentinel] = 0.0
weight_values = weight_values.reshape((npop_pix + 1,
(nside_out//self.nside_coverage)**2, -1))
elif reduction == 'wmean':
raise ValueError('Must specify weights when using wmean reduction.')
# At this point, the weight map has been checked and will only be used if
# the reduction is set to wmean.
# Work with wide masks
if self._is_wide_mask:
if reduction not in ['and', 'or']:
raise NotImplementedError("Cannot degrade a wide_mask map with this "
"reduction operation; try 'and' or 'or'.")
else:
nbits = self._sparse_map.shape[1]
aux = self._sparse_map.reshape((npop_pix+1, (nside_out//self.nside_coverage)**2, -1, nbits))
sparse_map_out = reduce_array(aux, reduction=reduction, axis=2).reshape((-1, nbits))
sentinel_out = self._sentinel
# Work with RecArray (we have to change the resolution to all maps...)
elif self._is_rec_array:
dtype = []
sentinel_out = hpg.UNSEEN
# We should avoid integers
for key, value in self._sparse_map.dtype.fields.items():
if issubclass(self._sparse_map[key].dtype.type, np.integer):
dtype.append((key, np.float64))
else:
dtype.append((key, value[0]))
# Allocate new map
sparse_map_out = np.zeros((npop_pix + 1)*nfine_per_cov, dtype=dtype)
for key, value in sparse_map_out.dtype.fields.items():
aux = self._sparse_map[key].astype(np.float64)
aux[self._sparse_map[self._primary] == self._sentinel] = np.nan
aux = aux.reshape((npop_pix + 1, (nside_out//self.nside_coverage)**2, -1))
# Perform the reduction operation (check utils.reduce_array)
aux = reduce_array(aux, reduction=reduction, weights=weight_values)
# Transform back to sentinel value
aux[np.isnan(aux)] = sentinel_out
sparse_map_out[key] = aux
# Work with int array and ndarray
elif (issubclass(self._sparse_map.dtype.type, np.integer)) and (reduction in ['and', 'or']):
aux = self._sparse_map.reshape((npop_pix+1, (nside_out//self.nside_coverage)**2, -1))
sparse_map_out = reduce_array(aux, reduction=reduction)
sentinel_out = self._sentinel
else:
if issubclass(self._sparse_map.dtype.type, (np.integer, np.bool_)):
aux_dtype = np.float64
else:
aux_dtype = self._sparse_map.dtype
sentinel_out = hpg.UNSEEN
aux = self._sparse_map.astype(aux_dtype)
aux[self._sparse_map == self._sentinel] = np.nan
aux = aux.reshape((npop_pix + 1, (nside_out//self.nside_coverage)**2, -1))
aux = reduce_array(aux, reduction=reduction, weights=weight_values)
# NaN are converted to UNSEEN
aux[np.isnan(aux)] = sentinel_out
sparse_map_out = aux
# The coverage index map is now offset, we have to build a new one
# Note that we need to keep the same order of the coverage map
new_cov_map = HealSparseCoverage.make_from_pixels(self.nside_coverage,
nside_out,
self._cov_map._block_to_cov_index)
return HealSparseMap(cov_map=new_cov_map, sparse_map=sparse_map_out,
nside_sparse=nside_out, primary=self._primary, sentinel=sentinel_out)
def degrade(self, nside_out, reduction='mean', weights=None):
"""
Decrease the resolution of the map, i.e., increase the pixel size.
Parameters
----------
nside_out : `int`
Output nside resolution parameter.
reduction : `str`, optional
Reduction method (mean, median, std, max, min, and, or, sum, prod, wmean).
weights : `HealSparseMap`, optional
If the reduction is `wmean` this is the map with the weights to use.
It should have the same characteristics as the original map.
Returns
-------
healSparseMap : `HealSparseMap`
New map, at the desired resolution.
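Examples
--------
Sketch only; ``m`` and the weight map ``w`` are hypothetical float
maps with identical valid pixels.
>>> lowres = m.degrade(512, reduction='mean')
>>> wmean = m.degrade(512, reduction='wmean', weights=w)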
"""
if nside_out > self._nside_sparse:
raise ValueError("To increase the resolution of the map, use ``upgrade``.")
if nside_out < self.nside_coverage:
# The way we do the reduction requires nside_out to be >= nside_coverage
# we allocate a new map with the required nside_out
# CAUTION: This may require a lot of memory!!
warnings.warn("`nside_out` < `nside_coverage`. \
Allocating new map with nside_coverage=nside_out",
ResourceWarning)
sparse_map_out = HealSparseMap.make_empty_like(self,
nside_coverage=nside_out)
if weights is not None:
wgt_valid = weights.valid_pixels
_weights = HealSparseMap.make_empty_like(weights, nside_coverage=nside_out)
_weights[wgt_valid] = weights[wgt_valid]
weights = _weights
valid_pixels = self.valid_pixels
sparse_map_out[valid_pixels] = self[valid_pixels]
sparse_map_out = sparse_map_out._degrade(nside_out, reduction=reduction, weights=weights)
else:
if self._nside_sparse == nside_out:
sparse_map_out = self
else:
# Regular degrade
sparse_map_out = self._degrade(nside_out,
reduction=reduction,
weights=weights)
return sparse_map_out
def upgrade(self, nside_out):
"""
Increase the resolution of the map, i.e., decrease the pixel size.
All covering pixels will be duplicated at the higher resolution.
Parameters
----------
nside_out : `int`
Output nside resolution parameter.
Returns
-------
healSparseMap : `HealSparseMap`
New map, at the desired resolution.
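Examples
--------
Sketch only; ``m`` is assumed to be an existing (non wide_mask) map.
>>> highres = m.upgrade(2 * m.nside_sparse)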
"""
if self._nside_sparse >= nside_out:
raise ValueError("To decrease the resolution of the map, use ``degrade``.")
if self._is_wide_mask:
raise NotImplementedError("Upgrading wide masks is not supported.")
# Make an order preserving coverage map.
new_cov_map = HealSparseCoverage.make_from_pixels(self.nside_coverage,
nside_out,
self._cov_map._block_to_cov_index)
# And a new sparse map
bit_shift = _compute_bitshift(self._nside_sparse, nside_out)
nout_per_self = 2**bit_shift
# Nest maps at higher resolution are just repeats of the same values
new_sparse_map = np.repeat(self._sparse_map, nout_per_self)
return HealSparseMap(cov_map=new_cov_map, sparse_map=new_sparse_map,
nside_sparse=nside_out, primary=self._primary,
sentinel=self._sentinel)
def apply_mask(self, mask_map, mask_bits=None, mask_bit_arr=None, in_place=True):
"""
Apply an integer mask to the map. All pixels in the integer
mask that have any bits in mask_bits set will be zeroed in the
output map. The default is that this operation will be done
in place, but it may be set to return a copy with a masked map.
Parameters
----------
mask_map : `HealSparseMap`
Integer mask to apply to the map.
mask_bits : `int`, optional
Bits to be treated as bad in the mask_map.
Default is None (all non-zero pixels are masked)
mask_bit_arr : `list` or `np.ndarray`, optional
Array of bit values, used if mask_map is a wide_mask_map.
in_place : `bool`, optional
Apply operation in place. Default is True
Returns
-------
masked_map : `HealSparseMap`
self if in_place is True, a new copy otherwise
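Examples
--------
Sketch only; ``m`` (a float map) and ``mask`` (an integer mask map)
are hypothetical.
>>> masked = m.apply_mask(mask, mask_bits=2 | 4, in_place=False)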
"""
# Check that the mask_map is an integer map (and not a recArray)
if not mask_map.is_integer_map:
raise RuntimeError("Can only apply a mask_map that is an integer map.")
if mask_bits is not None and mask_map.is_wide_mask_map:
raise RuntimeError("Cannot use mask_bits with wide_mask_map.")
# operate on this map valid_pixels
valid_pixels = self.valid_pixels
if mask_bits is None:
if mask_map.is_wide_mask_map:
if mask_bit_arr is None:
bad_pixels, = np.where(mask_map.get_values_pix(valid_pixels).sum(axis=1) > 0)
else:
# loop over mask_bit_arr
mask_values = mask_map.get_values_pix(valid_pixels)
bad_pixel_flag = None
for bit in mask_bit_arr:
field, bitval = _get_field_and_bitval(bit)
if bad_pixel_flag is None:
bad_pixel_flag = ((mask_values[:, field] & bitval) > 0)
else:
bad_pixel_flag |= ((mask_values[:, field] & bitval) > 0)
bad_pixels, = np.where(bad_pixel_flag)
else:
bad_pixels, = np.where(mask_map.get_values_pix(valid_pixels) > 0)
else:
bad_pixels, = np.where((mask_map.get_values_pix(valid_pixels) & mask_bits) > 0)
if in_place:
new_map = self
else:
new_map = HealSparseMap(cov_map=self._cov_map.copy(),
sparse_map=self._sparse_map.copy(),
nside_sparse=self._nside_sparse,
primary=self._primary,
sentinel=self._sentinel)
new_value = new_map._sparse_map[0]
ipnest_cov = self._cov_map.cov_pixels(valid_pixels[bad_pixels])
new_map._sparse_map[valid_pixels[bad_pixels] + new_map._cov_map[ipnest_cov]] = new_value
return new_map
def interpolate_pos(self, ra_or_theta, dec_or_phi, lonlat=True, allow_partial=False):
"""
Return the bilinear interpolation of the map using 4 nearest neighbors.
Parameters
----------
ra_or_theta : `float`, array-like
Angular coordinates of points on a sphere.
dec_or_phi : `float`, array-like
Angular coordinates of points on a sphere.
lonlat : `bool`, optional
If True, input angles are longitude and latitude in degrees.
Otherwise, they are co-latitude and longitude in radians.
allow_partial : `bool`, optional
If this is True, then unseen (not valid) neighbors will be
ignored and the output value will be the weighted average of the
valid neighbors. Otherwise, if any neighbor is not valid then
the interpolated value will be set to UNSEEN.
Returns
-------
values : `np.ndarray`
Array of interpolated values corresponding to input positions.
The return array will always be 64-bit floats.
Notes
-----
The interpolation routine works only on numeric data, and not on wide
mask maps, recarray maps, or boolean maps.
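Examples
--------
Sketch only; ``m`` is assumed to be a float map covering this
(hypothetical) position.
>>> vals = m.interpolate_pos(np.array([50.0]), np.array([25.0]))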
"""
if self._is_wide_mask:
raise NotImplementedError("Interpolation does not run on a wide mask map.")
elif self._is_rec_array:
raise NotImplementedError("Interpolation does not run on a recarray map.")
elif isinstance(self._sentinel, bool):
raise NotImplementedError("Interpolation does not run on a boolean map.")
interp_pix, interp_wgt = hpg.get_interpolation_weights(
self.nside_sparse,
np.atleast_1d(ra_or_theta),
np.atleast_1d(dec_or_phi),
lonlat=lonlat,
)
aux = self.get_values_pix(interp_pix)
out_of_bounds = (aux == self._sentinel)
aux = aux.astype(np.float64)
aux[out_of_bounds] = np.nan
if not allow_partial:
# Any pixel that has an out-of-bounds neighbor will be set to UNSEEN.
values = np.nansum(aux * interp_wgt, axis=1) / np.sum(interp_wgt, axis=1)
values[~np.all(~out_of_bounds, axis=1)] = hpg.UNSEEN
else:
# Use only the neighbor pixels that are valid.
interp_wgt[out_of_bounds] = np.nan
wgt_sum = np.nansum(interp_wgt, axis=1)
values = np.nansum(aux * interp_wgt, axis=1)
all_bad = (wgt_sum == 0.0)
values[~all_bad] /= wgt_sum[~all_bad]
# Any pixel that has all bad neighbors will be UNSEEN.
values[all_bad] = hpg.UNSEEN
return values
def __getitem__(self, key):
"""
Get part of a healpix map.
"""
if isinstance(key, str):
if not self._is_rec_array:
raise IndexError("HealSparseMap is not a recarray map, cannot use string index.")
return self.get_single(key, sentinel=None)
elif isinstance(key, numbers.Integral):
# Get a single pixel
# Return a single (non-array) value
return self.get_values_pix(np.array([key]))[0]
elif isinstance(key, slice):
# Get a slice of pixels
start = key.start if key.start is not None else 0
stop = key.stop if key.stop is not None else hpg.nside_to_npixel(self._nside_sparse)
step = key.step if key.step is not None else 1
return self.get_values_pix(np.arange(start, stop, step))
elif isinstance(key, np.ndarray):
# Make sure that it's integers
test_value = np.zeros(1, key.dtype)[0]
if not is_integer_value(test_value):
raise IndexError("Numpy array indices must be integers for __getitem__")
return self.get_values_pix(key)
elif isinstance(key, list):
# Make sure that it's integers
arr = np.atleast_1d(key)
if len(arr) > 0:
if not is_integer_value(arr[0]):
raise IndexError("List array indices must be integers for __getitem__")
return self.get_values_pix(arr)
else:
raise IndexError("Illegal index type (%s) for __getitem__ in HealSparseMap." %
(key.__class__))
def __setitem__(self, key, value):
"""
Set part of a healpix map
"""
if isinstance(key, numbers.Integral):
# Set a single pixel
return self.update_values_pix(np.array([key]), value)
elif isinstance(key, slice):
# Set a slice of pixels
start = key.start if key.start is not None else 0
stop = key.stop if key.stop is not None else hpg.nside_to_npixel(self._nside_sparse)
step = key.step if key.step is not None else 1
return self.update_values_pix(np.arange(start, stop, step),
value)
elif isinstance(key, np.ndarray):
test_value = np.zeros(1, key.dtype)[0]
if not is_integer_value(test_value):
raise IndexError("Numpy array indices must be integers for __setitem__")
return self.update_values_pix(key, value)
elif isinstance(key, list):
arr = np.atleast_1d(key)
if len(arr) > 0 and not is_integer_value(arr[0]):
raise IndexError("List/Tuple array indices must be integers for __setitem__")
return self.update_values_pix(arr, value)
else:
raise IndexError("Illegal index type (%s) for __setitem__ in HealSparseMap." %
(key.__class__))
def get_single(self, key, sentinel=None, copy=False):
"""
Get a single healsparse map out of a recarray map, with the ability to
override a sentinel value.
Parameters
----------
key : `str`
Field for the recarray
sentinel : `int` or `float` or None, optional
Override the default sentinel value. Default is None (use default)
Returns
-------
single_map : `HealSparseMap`
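Examples
--------
Sketch only; ``rec_map`` and its field name 'ebv' are hypothetical.
>>> ebv_map = rec_map.get_single('ebv', copy=True)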
"""
if not self._is_rec_array:
raise TypeError("HealSparseMap is not a recarray map")
# If we are the primary key, use the sentinel as set. Otherwise,
# use the default sentinel unless otherwise overridden.
if key == self._primary:
_sentinel = check_sentinel(self._sparse_map[key].dtype.type, self._sentinel)
else:
_sentinel = check_sentinel(self._sparse_map[key].dtype.type, sentinel)
if not copy:
# This will not copy memory which allows in-recarray assignment.
# Problems can potentially happen with mixed type recarrays depending
# on how they were constructed (though using make_empty should be safe).
# However, these linked maps cannot be used to add new pixels which
# is why there is the _is_view flag.
return HealSparseMap(cov_map=self._cov_map,
sparse_map=self._sparse_map[key],
nside_sparse=self._nside_sparse, sentinel=_sentinel,
_is_view=True)
new_sparse_map = np.full_like(self._sparse_map[key], _sentinel)
valid_indices = (self._sparse_map[self._primary] != self._sentinel)
new_sparse_map[valid_indices] = self._sparse_map[key][valid_indices]
return HealSparseMap(cov_map=self._cov_map, sparse_map=new_sparse_map,
nside_sparse=self._nside_sparse, sentinel=_sentinel)
def get_single_covpix_map(self, covpix):
"""
Get a healsparse map for a single coverage pixel.
Note that this makes a copy of the data.
Parameters
----------
covpix : `int`
Coverage pixel to copy
Returns
-------
single_pixel_map : `HealSparseMap`
Copy of map with a single coverage pixel.
"""
nfine_per_cov = self._cov_map._nfine_per_cov
if self._cov_map[covpix] + covpix*nfine_per_cov < nfine_per_cov:
# Pixel is not in the coverage map; return an empty map
return HealSparseMap.make_empty_like(self)
new_cov_map = HealSparseCoverage.make_from_pixels(self.nside_coverage,
self._nside_sparse,
[covpix])
if self._is_wide_mask:
new_sparse_map = np.zeros((2*nfine_per_cov, self._wide_mask_width), dtype=self.dtype)
# Copy overflow bin
new_sparse_map[0: nfine_per_cov, :] = self._sparse_map[0: nfine_per_cov, :]
# Copy the pixel
new_sparse_map[nfine_per_cov: 2*nfine_per_cov, :] = self._sparse_map[
self._cov_map[covpix] + covpix*nfine_per_cov:
self._cov_map[covpix] + covpix*nfine_per_cov + nfine_per_cov, :]
else:
new_sparse_map = np.zeros(2*nfine_per_cov, dtype=self.dtype)
# Copy overflow bin
new_sparse_map[0: nfine_per_cov] = self._sparse_map[0: nfine_per_cov]
# Copy the pixel
new_sparse_map[nfine_per_cov: 2*nfine_per_cov] = self._sparse_map[
self._cov_map[covpix] + covpix*nfine_per_cov:
self._cov_map[covpix] + covpix*nfine_per_cov + nfine_per_cov]
return HealSparseMap(cov_map=new_cov_map, sparse_map=new_sparse_map,
nside_sparse=self._nside_sparse, primary=self._primary,
sentinel=self._sentinel)
def get_covpix_maps(self):
"""
Get all single covpixel maps, one at a time.
Yields
------
single_pixel_map : `HealSparseMap`
"""
cov_pixels, = np.where(self._cov_map.coverage_mask)
for cov_pix in cov_pixels:
yield self.get_single_covpix_map(cov_pix)
def astype(self, dtype, sentinel=None):
"""
Convert sparse map to a different numpy datatype, including sentinel
values. If sentinel is not specified the default for the converted
datatype is used (`UNSEEN` for float, and -MAXINT for ints).
Parameters
----------
dtype : `numpy.dtype`
Valid numpy dtype for a single array.
sentinel : `int` or `float`, optional
Converted map sentinel value.
Returns
-------
sparse_map : `HealSparseMap`
New map with new data type.
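Examples
--------
Sketch only; ``m`` is assumed to be a float64 map.
>>> m32 = m.astype(np.float32)
>>> m_int = m.astype(np.int32, sentinel=-9999)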
"""
if self._is_rec_array:
raise RuntimeError("Cannot convert datatype of a recarray map.")
elif self._is_wide_mask:
raise RuntimeError("Cannot convert datatype of a wide mask.")
new_sparse_map = np.zeros(self._sparse_map.shape, dtype=dtype)
valid_pix = (self._sparse_map != self._sentinel)
new_sparse_map[valid_pix] = self._sparse_map[valid_pix].astype(dtype)
_sentinel = check_sentinel(new_sparse_map.dtype.type, sentinel)
new_sparse_map[~valid_pix] = _sentinel
return HealSparseMap(cov_map=self._cov_map, sparse_map=new_sparse_map,
nside_sparse=self.nside_sparse, sentinel=_sentinel)
def __add__(self, other):
"""
Add a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.add)
def __iadd__(self, other):
"""
Add a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.add, in_place=True)
def __sub__(self, other):
"""
Subtract a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.subtract)
def __isub__(self, other):
"""
Subtract a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.subtract, in_place=True)
def __mul__(self, other):
"""
Multiply a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.multiply)
def __imul__(self, other):
"""
Multiply a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.multiply, in_place=True)
def __truediv__(self, other):
"""
Divide a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.divide)
def __itruediv__(self, other):
"""
Divide a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.divide, in_place=True)
def __pow__(self, other):
"""
Raise the map to a power.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.power)
def __ipow__(self, other):
"""
Raise the map to a power, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.power, in_place=True)
def __and__(self, other):
"""
Perform a bitwise and with a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_and, int_only=True)
def __iand__(self, other):
"""
Perform a bitwise and with a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_and, int_only=True, in_place=True)
def __xor__(self, other):
"""
Perform a bitwise xor with a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_xor, int_only=True)
def __ixor__(self, other):
"""
Perform a bitwise xor with a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_xor, int_only=True, in_place=True)
def __or__(self, other):
"""
Perform a bitwise or with a constant.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_or, int_only=True)
def __ior__(self, other):
"""
Perform a bitwise or with a constant, in place.
Cannot be used with recarray maps.
"""
return self._apply_operation(other, np.bitwise_or, int_only=True, in_place=True)
def _apply_operation(self, other, func, int_only=False, in_place=False):
"""
Apply a generic arithmetic function.
Cannot be used with recarray maps.
Parameters
----------
other : `int` or `float` (or numpy equivalents)
The other item to perform the operator on.
func : `np.ufunc`
The numpy universal function to apply.
int_only : `bool`, optional
Only accept integer types. Default is False.
in_place : `bool`, optional
Perform operation in-place. Default is False.
Returns
-------
result : `HealSparseMap`
Resulting map
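Examples
--------
Sketch of the public operators that route through this method; ``m``
(a float map) and ``imask`` (an integer map) are hypothetical.
>>> m2 = m * 2.0
>>> imask |= 4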
"""
name = func.__str__()
if self._is_rec_array:
raise NotImplementedError("Cannot use %s with recarray maps" % (name))
if int_only:
if not self.is_integer_map:
raise NotImplementedError("Can only apply %s to integer maps" % (name))
else:
# If not int_only then it can't be used with a wide mask.
if self._is_wide_mask:
raise NotImplementedError("Cannot use %s with wide mask maps" % (name))
other_int = False
other_float = False
other_bits = False
if isinstance(other, numbers.Integral):
other_int = True
elif isinstance(other, numbers.Real):
other_float = True
elif isinstance(other, (tuple, list)):
if not self._is_wide_mask:
raise NotImplementedError("Must use a wide mask to operate with a bit list")
other_bits = True
for elt in other:
if not isinstance(elt, numbers.Integral):
raise NotImplementedError("Can only use an integer list of bits "
"with %s operation" % (name))
if np.max(other) >= self._wide_mask_maxbits:
raise ValueError("Bit position %d too large (>= %d)" % (np.max(other),
self._wide_mask_maxbits))
if self._is_wide_mask:
if not other_bits:
raise NotImplementedError("Must use a bit list with the %s operation with "
"a wide mask" % (name))
else:
if not other_int and not other_float:
raise NotImplementedError("Can only use a constant with the %s operation" % (name))
if not other_int and int_only:
raise NotImplementedError("Can only use an integer constant with the %s operation" % (name))
if self._is_wide_mask:
valid_sparse_pixels = (self._sparse_map != self._sentinel).sum(axis=1, dtype=np.bool_)
other_value = np.zeros(self._wide_mask_width, self._sparse_map.dtype)
for bit in other:
field, bitval = _get_field_and_bitval(bit)
other_value[field] |= bitval
else:
valid_sparse_pixels = (self._sparse_map != self._sentinel)
if in_place:
if self._is_wide_mask:
for i in range(self._wide_mask_width):
col = self._sparse_map[:, i]
func(col, other_value[i], out=col, where=valid_sparse_pixels)
else:
func(self._sparse_map, other, out=self._sparse_map, where=valid_sparse_pixels)
return self
else:
combinedSparseMap = self._sparse_map.copy()
if self._is_wide_mask:
for i in range(self._wide_mask_width):
col = combinedSparseMap[:, i]
func(col, other_value[i], out=col, where=valid_sparse_pixels)
else:
func(combinedSparseMap, other, out=combinedSparseMap, where=valid_sparse_pixels)
return HealSparseMap(cov_map=self._cov_map, sparse_map=combinedSparseMap,
nside_sparse=self._nside_sparse, sentinel=self._sentinel)
def __copy__(self):
return HealSparseMap(cov_map=self._cov_map.copy(),
sparse_map=self._sparse_map.copy(), nside_sparse=self._nside_sparse,
sentinel=self._sentinel, primary=self._primary)
def copy(self):
return self.__copy__()
def __repr__(self):
return self.__str__()
def __str__(self):
descr = 'HealSparseMap: nside_coverage = %d, nside_sparse = %d' % (self.nside_coverage,
self._nside_sparse)
if self._is_rec_array:
descr += ', record array type.\n'
descr += self._sparse_map.dtype.descr.__str__()
elif self._is_wide_mask:
descr += ', %d bit wide mask' % (self._wide_mask_maxbits)
else:
descr += ', ' + self._sparse_map.dtype.name
return descr
|
import cx_Oracle
import getpass
user = input("Username [%s]: " % getpass.getuser())
if not user:
user = getpass.getuser()
pw = getpass.getpass()
conString = user + '/' + pw + '@gwynne.cs.ualberta.ca:1521/CRS'
connection = cx_Oracle.connect(conString)
cursor = connection.cursor()
pid=101
title="Window"
place="Utah"
f_image = open('window-sm.jpg','rb')
image = f_image.read()
# prepare memory for operation parameters
cursor.setinputsizes(image=cx_Oracle.LONG_BINARY)
insert = """insert into pictures (photo_id, title, place, image)
values (:photo_id, :title, :place, :image)"""
cursor.execute(insert,{'photo_id':pid, 'title':title,
'place':place, 'image':image})
connection.commit()
# Housekeeping...
f_image.close()
cursor.close()
connection.close()
# tutorial.py
# Note: the original fragment used connStr and createStr without defining
# them. The definitions below are assumptions added so the fragment can
# run: connStr reuses the connection-string pattern above, and createStr
# is a TOFFEES schema inferred from the INSERT columns.
import cx_Oracle
import getpass
connStr = input("Username: ") + '/' + getpass.getpass() + '@gwynne.cs.ualberta.ca:1521/CRS'
createStr = ("CREATE TABLE TOFFEES (T_NAME VARCHAR2(32), SUP_ID NUMBER, "
"PRICE NUMBER, SALES NUMBER, TOTAL NUMBER)")
try:
connection = cx_Oracle.connect(connStr)
curs = connection.cursor()
curs.execute(createStr)
data = [('Quadbury', 101, 7.99, 0, 0),
('Almond roca', 102, 8.99, 0, 0),
('Golden Key', 103, 3.99, 0, 0)]
cursInsert = connection.cursor()
cursInsert.bindarraysize = 3
cursInsert.setinputsizes(32, int, float, int, int)
cursInsert.executemany("INSERT INTO TOFFEES(T_NAME, SUP_ID, PRICE, SALES, TOTAL) "
"VALUES (:1, :2, :3, :4, :5)", data)
connection.commit()
curs.execute("SELECT * from TOFFEES")
rows = curs.fetchall()
for row in rows:
print(row)
curs.close()
cursInsert.close()
connection.close()
|
# Generates the intent schema and sample utterances for WordBox based on 1,000 of the most common words in the English language
# Intent Schema
# {
# "intents": [
# {
# "slots": [
# {
# "name": "Word",
# "type": "AMAZON.LITERAL"
# }
# ],
# "intent": "GetSynonymIntent"
# },
# ]
# }
# Sample Utterances
# GetSynonymIntent a synonym for {happy|Word}
# GetAntonymIntent an antonym for {evil|Word}
import os
import json
dev = 0
input_file = "freq_small.txt" if dev else "freq.txt"
schema_output_file = "intent_schema_small.json" if dev else "intent_schema.json"
utterances_output_file = "sample_utterances_small.txt" if dev else "sample_utterances.txt"
with open(os.path.dirname(os.path.realpath(__file__)) + "/" + input_file, "r") as f:
all_words = [line.strip() for line in f]
custom_intents = [
("GetSynonymIntent", ("a synonym for",)),
("GetAntonymIntent", ("an antonym for",)),
("GetPOSIntent", ("the part of speech for",)),
("GetRhymeIntent", ("a rhyme for",)),
("GetDefinitionIntent", ("the definition of",)),
("GetDefinitionIntent", ("define",)),
("GetSyllablesIntent", ("the syllables for",)),
("GetSyllablesIntent", ("the syllables of",)),
("GetFrequencyIntent", ("the frequency of",)),
("GetFrequencyIntent", ("how common", "is")),
("GetPronunciationIntent", ("how to pronounce",)),
("GetPronunciationIntent", ("pronunciation of",))
]
# Intent schema
intent_json = {"intents": []}
intent_json["intents"].append({"intent": "AMAZON.CancelIntent"})
intent_json["intents"].append({"intent": "AMAZON.HelpIntent"})
intent_json["intents"].append({"intent": "AMAZON.StopIntent"})
for intent, utterance in custom_intents:
intent_exists = False
for json_intent in intent_json["intents"]:
# print(intent)
# print(json_intent["name"])
if intent == json_intent["intent"]:
intent_exists = True
break
if not intent_exists:
intent_json["intents"].append({"intent": intent, "slots": [{"name": "Word", "type": "AMAZON.LITERAL"}]})
with open(os.path.dirname(os.path.realpath(__file__)) + "/" + schema_output_file, "w") as f:
json.dump(intent_json, f)
# Sample utterances
all_utterances = []
with open(os.path.dirname(os.path.realpath(__file__)) + "/" + utterances_output_file, 'w') as f:
for intent, utterance in custom_intents:
for word in all_words:
f.write(intent + " " + utterance[0] + " {" + word + "|Word}" + ((" " + utterance[1]) if (len(utterance) > 1) else "") + "\n")
|
from openerp.osv import osv, fields
from openerp import netsvc
class plan_carry(osv.osv):
_name= 'plan.carry'
_columns= {
'type': fields.selection([('vote','Voting'),('poll','Poll')],'Type'),
'group': fields.char('User Group'),
'start_date': fields.date('Start Date'),
'finish_date': fields.date('Finish Date'),
'state': fields.selection([('new','New'),('done','Done')]),
}
class voting_tasks(osv.osv):
_name='voting.tasks'
_columns={
'name': fields.char('The Name of the Vote'),
'group': fields.char('User Group'),
'start_date': fields.date('Start Date'),
'finish_date': fields.date('Finish Date'),
'responsible': fields.many2one('res.users','Responsible'),
'state': fields.selection([('new1','New'),('confirm1','Confirm'),('done1','Done')]),
'answer_line': fields.one2many('voiting.answers','partner_id'),
'answer_id': fields.related('answer_line','answer_id',type="many2one",relation="answer.type")
}
class voiting_answers(osv.osv):
_name='voiting.answers'
_columns={
'partner_id': fields.integer('Partner ID'),
'answer_id': fields.many2one('answer.type','Variant of Answers'),
}
class answer_type(osv.osv):
_name='answer.type'
_columns={
'answer_name': fields.char('Variants of Answers',required=True)
}
class question_tasks(osv.osv):
_name='question.tasks'
_columns={
'name': fields.char('The Name of the Surveys'),
'group': fields.char('User Group'),
'start_date': fields.date('Start Date'),
'finish_date': fields.date('Finish Date'),
'responsible': fields.many2one('res.users','Responsible'),
'state': fields.selection([('new2','New'),('confirm2','Confirm'),('done2','Done')]),
'question_line': fields.one2many('voiting.questions','partner_id'),
'question_id': fields.related('question_line','question_id',type="many2one",relation="question.type")
}
class voting_question(osv.osv):
_name='voiting.questions'
_columns={
'question_id': fields.many2one('question.type','A List of Questions'),
'partner_id': fields.integer('Partner ID')
}
class question_type(osv.osv):
_name='question.type'
_columns={
'question_name': fields.char('Variants of Questions',required=True)
}
|
name = input('Enter customer name\n')
print('How many items does the store have?')
number = int(input()) * len(name)
print('-------------------')
print('Welcome {name}!\nOur store has {number} items'.format(name=name, number=number))
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
pmu = [2, 6, 10, 19, 20, 22, 23, 25, 29]
n = 3
df = 39
# test_buses = [1, 1, 1, 1]
test_buses = []
for i in range(n):
test_buses.append(1)
print(test_buses)
def create_test_buses(n, test_buses, pmu, df):
for k in range(n):
if test_buses[k] == df:
test_buses[k] = 1
while test_buses[k] in pmu:
test_buses[k] = test_buses[k] + 1
# if k == df:
# test_buses[k + 1] = 1
# # for i in range(1,n):
# # if
# else:
# test_buses[k + 1] = test_buses[k + 1] + 1
# break
else:
test_buses[k] = test_buses[k] + 1
while test_buses[k] in pmu:
test_buses[k] = test_buses[k] + 1
break
# if k == n-1
# test_buses[k+1] =
return test_buses
for i in range((df - len(pmu))**n):
test_buses = create_test_buses(n, test_buses, pmu, df)
print(test_buses)
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def naseberry():
return "Hello World!"
@app.route("/something")
def saySomething():
return render_template("ourfirsttemplate.html",
title="learning flask",
heading="time to learn flask",
message="Flask isn't too bad!")
@app.route("/process", methods=['GET', 'POST'])
def process_request():
name = request.args.get('name')
return render_template("process.html",
name=name)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=True)
|
def is_board_full(board):
for lists in board:
for item in lists:
if item == " ":
return False
return True
def is_valid_move(board, location):
if location not in range(1, 10):
return False
row = (location - 1) // len(board)
col = (location - 1) % len(board[row])
if board[row][col] == ' ':
return True
else:
return False
'''
for i in range (0,3):
if board[i][i + 1] == "X" or board[i][i + 1] == "O":
return True
elif board[i + 1][i] == "X" or board[i + 1][i] == "O":
return True
elif board[i + 1][i - 1] == "X" or board[i + 1][i - 1] == "O":
return True
elif board[i + 1][i + 1] == "X" or board[i + 1][i + 1] == "O":
return True
return False
'''
def winning_move(board):
for row in range(0, 3):
if board[row][0] == board[row][1] and board[row][1] == board[row][2] and board[row][0] != " ":
return True
for col in range(0, 3):
if board[0][col] == board[1][col] and board[1][col] == board[2][col] and board[2][col] != " ":
return True
if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[2][2] != " ":
return True
if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[2][0] != " ":
return True
return False
def convert_to_board_index(board, choose_spot):
if choose_spot in range(1, 4):
row = 0
elif choose_spot in range(4, 7):
row = 1
elif choose_spot in range(7, 10):
row = 2
if choose_spot == 1 or choose_spot == 4 or choose_spot == 7:
col = 0
elif choose_spot == 2 or choose_spot == 5 or choose_spot == 8:
col = 1
elif choose_spot == 3 or choose_spot == 6 or choose_spot == 9:
col = 2
if current_player == player1:
board[row][col] = 'X'
elif current_player == player2:
board[row][col] = 'O'
board = [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]
def print_board(board):
print("\n {} | {} | {}\n---|---|---\n {} | {} | {}\n---|---|---\n {} | {} | {}".format(
board[0][0], board[0][1], board[0][2], board[1][0], board[1][1], board[1][2], board[2][0], board[2][1], board[2][2]))
welcome_message = """Welcome to Tic Tac Toe!
This game supports two players.
Each player will alternate placing an X or O on the 3x3 grid until one player gets 3 of their mark in a row, column, or diagonal.
If the board fills up without anyone getting 3 in a row, the players will tie.
You will tell the computer where to put your mark by using the numbering system below:
1 | 2 | 3
---|---|---
4 | 5 | 6
---|---|---
7 | 8 | 9
Good luck!
"""
print(welcome_message)
player1 = input("Player 1, please enter your name: ")
player2 = input("Player 2, please enter your name: ")
current_player = player1
game_over = False
while not game_over:
choose_spot = int(
input("\n{}, please choose a location to place your mark: ".format(current_player)))
if is_valid_move(board, choose_spot):
convert_to_board_index(board, choose_spot)
print_board(board)
# Use the win/full checks defined above so the game can actually end
if winning_move(board):
print("\n{} wins! Congratulations!".format(current_player))
game_over = True
elif is_board_full(board):
print("\nThe board is full: it's a tie!")
game_over = True
elif current_player == player1:
current_player = player2
else:
current_player = player1
else:
print("That spot is taken or out of range. Try again.")
print("Thanks for playing!")
|
import sys
from validatelib import *
if __name__ == '__main__':
result = ExecutionInfo('assignment example', './decrypt', ['task3.2_pwsmall.txt', 'task3.2_dict.txt'], TextFileInfo('output.txt', r'^((user906;Bahnhof.*?user\d*;.*?)|(user\d*;.*?user906;Bahnhof))$')).run()
try:
solution = TextFileInfo('taskCryptSolution.txt', r'^((user906;Bahnhof.*?user\d*;.*?)|(user\d*;.*?user906;Bahnhof))$')
solution.ensurefileexists()
solution.checkfile()
except ErrorMessage as e:
e.show()
result += e.errorcode
sys.exit(result)
|
# --------------------------------------------------------------------------------------------------
# AWS Settings
# --------------------------------------------------------------------------------------------------
# Kinesis
KINESIS_STREAM_NAME = 'IncomingDataStream'
# DynamoDB Table and Column Names
STATE_TABLE_NAME = 'StateTable'
STATE_TABLE_KEY = 'id'
DELTA_TABLE_NAME = 'ReduceTable'
DELTA_TABLE_KEY = 'MessageId'
AGGREGATE_TABLE_NAME = 'AggregateTable'
AGGREGATE_TABLE_KEY = 'Identifier'
MESSAGE_COUNT_NAME = 'message_count'
PARAMETER_TABLE_NAME = 'ParameterTable'
PARAMETER_TABLE_KEY = 'parameter'
PARAMETER_COLUMN_NAME = 'value'
ID_COLUMN_NAME = 'TradeID'
VERSION_COLUMN_NAME = 'Version'
VALUE_COLUMN_NAME = 'Value'
TIMESTAMP_COLUMN_NAME = 'Timestamp'
HIERARCHY_COLUMN_NAME = 'Hierarchy'
HIERARCHY_DEFINITION = {
'RiskType' : ['PV', 'Delta'],
'Region' : ['EMEA', 'APAC', 'AMER'],
'TradeDesk' : ['FXSpot', 'FXOptions']
}
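# Illustrative sketch (not part of the original settings): one way a
# record's rollup path could be built from the hierarchy configured
# above. The `record` dict and `hierarchy_path` helper are hypothetical.
def hierarchy_path(record, levels):
    # Join the record's value at each hierarchy level, top to bottom.
    return '/'.join(record[level] for level in levels)
# e.g. hierarchy_path({'RiskType': 'PV', 'TradeDesk': 'FXSpot',
# 'Region': 'EMEA'}, AGGREGATION_HIERARCHY) == 'PV/FXSpot/EMEA'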
TIMESTAMP_GENERATOR_FIRST = 'timestamp_generator_first'
TIMESTAMP_GENERATOR_MEAN = 'timestamp_generator_mean'
# --------------------------------------------------------------------------------------------------
# Aggregation Settings
# --------------------------------------------------------------------------------------------------
# Definition of the Hierarchy
AGGREGATION_HIERARCHY = ['RiskType', 'TradeDesk', 'Region']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from analyse_immo.factory import Factory
from analyse_immo.charge import Charge
from analyse_immo.lot import Lot
from test.testcase_fileloader import TestCaseFileLoader
class TestCharge(TestCaseFileLoader):
def setUp(self):
super().setUp()
self.defaut = Factory.make_defaut(self.defaut_data)
def testInit(self):
_ = Charge(None)
_ = Charge(None, None)
def testAddMontant(self):
lot = Lot('T2', 45, 450)
charge = Charge(lot)
type_ = Charge.charge_e.taxe_fonciere
charge.add(type_, 500)
self.assertEqual(charge.get_montant_annuel(type_), 500)
def testAdd0(self):
lot = Lot('T2', 45, 450)
charge = Charge(lot)
type_ = Charge.charge_e.taxe_fonciere
charge.add(type_, 0)
self.assertEqual(charge.get_montant_annuel(type_), 0)
def testAddVacanceLocative0(self):
lot = Lot('T2', 45, 450)
charge = Charge(lot)
type_ = Charge.charge_e.vacance_locative
charge.add(type_, 0)
self.assertEqual(charge.get_montant_annuel(type_), 0)
def testAddVacanceLocative1A(self):
'''no default'''
lot = Lot('T2', 45, 450)
charge = Charge(lot)
type_ = Charge.charge_e.vacance_locative
charge.add(type_, 1)
self.assertEqual(charge.get_montant_annuel(type_), 0)
def testAddVacanceLocative1B(self):
lot = Lot('T2', 45, 450)
charge = Charge(lot, self.defaut)
type_ = Charge.charge_e.vacance_locative
charge.add(type_, 1)
def testAddTravauxProvision(self):
'''default'''
lot = Lot('T2', 45, 500)
charge = Charge(lot, self.defaut)
type_ = Charge.charge_e.provision_travaux
charge.add(type_, 1)
self.assertEqual(charge.get_taux(type_), 0.01)
self.assertEqual(charge.get_montant_annuel(type_), 60)
def testAddMissingDefaut(self):
lot = Lot('T2', 45, 500)
charge = Charge(lot, self.defaut)
type_ = Charge.charge_e.copropriete
with self.assertRaises(LookupError):
charge.add(type_, 1)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import os
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (0, 50)
import pygame
import socket
import sys
import math
pixels = []
brightness = 50
MAX_BRIGHTNESS = 250
MATRIX_START_ID = 33
LCDS = []
printer_text = None
class Neopixel():
def __init__(self, x, y):
self.x = x
self.y = y
self.width = 30
self.height = 30
self.status = True
self.color = (15,15,15)
def render(self, screen):
color = self.color
if color[0] + color[1] + color[2] == 0:
color = [15,15,15]
else:
# Use integer division so pygame receives integer color components
color = (self.color[0]*brightness//MAX_BRIGHTNESS, self.color[1]*brightness//MAX_BRIGHTNESS, self.color[2]*brightness//MAX_BRIGHTNESS)
pygame.draw.rect(screen, color, pygame.Rect(self.x, self.y, self.width, self.height))
class Text():
def __init__(self, text, font, x=0, y=0, color=(255, 255, 255)):
self.color = color
self.text = font.render(text, True, self.color)
self.inner_text = text
self.anchor = (0, 0.5)
self.SetPosition(x, y)
self.font = font
def SetPosition(self, x, y):
self.x = x
self.y = y
self.position = (self.x - self.text.get_width()*self.anchor[0], self.y - self.text.get_height()*self.anchor[1])
def render(self, screen):
screen.blit(self.text, self.position)
def setAnchor(self, x, y):
self.anchor = (x, y)
self.SetPosition(self.x, self.y)
def setText(self, text):
self.inner_text = text
self.text = self.font.render(text, True, self.color)
def render_multiline(self, screen):
words = [word.split(' ') for word in self.inner_text.splitlines()] # 2D array where each row is a list of words.
space = self.font.size(' ')[0] # The width of a space.
max_width, max_height = screen.get_size()
x, y = self.position
for line in words:
for word in line:
word_surface = self.font.render(word, 0, self.color)
word_width, word_height = word_surface.get_size()
if x + word_width >= max_width:
x = self.position[0] # Reset the x.
y += word_height # Start on new row.
screen.blit(word_surface, (x, y))
x += word_width + space
x = self.position[0] # Reset the x.
y += word_height # Start on new row.
class LCD():
def __init__(self, font, x, y, color):
self.lines = []
self.lines.append(Text("0123456789ABCDEF", font, x, y, color))
self.lines.append(Text("FEDCBA9876543210", font, x, y+20, color))
def setText(self, line, text):
self.lines[line].setText(text)
def render(self, screen):
self.lines[0].render(screen)
self.lines[1].render(screen)
def handlecommand(command, data):
global pixels
global brightness
global printer_text
data = list(data)
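# Command protocol, as implemented by the branches below:
#   0x12: set pixels, payload = [count, (index, r, g, b) * count]
#   0x13: fill matrix pixels, payload = (r, g, b) runs each followed by pixel
#         offsets; a 0 after at least one offset starts a new color run
#   0x04: reset the 8x8 matrix to dim white
#   0x05: set the global brightness
#   0x91: write one LCD line, payload = [lcd_id, line_no, text...]
#   0x92: set the printer text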
if command == 0x12:
length = data[0]
for index in range(0, length):
i = index*4 + 1
pixels[data[i]].color = (data[i+1], data[i+2], data[i+3])
pixels[data[i]].status = True
if command == 0x13:
i = 0
color = (0,0,0)
set_color = True
read_once = False
while i < len(data):
value = data[i]
if set_color:
color = (data[i], data[i+1], data[i+2])
i += 2
set_color = False
read_once = False
elif value == 0 and read_once:
set_color = True
else:
pixels[value+MATRIX_START_ID].color = color
read_once = True
i += 1
if command == 0x04:
for index in range(33,97):
pixels[index].color = (15, 15, 15)
if command == 0x05:
brightness = data[0]
if command == 0x91:
lcd_id = data[0]
lcd_line = data[1]
message = bytearray(data[2:]).decode()
LCDS[lcd_id].setText(lcd_line - 1, message)
if command == 0x92:
message = bytearray(data).decode()
printer_text.setText(message)
def addNeopixelAt(x, y):
led = Neopixel(x, y)
led.status = False
pixels.append(led)
return led
def run():
global pixels
global LCDS
global printer_text
pygame.init()
screen = pygame.display.set_mode((640, 400))
clock = pygame.time.Clock()
running = True
font = pygame.font.Font("../assets/fonts/VCR_OSD_MONO_1.001.ttf", 18)
printer_font = pygame.font.Font("../assets/fonts/VCR_OSD_MONO_1.001.ttf", 14)
lcd0 = LCD(font, 50, 164, (160,200,190))
lcd0.setText(0, "> MATERIAL A ---")
LCDS.append(lcd0)
lcd1 = LCD(font, 50, 224, (160,200,190))
lcd1.setText(0, "> MATERIAL B ---")
LCDS.append(lcd1)
lcd2 = LCD(font, 50, 284, (160,200,190))
lcd2.setText(0, "> MATERIAL C ---")
LCDS.append(lcd2)
lcd3 = LCD(font, 390, 164, (160,200,190))
lcd3.setText(0, "--- MATERIAL D <")
LCDS.append(lcd3)
lcd4 = LCD(font, 390, 224, (160,200,190))
lcd4.setText(0, "--- MATERIAL E <")
LCDS.append(lcd4)
lcd5 = LCD(font, 390, 284, (160,200,190))
lcd5.setText(0, "--- MATERIAL F <")
LCDS.append(lcd5)
printer_text = Text("text over several\nlines...\nto check that it works\n... hopefully".upper(), printer_font, 40, 40, (255, 255, 198))
# material A side 0 - 3
addNeopixelAt(10, 160)
addNeopixelAt(10, 220)
addNeopixelAt(10, 280)
addNeopixelAt(120, 360)
# material B side 4 - 7
addNeopixelAt(590, 160)
addNeopixelAt(590, 220)
addNeopixelAt(590, 280)
addNeopixelAt(500, 360)
# ring 8 - 23
interval = math.pi/8
radius = 40
for index in range(0, 16):
i = radius*math.cos(interval*index+math.pi)
j = radius*math.sin(interval*index+math.pi)
led = addNeopixelAt(500+i, 80+j)
led.width = 10
led.height = 10
#selected material 24 - 27
addNeopixelAt(245, 10)
addNeopixelAt(285, 10)
addNeopixelAt(325, 10)
addNeopixelAt(365, 10)
# optimization 28 - 32
addNeopixelAt(265, 270)
addNeopixelAt(285, 305)
addNeopixelAt(305, 270)
addNeopixelAt(325, 305)
addNeopixelAt(345, 270)
# matrix 33 - 96
for index in range(0,64):
j = index//8
i = index%8
led = addNeopixelAt(272+i*12, 160+j*12)
led.width = 10
led.height = 10
# server configuration
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 6060)
sock.bind(server_address)
sock.listen(0)
sock.settimeout(0.1)
connection = None
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
    alt_pressed = bool(event.mod & pygame.KMOD_ALT)  # only honour F4 as quit while ALT is held
if event.key == pygame.K_ESCAPE:
running = False
elif event.key == pygame.K_F4 and alt_pressed:
running = False
# establish connection
try:
if not connection:
connection, client_address = sock.accept()
print("connection from", client_address)
else:
data = connection.recv(3)
if len(data)>0:
command = data[1]
data = connection.recv(data[2])
handlecommand(command, data)
if command == 0x90:
connection.close()
connection = None
print("connection dropped!")
except socket.timeout:
pass
# render process
screen.fill((0,0,0))
for led in pixels:
led.render(screen)
lcd0.render(screen)
lcd1.render(screen)
lcd2.render(screen)
lcd3.render(screen)
lcd4.render(screen)
lcd5.render(screen)
printer_text.render_multiline(screen)
pygame.display.flip()
clock.tick(60)
if connection:
connection.close()
if __name__ == '__main__':
run()
|
HOST = "127.0.0.1"
HOST_PUB = "sonata4.local"
CLIENT_ID = "gateway2"
CLIENT_ID_PUB = "gateway2_pub"
RULES_FOLDER = "resources/json_rules/"
SUB_TOPIC = '#'
HB_TOPIC = '/heart_beat'
RULES_TOPIC = '/SM/rule'
IN_TOPICS_TOPIC = '/gateways/in_topics'
OUT_TOPICS_TOPIC = '/gateways/out_topics'
HB_TIMER = 5
MAX_MEM = 2000000
GATEWAY_NAME = 'gateway-pi2.local'
DEVICE_AC = []
DEVICE_TMP = []
DEVICE_HUM = []
DEVICE_LUX = []
DEVICE_MOTION = []
DEVICE_LIGHT = []
DEVICE_LIGHT_B = []
DEVICE_LIGHT_C = []
DEVICE_LIGHT_S = []
for i in range(77,83):
DEVICE_LIGHT.append('light%d'%i)
for i in range(73,76):
DEVICE_MOTION.append('motion%d'%i)
for i in range(72,75):
DEVICE_LUX.append('lux%d'%i)
"""
for i in range(1,46):
DEVICES.append('ac%d'%i)
for i in range(1,50):
DEVICES.append('tmp%d'%i)
for i in range(1,50):
DEVICES.append('hum%d'%i)
for i in range(1,157):
DEVICE_LUX.append('lux%d'%i)
for i in range(1,165):
DEVICE_MOTION.append('motion%d'%i)
for i in range(1,109):
DEVICE_LIGHT.append('light%d'%i)
for i in range(1,34):
DEVICES.append('light_b%d'%i)
for i in range(1,10):
DEVICES.append('light_s%d'%i)
for i in range(1,77):
DEVICE_LIGHT_C.append('light_c%d'%i)
"""
|
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ephemeral.build_api import LibFunction
logger = logging.getLogger(__name__)
class JobTask(object):
def __init__(self, name: str, type: str, lib_function: 'LibFunction', parent: 'JobTask'):
self.name = name
self.type = type
self.lib_function = lib_function
self.parent = parent
self.consumes = None
self.produces = None
self.init_args = {}
self._task_created_callback = None
def new_task_created(self, new_task):
if self._task_created_callback:
return self._task_created_callback(new_task)
else:
raise RuntimeError('No _task_created_callback has been set!')
def set_task_created_callback(self, cb):
self._task_created_callback = cb
def to_dict(self):
return {
'name': self.name,
'type': self.type,
'method': self.lib_function.method_str,
'namespace': self.lib_function.namespace,
'consumes': '---',
'produces': '----',
'init_args': self.init_args
}
|
from pulp import *
import numpy as np
import random as rd
## Parameters
n=3 # number of patients
p=3 # number of time slots
pref= [[2,1,2],[1,2,3],[3,3,1]] # pref is the patients' preference matrix: pref[k][i] holds the rank patient i assigned to slot k
## Linear program
# index pairs used in the integer linear program below
t=[(i,j) for i in range(n) for j in range(p)]
creneaux = [i for i in range(p)]
x= LpVariable.dicts('créneaux',creneaux,0, 1,LpInteger)
y=LpVariable.dicts('assign',t,0,1,LpBinary)
prob = LpProblem("Prise de rendez-vous",LpMinimize)  # with the star import, LpMinimize is already in scope
prob += lpSum([pref[j][i]*y[(i,j)] for (i,j) in t])
for i in range(n):
prob+=lpSum(y[(i,j)] for j in range(p))==1
for j in range(p):
prob+=lpSum(y[(i,j)] for i in range(n))==x[j]
prob.solve()
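## Displaying the schedule (a hedged sketch, not part of the original script):
## after solving, y[(i, j)] is 1 when patient i got slot j.
if LpStatus[prob.status] == 'Optimal':
    for (i, j) in t:
        if y[(i, j)].varValue == 1:
            print("patient %d -> slot %d (rank %d)" % (i, j, pref[j][i]))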
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import shutil
import os
import sys
from musclex import __version__
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
def download_zip_pickles(dirpath):
"""
Download the pickle files from SourceForge, unzip them and put them in the right folder for testing
"""
if getattr(sys, 'frozen', False):
direc_path = dirpath
else:
direc_path = os.path.join(dirpath, "tests")
url = "https://sourceforge.net/projects/musclex/files/pickle_tests_v" + __version__ + ".zip/download"
if os.path.exists(os.path.join(direc_path, "di")):
print("Pickle files have already been downloaded.")
else:
print("Downloading and unzipping pickle files for testing...")
try:
with urlopen(url) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall(direc_path)
except Exception:
print("Error during downloading or unzipping, check your internet connection and retry.")
print("Moving files to testing folder...")
shutil.move(os.path.join(direc_path, "pickle_tests_v" + __version__, "di"),
os.path.join(direc_path, "di"))
shutil.move(os.path.join(direc_path, "pickle_tests_v" + __version__, "dc"),
os.path.join(direc_path, "dc"))
shutil.move(os.path.join(direc_path, "pickle_tests_v" + __version__, "eq"),
os.path.join(direc_path, "eq"))
shutil.move(os.path.join(direc_path, "pickle_tests_v" + __version__, "qf"),
os.path.join(direc_path, "qf"))
shutil.move(os.path.join(direc_path, "pickle_tests_v" + __version__, "pt"),
os.path.join(direc_path, "pt"))
print("Cleaning download files...")
if os.path.exists(os.path.join(dirpath, "tests", "pickle_tests_v" + __version__)):
shutil.rmtree(os.path.join(dirpath, "tests", "pickle_tests_v" + __version__))
print("Done.")
|
#!/usr/bin/env python3
"""
- fire the OOM trigger and report its state
- check the process status at each step: memory and state, optionally its name
"""
from time import sleep
from sys import stdout, stderr, argv, exit
pid = argv[1]
s = 0.05  # initial delay between OOM triggers, in seconds
k = 0.95  # decay factor: each round sleeps a little less
def rline1(path):
"""read 1st line from path."""
with open(path) as f:
for line in f:
return line.rstrip()
def write(path, string):
"""
"""
with open(path, 'w') as f:
f.write(string)
def pid_to_rss(pid):
"""
"""
rss = rline1('/proc/{}/statm'.format(pid)).split(' ')[1]
return rss
def is_alive(pid):
"""
"""
rss = pid_to_rss(pid)
if rss == '0':
return False
else:
return True
def trigger():
"""
"""
write('/proc/sysrq-trigger', 'f')
def write_score(pid):
"""
"""
write('/proc/{}/oom_score_adj'.format(pid), '1000')
print('PID:', pid)
print('Set oom_score_adj=1000')
write_score(pid)
while True:
print('Trigger OOMK! Sleep', s)
trigger()
sleep(s)
s = s * k
try:
x = is_alive(pid)
except Exception as e:
print(e)
exit()
if not x:
print('VmRSS=0, exit')
exit()
|
import base64
import cv2
import zmq
import sys
import argparse
import multiprocessing as mp
#########################################################################################################################
# https://stackoverflow.com/questions/4290834/how-to-get-a-list-of-video-capture-devices-web-cameras-on-linux-ubuntu-c
# You can use the following bash command:
# v4l2-ctl --list-devices
#In order to use the above command, you must install package v4l-utils before. In Ubuntu/Debian you can use the command:
# sudo apt-get install v4l-utils
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 Streaming Demo")
parser.add_argument("--ip", help="ip address to stream to")
parser.add_argument(
"--count",
type=int,
default=5,
help="default amount of framecaptures to stream",
)
parser.add_argument(
"--height",
type=int,
default=600,
help="frame height to resize captured frames to",
)
parser.add_argument(
"--width",
type=int,
default=800,
help="frame width to resize captured frames to",
)
parser.add_argument(
"--port",
type=int,
default=5555,
help="default remote port to connect to",
)
return parser
args = get_parser().parse_args()
#ip = args.ip
count = args.count
print(count)
counter = 0
context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://' + args.ip + ":" + str(args.port))
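# Hypothetical matching receiver (an assumption, not part of this script):
# since the PUB socket above connect()s, a subscriber would bind and read
# the (topic, frame) pairs produced by the send loop below.
def receive_frames(bind_port=5555, topic="camera1"):
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.bind('tcp://*:%d' % bind_port)
    sub.setsockopt_string(zmq.SUBSCRIBE, topic)
    while True:
        frame_topic = sub.recv_string()
        frame = sub.recv_pyobj()
        cv2.imshow(frame_topic, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break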
# initialize the first camera
camera = cv2.VideoCapture(0) # init the first camera
while counter != count:
print("camera 1")
try:
topic = "camera1"
grabbed, frame = camera.read() # grab the current frame
frame = cv2.resize(frame, (args.width,args.height)) # resize; cv2.resize expects (width, height)
footage_socket.send_string(topic, zmq.SNDMORE)
footage_socket.send_pyobj(frame)
#encoded, buffer = cv2.imencode('.jpg', frame)
#jpg_as_text = base64.b64encode(buffer)
#footage_socket.send(jpg_as_text)
except KeyboardInterrupt:
camera.release()
cv2.destroyAllWindows()
break
counter+=1
print(counter)
camera.release()
counter = 0 |
from ete2 import NCBITaxa
from joblib import Parallel, delayed
from os.path import join as pjoin
import os
from tqdm import tqdm
def mash(info):
genome = info
genome.compute_mash_hash()
def checkm(info):
genome = info
genome.compute_checkm()
def prokka(info):
genome = info
genome.prokka()
class Database(object) :
def __getitem__(self, key):
if type(key) == int :
return self.genomes[key]
else :
li = [g for g in self.genomes if g.name == key]
assert len(li) < 2, "There are more than one genomes with the name " + key
if len(li) ==1 :
return li[0]
else :
return None
def __init__(self, data_path, workbench = None, genomes = [], taxDb = None):
self.data_path = data_path
self.workbench = workbench
self.metadata_path = pjoin(self.data_path, "metadata")
if not os.path.exists(self.metadata_path):
os.makedirs(self.metadata_path)
self.metadata_file = pjoin(self.metadata_path, "metadata.csv")
if taxDb:
self.taxDb = taxDb
else :
self.taxDb = NCBITaxa()
self.genomes = genomes
def process(self, num_cores = 10):
print "Running prokka for protein annotation (excedpt if faas already provided)"
to_prokka = [g for g in self.genomes if not os.path.exists(g.proteom)]
prokka_stuff = Parallel(n_jobs=num_cores)(delayed(prokka)(i) for i in tqdm(to_prokka))
to_mash = [g for g in self.genomes if not os.path.exists(g.genome + ".msh")]
print "running mash hashing"
mashstuff= Parallel(n_jobs=num_cores)(delayed(mash)(i) for i in tqdm(to_mash))
print "running CheckM"
to_check = [g for g in self.genomes if not os.path.exists(g.genome.replace(".fna",".checkm.json")) or not g.checkm_meta]
checkmstuff= Parallel(n_jobs=num_cores)(delayed(checkm)(i) for i in tqdm(to_check))
print "computing genome sizes"
for g in tqdm(self.genomes):
if not g.size:
g.compute_size()
print "computing gc contents"
for g in tqdm(self.genomes):
if not g.gc:
g.compute_gc()
print "making fake reads"
for g in tqdm(self.genomes):
if not os.path.exists(g.fakereads):
g.make_fake_reads(read_len=150)
|
import random
n = 10000000
Afehler = 0
fehler = 0
for i in range(n):
a = random.random()
b = random.random()
data_cls = random.random()
d = random.random()
if (a < 1 / 3 and not (b < 1 / 3 or data_cls < 1 / 3 or d < 1 / 3)):
Afehler += 1
if (a < 1 / 3 or b < 1 / 3 or data_cls < 1 / 3 or d < 1 / 3):
fehler += 1
print("n: ", n, "\nonly A fails: ", Afehler, "\n")
print("failure: ", fehler, "\np(only A fails | failure): ", Afehler / fehler)
|
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution1D, Flatten, Dense, \
Input, Lambda, Activation, Reshape, Multiply, Add, Concatenate
from tensorflow.keras import backend as K
def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate):
def f(input_):
residual = input_
tanh_out = Convolution1D(n_atrous_filters, atrous_filter_size,
dilation_rate=atrous_rate,
padding='same',
activation='tanh')(input_)
sigmoid_out = Convolution1D(n_atrous_filters, atrous_filter_size,
dilation_rate=atrous_rate,
padding='same',
activation='sigmoid')(input_)
merged = Multiply()([tanh_out, sigmoid_out])
skip_out = Convolution1D(1, 1, activation='relu', padding='same')(merged)
out = Add()([skip_out, residual])
return out, skip_out
return f
def sampling(args):
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
class DenoiserModel:
encoder = None
denoiser = None
VD = None
def build_model(self, input_size):
input = Input(shape=(input_size, 1))
# WaveNet Variational Encoder
out, skip_out = wavenetBlock(64, 2, 2)(input)
skip_connections = [skip_out]
for i in range(20):
out, skip_out = wavenetBlock(64, 2, 2**((i+2)%9))(out)
skip_connections.append(skip_out)
encoder_net = Add()(skip_connections)
encoder_net = Convolution1D(1, 1)(encoder_net)
encoder_net = Flatten()(encoder_net)
z_mean = Dense(input_size)(encoder_net)
z_log_var = Dense(input_size)(encoder_net)
z = Lambda(sampling, output_shape=(input_size, 1))([z_mean, z_log_var])
# WaveNet Denoiser
z = Reshape(target_shape=(input_size, 1))(z)
denoiser_input = Concatenate()([input, z])
out, skip_out = wavenetBlock(64, 2, 2)(denoiser_input)
skip_connections = [skip_out]
for i in range(20):
out, skip_out = wavenetBlock(64, 2, 2**((i+2)%9))(out)
skip_connections.append(skip_out)
denoiser_net = Add()(skip_connections)
denoiser_net = Convolution1D(1, 1)(denoiser_net)
denoiser_net = Convolution1D(3, 1)(denoiser_net)
denoiser_net = Flatten()(denoiser_net)
denoiser_net = Dense(input_size, activation='tanh')(denoiser_net)
denoiser_net = Reshape(target_shape=(input_size, 1), name='denoise')(denoiser_net)
model = Model(inputs=input,
outputs=[Concatenate(name='kl')([z_mean, z_log_var]), denoiser_net])
# Concatenate z_mean with z_log_var into the single 'kl' output because
# fit() assigns one loss function per model output, and we need two
# losses: kl_loss and denoise_loss.
# model.summary()
return model
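# A hedged sketch (an assumption, not the original training code) of the two
# losses described above: the 'kl' head carries z_mean and z_log_var
# concatenated, so its loss splits them apart; 'denoise' can use plain MSE.
def kl_loss(y_true, y_pred):
    half = K.int_shape(y_pred)[1] // 2
    z_mean, z_log_var = y_pred[:, :half], y_pred[:, half:]
    return -0.5 * K.mean(
        K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))
# e.g. model.compile(optimizer='adam', loss={'kl': kl_loss, 'denoise': 'mse'})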
|
import json
import time
from urllib import parse
import requests
from prettytable import PrettyTable
from config import config_data
class Query:
def __init__(self,session):
self.session = session
self.config = config_data
self.chezhan_code = self.chezhan()
self.from_city_name = self.config['stations']['left']
self.to_city_name = self.config['stations']['arrive']
self.from_station = self.encoding_station(self.from_city_name)
self.to_station = self.encoding_station(self.to_city_name)
self.from_station_code = self.chezhan_code[self.from_city_name]
self.to_station_code = self.chezhan_code[self.to_city_name]
self.date = self.config['left_dates'][0]
self.add_station_cookie()
def chezhan(self):
with open('chezhan.txt', 'r') as f:
    chezhan_code = eval(f.read())
return chezhan_code
def encoding_station(self,city_name):
station_name = "{}{}".format(str(city_name.encode('unicode-escape'),
encoding="utf-8").replace("\\", "%")
+ parse.quote(","),self.chezhan_code[city_name])
return station_name
def add_station_cookie(self):
buycookies = {
"_jc_save_fromStation": self.from_station,
"_jc_save_toStation": self.to_station,
"_jc_save_fromDate": self.date,
"_jc_save_toDate": time.strftime("%Y-%m-%d", time.localtime()),
"_jc_save_wfdc_flag": "dc"
}
requests.utils.add_dict_to_cookiejar(self.session.cookies, buycookies)
def Query(self):
url = "https://kyfw.12306.cn/otn/leftTicket/query?" \
"leftTicketDTO.train_date={}&" \
"leftTicketDTO.from_station={}&" \
"leftTicketDTO.to_station={}&" \
"purpose_codes=ADULT".format(self.date,
self.from_station_code,
self.to_station_code)
r = self.session.get(url)
try:
result = json.loads(r.text)
except:
print("无查询结果")
return None
for i in result['data']['result']:
item = i.split('|')
if item[3] == self.config['train_code'][0]:
xd_data = {
"xd_code": item[0],
"train_no": item[2],
"stationTrainCode": item[3],
"leftTicket": item[12],
"train_location": item[15],
}
data = {
"swz_num": item[32] or item[25], # 商务座
"ydz_num": item[31], # 一等座
"edz_num": item[30], # 二等座
"gjrw_num": item[21], # 高级软卧
"rw_num": item[23], # 软卧
"dw_num": item[27], # 动卧
"yw_num": item[28], # 硬卧信息在28号位置
"rz_num": item[24], # 软座信息在24号位置
"yz_num": item[29], # 硬座信息在29号位置
"wz_num": item[26], # 无座信息在26号位置
}
Seat_type_parse = {
'商务座': 'swz_num',
'一等座': 'ydz_num',
'二等座': 'edz_num',
'高级软卧': 'gjrw_num',
'软卧': 'rw_num',
'动卧': 'dw_num',
'硬卧': 'yw_num',
'软座': 'rz_num',
'硬座': 'yz_num',
'无座': 'wz_num',
'其他信息': 'qt_num'
}
for key, value in data.items():
if value == "无":
data[key] = ""
seat_num = Seat_type_parse[self.config['seats'][0]]
if data[seat_num]:
if data[seat_num] == "有":
print("{}当前还有位置".format(self.config['seats'][0]))
return self.session,xd_data
else:
print("{}当前还有{}个位置".format(self.config['seats'][0],data[seat_num]))
return self.session,xd_data
else:
print("{}当前没有位置".format(self.config['seats'][0]))
time.sleep(60)
return self.Query()
|
import requests
def get_some_data_from_api():
url = "https://api.icndb.com/jokes/random?firstName=John&lastName=Doe"
# implement some code here
print(url)
pass
|
#!/usr/bin/env python
#
# Copyright (c) 2011 Polytechnic Institute of New York University
# Author: Adrian Sai-wah Tam <adrian.sw.tam@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of New York University.
#
# This program takes the output of routeecmp.py as input, which is in the format
# <time> <linkid> <load>
# It analyzes the time-ordered link load data and outputs the maximum and minimum link load over time.
#
import getopt,sys
###########################################################
# Global parameters
routefile = 'route.txt' # default input file
maxfile = 'max.txt'
minfile = 'min.txt'
#random.seed(1) # Debug use: Uncomment this line for repeatable random numbers
optlist, userlist = getopt.getopt(sys.argv[1:], 'i:M:m:h')
for opt, optarg in optlist:
if opt == '-i':
routefile = optarg
elif opt == '-M':
maxfile = optarg
elif opt == '-m':
minfile = optarg
else:
# getopt will fault for other options
print "Available options"
print " -i file : Input file, default is route.txt"
print " -M file : Output of maximum link load, default is max.txt"
print " -m file : Output of minimum link load, default is min.txt"
print " -h : This help message"
sys.exit(1)
###########################################################
infile = open(routefile, "r")
outmax = open(maxfile, "w")
outmin = open(minfile, "w")
loads = []
oldmax = None
oldmin = None
clock = 0
for line in infile:
token = line.split()
if len(token) != 3: continue
time, link, load = float(token[0]), int(token[1]), float(token[2])
while len(loads) <= link:  # grow the list so every link id has a slot
    loads.append(0.0)
loads[link] = load
if clock < time:
clock = time
maxload = max(loads)
minload = min(loads)
if oldmax != maxload:
#if oldmax != None:
# print >>outmax, "%f %f" % (time,oldmax)
print >>outmax, "%f %f" % (time,maxload)
oldmax = maxload
if oldmin != minload:
#if oldmin != None:
# print >>outmin, "%f %f" % (time,oldmin)
print >>outmin, "%f %f" % (time,minload)
oldmin = minload
infile.close()
outmax.close()
outmin.close()
|
# -*- coding: utf-8 -*-
'''
Created on 07-08-2013
@author: Krzysztof Langner
'''
from collections import defaultdict
import json
import os.path
PREVIEW_LOG_SIZE = 30000
def read_sessions(filename):
sessions = defaultdict(list)
try:
with open(filename, "r") as f:
for line in f:
event = json.loads(line)
session_name = event['session']
sessions[session_name].append(event)
except IOError:
pass
return sessions
def read_recent_sessions(filename):
sessions = defaultdict(list)
try:
with open(filename, "r") as f:
size = os.path.getsize(filename)
if size > PREVIEW_LOG_SIZE:
f.seek(size-PREVIEW_LOG_SIZE)
f.readline()
for line in f.readlines():
event = json.loads(line)
session_name = event['session']
sessions[session_name].append(event)
except IOError:
pass
sessions.default_factory = None  # freeze: lookups of unknown sessions now raise KeyError
return sessions
def read_folder_sessions(folder):
sessions = defaultdict(list)
for filename in os.listdir(folder):
with open(folder+filename, "r") as f:
for line in f:
event = json.loads(line)
session_name = event['session']
sessions[session_name].append(event)
return sessions |
import z
import buy
stocks = z.getp("listofstocks")
from sortedcontainers import SortedSet
# shrinking outstanding shares and increasing marketcap with slight volume
sset = SortedSet()
for astock in stocks:
try:
if buy.getFrom("latestmc", astock, None) > 1500:
continue
yearagomc_size = buy.getFrom("yearagomc", astock)
mc_size = buy.getFrom("latest_mc", astock)
mcc = round(mc_size/yearagomc_size,4)
dr, ebit, r_value, slope = buy.getFrom("wlp_lasts", astock)
volr = buy.getFrom("voldic", astock)
if slope < -0.7 and abs(r_value) > 0.8 and mcc > 1.1 and volr < 1700:
buy.addSortedHigh("mc_os", mcc, astock, 30)
except Exception as e:
pass
bar = buy.getSorted("mc_os")
print("bar : {}".format( bar ))
buy.multiple(bar, runinit = True)
|
import picamera
import time
camera = picamera.PiCamera()
camera.resolution = (320, 160)
camera.rotation = 180
camera.start_preview()
time.sleep(5)
camera.capture('photo.jpg')
camera.stop_preview()
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UrlTable(models.Model):
title = models.CharField(max_length=100, null=True, blank=True)
long_url = models.CharField(max_length=1000, null=True, blank=True)
short_hash= models.CharField(max_length=1000, null=True, blank=True)
no_clicks= models.IntegerField(default=0,null=True, blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"{self.title}" |
#
# cogs/text/meme.py
#
# mawabot - Maware's selfbot
# Copyright (c) 2017 Ma-wa-re, Ammon Smith
#
# mawabot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
''' Has commands for meme-y text transformation '''
import asyncio
import logging
import random
import re
import subprocess
import discord
from discord.ext import commands
__all__ = [
'Meme',
]
CHECK_EM_URL = 'https://media.discordapp.net/attachments/336147052855558148/357986515030376458/check-em.jpg'
BAD_CHECK_EM_URL = 'https://cdn.discordapp.com/attachments/287311630880997377/332092380738224128/raw.gif'
OFF_BY_ONE_URL = 'https://cdn.discordapp.com/attachments/336147052855558148/357987379283361802/0d6.png'
DISCORD_STRINGS = re.compile(r'(<\S*>)')
logger = logging.getLogger(__name__)
class Meme:
__slots__ = (
'bot',
'recent_messages',
'regional_emojis',
)
def __init__(self, bot):
self.bot = bot
self.recent_messages = set()
self.regional_emojis = {
'a': '\N{REGIONAL INDICATOR SYMBOL LETTER A}',
'b': '\N{REGIONAL INDICATOR SYMBOL LETTER B}',
'c': '\N{REGIONAL INDICATOR SYMBOL LETTER C}',
'd': '\N{REGIONAL INDICATOR SYMBOL LETTER D}',
'e': '\N{REGIONAL INDICATOR SYMBOL LETTER E}',
'f': '\N{REGIONAL INDICATOR SYMBOL LETTER F}',
'g': '\N{REGIONAL INDICATOR SYMBOL LETTER G}',
'h': '\N{REGIONAL INDICATOR SYMBOL LETTER H}',
'i': '\N{REGIONAL INDICATOR SYMBOL LETTER I}',
'j': '\N{REGIONAL INDICATOR SYMBOL LETTER J}',
'k': '\N{REGIONAL INDICATOR SYMBOL LETTER K}',
'l': '\N{REGIONAL INDICATOR SYMBOL LETTER L}',
'm': '\N{REGIONAL INDICATOR SYMBOL LETTER M}',
'n': '\N{REGIONAL INDICATOR SYMBOL LETTER N}',
'o': '\N{REGIONAL INDICATOR SYMBOL LETTER O}',
'p': '\N{REGIONAL INDICATOR SYMBOL LETTER P}',
'q': '\N{REGIONAL INDICATOR SYMBOL LETTER Q}',
'r': '\N{REGIONAL INDICATOR SYMBOL LETTER R}',
's': '\N{REGIONAL INDICATOR SYMBOL LETTER S}',
't': '\N{REGIONAL INDICATOR SYMBOL LETTER T}',
'u': '\N{REGIONAL INDICATOR SYMBOL LETTER U}',
'v': '\N{REGIONAL INDICATOR SYMBOL LETTER V}',
'w': '\N{REGIONAL INDICATOR SYMBOL LETTER W}',
'x': '\N{REGIONAL INDICATOR SYMBOL LETTER X}',
'y': '\N{REGIONAL INDICATOR SYMBOL LETTER Y}',
'z': '\N{REGIONAL INDICATOR SYMBOL LETTER Z}',
'0': '0\N{COMBINING ENCLOSING KEYCAP}',
'1': '1\N{COMBINING ENCLOSING KEYCAP}',
'2': '2\N{COMBINING ENCLOSING KEYCAP}',
'3': '3\N{COMBINING ENCLOSING KEYCAP}',
'4': '4\N{COMBINING ENCLOSING KEYCAP}',
'5': '5\N{COMBINING ENCLOSING KEYCAP}',
'6': '6\N{COMBINING ENCLOSING KEYCAP}',
'7': '7\N{COMBINING ENCLOSING KEYCAP}',
'8': '8\N{COMBINING ENCLOSING KEYCAP}',
'9': '9\N{COMBINING ENCLOSING KEYCAP}',
'!': '\N{HEAVY EXCLAMATION MARK SYMBOL}',
'?': '\N{BLACK QUESTION MARK ORNAMENT}',
}
self.bot.add_listener(self.on_message)
async def on_message(self, message):
''' Handling for text-based messages '''
if message.id in self.recent_messages:
return
else:
self.recent_messages.add(message.id)
if len(self.recent_messages) > 10:
self.recent_messages.pop()
if message.author == self.bot.user and message.content == 'oh no.':
logger.info(f"Sending 'oh no.' for {message.id}")
await self._ohno(message.channel)
def _regional_indicators(self, text, big=False):
''' Helper that formats input text into regional indicators '''
# Note, can't pass in sep directly, causes a TypeError
# Something else is probably passing something called sep in automatically
sep = ' ' if big else '\u200b'
def mapper(s):
if s.startswith('<'):
return s
return sep.join(self.regional_emojis.get(c.lower(), c) for c in s)
return ''.join(map(mapper, DISCORD_STRINGS.split(text)))
@commands.command(aliases=['ri'])
async def regional_indicators(self, ctx, *, text: str):
''' Makes the whole message into regional_indicator emojis '''
content = self._regional_indicators(text)
await asyncio.gather(
ctx.send(content=content),
ctx.message.delete(),
)
@commands.command(aliases=['ril'])
async def regional_indicators_large(self, ctx, *, text: str):
''' Same as regional_indicators except the letters come out larger '''
content = self._regional_indicators(text, big=True)
await asyncio.gather(
ctx.send(content=content),
ctx.message.delete(),
)
@commands.command(aliases=['sw'])
async def spacewords(self, ctx, *, text: str):
''' Spaces out words '''
content = ' . '.join(' '.join(word) for word in text.split(' '))
await ctx.message.edit(content=content)
@commands.command(aliases=['cw'])
async def crossword(self, ctx, *, text: str):
''' "Crossword"-ifys the given text '''
text = text.upper()
lines = [text] + list(text[1:])
await ctx.message.edit(content='\n'.join(lines))
@commands.command()
async def kerrhau(self, ctx, *text: str):
''' "kerrhau"-ifys the given text '''
text = list(text)
words = []
while text:
word = []
for _ in range(random.randint(1, 3)):
if text:
word.append(text.pop(0))
words.append(' '.join(word))
last = words[-1][-1]
words[-1] = words[-1][:-1]
words.append(last)
await ctx.message.edit(content='\n'.join(words))
@commands.command()
async def clap(self, ctx, *, text: str):
''' Replaces spaces with the clap emoji 👏 '''
content = ' 👏 '.join(text.upper().split())
await ctx.message.edit(content=content)
@commands.command()
async def clap2(self, ctx, *, text: str):
''' Clap variant that starts and ends with claps too '''
content = ''.join(f'👏 {word}' for word in text.upper().split())
await ctx.message.edit(content=content + ' 👏')
@staticmethod
def _cowsay(args, text):
text = text.replace('\n', '\n\n').replace("```", "'''")
args.append(text)
output = subprocess.check_output(args, stderr=subprocess.DEVNULL, timeout=0.5)
content = '\n'.join((
'```',
output.decode('utf-8'),
'```',
))
return content
@commands.command()
async def cowsay(self, ctx, *, text: str):
''' Replaces the given text with cowsay '''
content = self._cowsay(['cowsay'], text)
await ctx.message.edit(content=content)
@commands.command()
async def cowthink(self, ctx, *, text: str):
''' Replaces the given text with cowthink '''
content = self._cowsay(['cowthink'], text)
await ctx.message.edit(content=content)
@commands.command()
async def cowcustom(self, ctx, cowfile: str, *, text: str):
''' Replaces the given text with the given cow file '''
content = self._cowsay(['cowsay', '-f', cowfile], text)
await ctx.message.edit(content=content)
@staticmethod
async def _ohno(sendable):
''' oh no. '''
url = f'https://www.raylu.net/f/ohno/ohno{random.randint(1, 53)}.png'
embed = discord.Embed().set_image(url=url)
await sendable.send(embed=embed)
@commands.command()
async def ohno(self, ctx):
''' Bot command /ohno '''
await asyncio.gather(
self._ohno(ctx),
ctx.message.delete(),
)
@staticmethod
def is_dubs(num):
return (num % 100) % 11 == 0  # last two digits are equal: 00, 11, ..., 99
@commands.command(aliases=['dubs', 'trips'])
async def checkem(self, ctx):
''' Check 'em! '''
number = random.randint(1, 10 ** 16)
embed = discord.Embed(type='rich', description=f'```{number}```')
embed.set_footer(
text='Brought to you by the anti-semitic frog foundation',
icon_url='https://i.imgur.com/Gn3vKn6.png',
)
if self.is_dubs(number):
embed.set_image(url=CHECK_EM_URL)
elif self.is_dubs(number + 1) or self.is_dubs(number - 1):
embed.set_image(url=OFF_BY_ONE_URL)
else:
embed.set_image(url=BAD_CHECK_EM_URL)
await ctx.send(embed=embed)
|
# @see https://adventofcode.com/2015/day/1
instructions = ''
with open('day1_input.txt', 'r') as fp:
instructions = fp.readline().strip()  # strip the newline so it is not counted as a ')'
# At what floor does Santa stop?
def last_floor(l: str):
floor = 0
for x in l:
if x == '(':
floor += 1
else:
floor -= 1
return floor
# At which instruction does Santa enter the
# basement for the first time?
def enters_basement_at(l: str):
floor, steps = 0, 0
for x in l:
steps += 1
if x == '(':
floor += 1
else:
floor -= 1
if floor == -1:
break
return steps
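# Equivalent one-pass sketch using itertools.accumulate (an alternative,
# not part of the original solution): the first position where the running
# sum of +1/-1 reaches -1.
from itertools import accumulate

def enters_basement_accumulate(l: str):
    for step, floor in enumerate(accumulate(1 if x == '(' else -1 for x in l), 1):
        if floor == -1:
            return step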
print('------------ PART 01 -------------')
print('Floor number: ', last_floor(instructions))
print('------------ PART 02 -------------')
print('Enters basement at: ', enters_basement_at(instructions)) |
"""
* \author Hugo Silva
* \version 1.0
* \date July 2014
*
* \section LICENSE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import pylab
import traceback
from bitalino import *
from sys import exit
from txws import WebSocketFactory
from twisted.internet import protocol, reactor
def tostring(data):
"""
:param data: object to be converted into a JSON-compatible `str`
:type data: any
:return: JSON-compatible `str` version of `data`
Converts `data` from its native data type to a JSON-compatible `str`.
"""
dtype=type(data).__name__
if dtype=='ndarray':
if pylab.shape(data)!=(): data=list(data)
else: data='"'+data.tostring()+'"'
elif dtype=='dict' or dtype=='tuple':
try: data=json.dumps(data)
except: pass
elif dtype=='NoneType':
data=''
elif dtype=='str' or dtype=='unicode':
data=json.dumps(data)
return str(data)
class VS(protocol.Protocol):
def connectionMade(self):
"""
Callback executed when the client successfully connects to the server.
"""
print "CONNECTED"
# Notify the client that a connection has been established
self.transport.write('server.connected()')
def dataReceived(self, req):
"""
:param req: Python instruction sent by the client
:type req: str
Evaluates the instruction `req` sent by the client and responds with an identical instruction, in which the return value of that instruction is the input argument.
"""
try:
# Show the request on the terminal window
print '> ' + req
# Evaluate the request and retrieve the result
res = eval(req)
# If the request is to shutdown the server no further action is needed
if (req.find('shutdown')>=0):
return
# Place the result as an argument to the instruction received as the request
li=req.find('(')
li=li if li>=0 else None
res=req[:li]+'('+tostring(res)+');'
# Show the response on the terminal window
print '< ' + res
# Should an exception occur, it is propagated to the client
except Exception as e:
print traceback.format_exc()
res='sys.exception("'+str(e)+'")'
# Send the response to the client
self.transport.write(res)
def connectionLost(self, reason):
"""
Callback executed when the connection to the client is lost.
"""
server.shutdown()
return
class server(object):
@staticmethod
def BITalino(macAddress):
"""
:param macAddress: string with a BITalino MAC address or COM port
:type macAddress: str
:return: status of the connection
Proxy function that the client can use to initialize the connection to a BITalino device.
"""
global device
try:
device=BITalino(macAddress)
res=True
except Exception as e:
print traceback.format_exc()
res='sys.exception("'+str(e)+'")'
return res
@staticmethod
def shutdown():
"""
Utility function that the client can use to shutdown the server.
"""
connector.stopListening()
try: reactor.stop()
except: pass
print "DISCONNECTED"
class VSFactory(protocol.Factory):
def buildProtocol(self, addr):
return VS()
if __name__=='__main__':
try:
ip_addr, port = "127.0.0.1", 9001
device = None
print "LISTENING AT %s:%s"%(ip_addr, port)
connector = reactor.listenTCP(port, WebSocketFactory(VSFactory()))
reactor.run()
except Exception as e:
print traceback.format_exc()
|
#!/bin/python
# Description: Creates a trivial package file from a list of files
#
# The package file contains the source files concatenated, with headers that allow their
# easy extraction. The file starts with a number (long int) indicating the number of individual
# files contained in the package. Following this number is a set of headers; each header describes
# one file in the package and comprises a filename, a file size and flags (reserved).
# After the headers, the source files are concatenated one after another.
#
# Invocation: pack.py [-o pakfile] file1 file2 ....
import sys, getopt
import struct, os
import os.path
MAGIC = 0xe411b783
if sys.argv[1] == '-o':
out_file = file(sys.argv[2], 'wb')
in_filename_list = sys.argv[3:]
else:
out_file = sys.stdout
in_filename_list = sys.argv[1:]
def create_header(filename, size, flags):
h = struct.pack("24sII", filename, size, flags)
return h
# The package starts with a magic number
out_file.write(struct.pack("I", MAGIC))
# Write the file number
num_files = len(in_filename_list)
out_file.write(struct.pack("I", num_files))
print("There are %d files in the package" % num_files)
# Write the headers
file_list = []
for ifn in in_filename_list:
# If the filename contains colon, the written filename is different
# from the source name
if ":" in ifn:
(source_fn, dest_fn) = ifn.split(':')
else:
source_fn = ifn
dest_fn = os.path.split(ifn)[1] # Remove the dir name
size = os.stat(source_fn).st_size
header = create_header(dest_fn, size, 0)
out_file.write(header)
file_list.append({
'source_fn': source_fn,
'dest_fn': dest_fn,
'size': size
})
print("Wrote header for %s (as %s), size %d" % (source_fn, dest_fn, size))
# Copy the source file content
for fe in file_list:
filename = fe['source_fn']
data = file(filename, 'rb').read()
out_file.write(data)
print("File %s has been written" % (filename,))
|
"""
Define fixtures to provide common functionality for Mimic testing
"""
from __future__ import absolute_import, division, unicode_literals
from mimic.test.helpers import json_request
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from twisted.internet.task import Clock
class TenantAuthentication(object):
"""
Provides some functionality to help log into mimic identity with a
particular username and password
"""
def __init__(self, test_case, root, username, password):
"""
Authenticate a particular user against the mimic root.
:param root: The :class:`twisted.web.resource.IResource` at the root
of the mimic API resource tree.
:param username: the username to authenticate as
:param password: the password with which to use to authenticate
"""
_, self.service_catalog_json = test_case.successResultOf(json_request(
test_case, root, b"POST", b"/identity/v2.0/tokens",
{
"auth": {
"passwordCredentials": {
"username": username,
"password": password,
},
}
}
))
def get_service_endpoint(self, service_name, region=''):
"""
Return the publicURL for the given service and region. Note that if there are multiple
endpoints for a given region, the first will be returned, and if no region is specified,
the first endpoint will be returned.
:param unicode service_name: The name of the service for which to get an endpoint as
listed in the service catalog
:param unicode region: The service catalog region of the desired endpoint
"""
for service in self.service_catalog_json['access']['serviceCatalog']:
if service['name'] == service_name:
for item in service['endpoints']:
if (item['region'] == region) or (region == ''):
return item['publicURL']
raise KeyError("No such service {}".format(service_name))
class APIMockHelper(object):
"""
Provides common functionality for mimic tests
"""
def __init__(self, test_case, apis):
"""
Initialize a mimic core and the specified :obj:`mimic.imimic.IAPIMock`s
:param apis: A list of :obj:`mimic.imimic.IAPIMock` objects to be initialized
"""
self.test_case = test_case
self.clock = Clock()
self.core = MimicCore(self.clock, apis)
self.root = MimicRoot(self.core).app.resource()
# Pass in arbitrary username and password
self.auth = TenantAuthentication(test_case, self.root,
"test1", "test1password")
# map some attributes and methods
self.service_catalog_json = self.auth.service_catalog_json
self.get_service_endpoint = self.auth.get_service_endpoint
tenant_id = self.auth.service_catalog_json["access"]["token"]["tenant"]["id"]
service_name = apis[0].catalog_entries(tenant_id)[0].name
self.uri = self.get_service_endpoint(service_name)
|
print("-------------------对象所属的类之间没有继承关系------------------------")
# 调用同一个函数fly(), 传入不同的参数(对象),可以达成不同的功能
class Duck(object): # 鸭子类
def fly(self):
print("鸭子沿着地面飞起来了")
class Swan(object): # 天鹅类
def fly(self):
print("天鹅在空中翱翔")
class Plane(object): # 飞机类
def fly(self):
print("飞机隆隆地起飞了")
def fly(obj): # 实现飞的功能函数
obj.fly()
duck = Duck()
fly(duck)
swan = Swan()
fly(swan)
plane = Plane()
fly(plane)
print("-------------------对象所属的类之间有继承关系(应用更广)------------------------")
class gradapa(object):
def __init__(self,money):
self.money = money
def p(self):
print("this is gradapa")
class father(gradapa):
def __init__(self,money,job):
super().__init__(money)
self.job = job
def p(self):
print("this is father,我重写了父类的方法")
class mother(gradapa):
def __init__(self, money, job):
super().__init__(money)
self.job = job
def p(self):
print("this is mother,我重写了父类的方法")
# define a function that calls the object's p() method
def fc(obj):
obj.p()
gradapa1 = gradapa(3000)
father1 = father(2000,"worker")
mother1 = mother(1000,"teacher")
# Polymorphism: passing different objects to the same function produces different behaviour.
fc(gradapa1)
fc(father1)
fc(mother1)
print('------------------------------')
class Animal(object): # Animal base class
def run(self):
print("Animal is running...")
class Dog(Animal): # Dog inherits Animal and has no run method of its own
pass
class Cat(Animal): # Cat inherits Animal and overrides run
def run(self):
print('Cat is running...')
pass
class Car(object): # Car does not inherit Animal but defines its own run method
def run(self):
print('Car is running...')
class Stone(object): # Stone neither inherits Animal nor defines run, so run_twice(Stone()) below raises AttributeError
pass
def run_twice(animal):
animal.run()
run_twice(Animal())
run_twice(Dog())
run_twice(Cat())
run_twice(Car())
run_twice(Stone()) |
import sys
def getfib(num1, num2, i):
if i < 3:
return 1
if i == 3:
return num1 + num2
return getfib(num2, num1 + num2, i - 1)
def getfibindex(i):
return getfib(1, 1, i)
sys.setrecursionlimit(6000)
print(getfibindex(4782))
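# An iterative sketch (an equivalent alternative, not in the original) that
# avoids raising the recursion limit altogether.
def getfibindex_iter(i):
    a, b = 1, 1
    for _ in range(i - 2):
        a, b = b, a + b
    return b

assert getfibindex_iter(4782) == getfibindex(4782)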
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 08:49:43 2020
@author: Administrator
"""
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.metrics.cluster import adjusted_rand_score as ari
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
X = pd.read_csv('yan/yan.csv',header=None)
X = np.array(X)
X = X.transpose()
label = pd.read_csv('yan/yan_label.csv')
y=np.array(label)
label = y.ravel()
pca=PCA(n_components=2)
A = pca.fit_transform(X)
c = label.max()
kk = KMeans(n_clusters=c)
julei = kk.fit(A)
julei = julei.labels_
print('NMI value is %f \n' % nmi(julei.flatten(),label.flatten()))
print('ARI value is %f \n' % ari(julei.flatten(),label.flatten()))
print('HOM value is %f \n' % metrics.homogeneity_score(julei,label))
print('AMI value is %f \n' % metrics.adjusted_mutual_info_score(label, julei))
|
def printLine():
print("-"*30)
def printLine_2(n):
i = 0
while i<n:
printLine()
i+=1
num = int(input("Enter the number of repetitions: "))
printLine_2(num) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 15:32:36 2017
@author: dgratz
"""
import matplotlib.pyplot as plt
'''
read in files for CL graphs and sync graphs
'''
from plotBeat2beatCLGrid import b2bCL
from plotSynchronyMeasure import b2bSync
b2bSTimes,b2bST,b2bSV=b2bSync('D:/synchrony-data/AllConnLogNormal/0.0/')
b2bCLX, b2bCLY = b2bCL('D:/synchrony-data/AllConnLogNormal/0.0')
for i in range(b2bCLX.shape[0]):
for j in range(b2bCLX.shape[1]):
plt.plot(b2bCLX[i,j],b2bCLY[i,j])
plt.plot(b2bSTimes,b2bST) |
from django.shortcuts import get_object_or_404
from celery.decorators import task
from celery.utils.log import get_task_logger
from .calculations.calculation_driver import create_quick_look
from django.contrib.auth.models import User
logger = get_task_logger(__name__)
@task(name="quicklook.create_quicklook")
def generate_quicklook(user_id,from_date,to_date):
'''
Celery task to generate a quick look for the given
date range
'''
try:
user = get_object_or_404(User, pk=user_id)
create_quick_look(user,from_date,to_date)
logger.info("Quick look generated successfully")
except Exception as e:
logger.error(str(e),exc_info=True) |
class Square:
side = 3
def __init__(self):
self.side = 0
def area(self):
return self.side * self.side
ob = Square()
print(Square.side)
print(Square.area(ob))
print(ob.side)
ob.side = 4
print(ob.area())
del ob.side # deleting the instance attribute exposes the class attribute 'side' again
print(ob.area())
|
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('login', views.loginUser, name='loginUser'),
path('logout', views.logoutUser, name='logoutUser'),
path('form', views.get_incident_report, name='get_incident_report')
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 30 09:50:05 2021
@author: aureoleday
"""
import numpy as np
def calc_lr(target,samples):
a = []
if samples.size == samples.shape[0]:
X = np.c_[np.ones(samples.shape[0]),samples]
a = (np.matrix(np.dot(X.T,X)).I).dot(X.T).dot(target)
else:
for col in samples.T:
X = np.c_[np.ones(samples.shape[0]),col]
w = (np.matrix(np.dot(X.T,X)).I).dot(X.T).dot(target)
if isinstance(a, list):  # first regressor: start the coefficient matrix
    a = w
else:
    a = np.r_[a,w]
return a.T
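# Sanity-check sketch (an assumption, not in the original script): the same
# per-column simple regressions via numpy's least-squares solver.
def calc_lr_lstsq(target, samples):
    cols = samples.T if samples.ndim > 1 else [samples]
    fits = [np.linalg.lstsq(np.c_[np.ones(len(target)), col], target, rcond=None)[0]
            for col in cols]
    return np.array(fits).T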
data = np.loadtxt(open("test.csv","rb"),delimiter=",",skiprows=0)
y = data[:,0]
x = data[:,1:]
w = calc_lr(y,x)
np.savetxt("result_w.csv", w, delimiter=",",fmt='%10.5f')
print(w)
|
import numpy as np
def cluster_results(clusters, pids_array):
clusters_pids = [[] for _ in range(np.amax(clusters) + 1)] ## one list per cluster;
## the list index is the cluster id
for element in range(len(clusters)):
clusters_pids[clusters[element]].append(pids_array[element]) ## add the pids to the clusters_pids
# now build the same mapping as a dictionary
print(clusters_pids)
clusters_dict = {i: clusters_pids[i] for i in range(0, len(clusters_pids))}
print(clusters_dict)
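# Usage sketch (illustrative values, not from the original): cluster labels
# as produced by e.g. sklearn KMeans, paired with the pids they belong to.
if __name__ == '__main__':
    labels = np.array([0, 1, 0, 2, 1])
    pids = np.array([101, 102, 103, 104, 105])
    cluster_results(labels, pids)  # prints [[101, 103], [102, 105], [104]] and its dict form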
|
import csv
import cv2
import numpy as np
from sklearn.utils import shuffle
from preprocess import preprocess # resize image module
lines = []
# Left and right camera input function
def multicamera(line, lines, correction):
line_l, line_r = [], []
line_l = line
line_r = line
line_l[3] = float(line[3]) + correction
line_r[3] = float(line[3]) - correction
line_l[0] = line_l[1]
line_r[0] = line_r[2]
lines.append(line_l)
lines.append(line_r)
return lines
# open and multiple camera utilization
correction = 0.2
with open('./data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
# lines = multicamera(line, lines, correction)
#with open('./data/driving_log2.csv') as csvfile:
# reader = csv.reader(csvfile)
# for line in reader:
# lines.append(line)
# lines = multicamera(line, lines, correction)
with open('./data/driving_log3.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
# lines = multicamera(line, lines, correction)
# Split the samples
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# generator function
batch_size = 64
def generator(samples, batch_size):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
name = './data/IMG/'+batch_sample[0].split('\\')[-1]
image = cv2.imread(name)
resize = preprocess(image) # resize image
angle = float(batch_sample[3])
images.append(resize)
angles.append(angle)
images.append(cv2.flip(resize,1)) # augment a fillped image
angles.append(angle * -1.0)
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size)
validation_generator = generator(validation_samples, batch_size)
# The Nvidia CNN model
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape = (70, 204, 3)))
# model.add(Cropping2D(cropping=((70,25), (0,0)))) - I Didn't use it.
model.add(Convolution2D(24,5,5)) #Conv 1
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(36,5,5)) #Conv 2
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(48,5,5)) #Conv 3
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(64,3,3)) #Conv 4
model.add(Activation('relu'))
model.add(Convolution2D(64,3,3)) #Conv 5
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam', metrics = ['accuracy'])
# model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=10, batch_size=64) - No need after introducing generator.
model.fit_generator(train_generator, samples_per_epoch= len(train_samples)*2, validation_data=validation_generator, \
nb_val_samples=len(validation_samples)*2, nb_epoch=10) #train_sample and validation_sample multipled by 2 because of flipping augmentation
model.save('model.h5')
print(model.summary()) |
import datetime
from models import Base
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Float
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import validates
class PaymentPlan(Base):
__tablename__ = 'payment_plans'
type = 'payment_plan'
id = Column(Integer, primary_key=True, unique=True)
created = Column(DateTime)
modified = Column(DateTime, onupdate=datetime.datetime.utcnow)
property_id = Column(Integer, ForeignKey('properties.id'))
price = Column(Float, nullable=False)
settings = Column(JSONB)
payment_system_data = Column(JSONB)
meta = Column(JSONB)
@validates('modified')
def update_timestamp(self, key, value):  # SQLAlchemy validators receive (key, value)
    return datetime.datetime.now()
|
try:
import cv2
import numpy as np
from matplotlib import pyplot as plt
except:
print ("please install the dependencies \n using command pip3 install requirements.txt")
image=cv2.imread("images/obama.jpg") #reading the image into opencv
image_bw=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image_hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([image],[0],None,[256],[0,256]) #Calculating histogram of the image
cdf = hist.cumsum() #calculating the CDF of the histogram
plt.subplot(121) #to plot the histogram using matplotlib
plt.plot(hist)
plt.suptitle('Before and After histogram normalisation', fontsize=14) #histogram title
m=cdf.min()
d=(image_bw.shape[0]*image_bw.shape[1])-m
cdf_norm=((cdf-m)/d)*255
cdf_norm=cdf_norm.astype("uint8")
normalised_image=cdf_norm[image_bw] # map each grey level of the input image through the normalised CDF
#Calculating histogram of the normalised image
hist = cv2.calcHist([normalised_image],[0],None,[256],[0,256])
cdf = hist.cumsum()
plt.subplot(122) #to plot the histogram using matplotlib
plt.plot(hist)
plt.show()
#histogram normalisation using opencv
normalised_image_2=cv2.equalizeHist(image_bw)
#brightness
new_image=cv2.cvtColor(image,cv2.COLOR_BGR2HSV) #converting the image to HSV and increasing only the Value part of the image (3rd channel)
value=25 #brightness increment value
new_image[:,:,2]=np.where((255-new_image[:,:,2]) <value,255,new_image[:,:,2]+value) # saturating add: clamp at 255
new_image=cv2.cvtColor(new_image,cv2.COLOR_HSV2BGR)
#saturation
new_image2=cv2.cvtColor(image,cv2.COLOR_BGR2HSV) #converting the image to HSV and increasing only the Saturation part of the image (2nd channel)
value=25 #saturation increment value
new_image2[:,:,1]=np.where((180-new_image2[:,:,1]) <value,180,new_image2[:,:,1]+value)
new_image2=cv2.cvtColor(new_image2,cv2.COLOR_HSV2BGR)
cv2.imshow("Black and White ,Histogram Normalised , Histogram Normalised using OpenCv",np.hstack((image_bw,normalised_image,normalised_image_2)))
cv2.imshow("orignal image , brightness increased image",np.hstack((image,new_image))) #To show the ouput
cv2.imshow("orignal image , saturation increased image",np.hstack((image,new_image2))) #To show the ouput
cv2.imshow("RGB image , HSV image",np.hstack((image,image_hsv)))
cv2.waitKey(0) #to keep the ouput window to open
cv2.destroyAllWindows() #pressing anykey to close the ouput windows
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper that samples a new task everytime the environment is reset."""
from mtenv import MTEnv
from mtenv.utils.types import ObsType
from mtenv.wrappers.multitask import MultiTask
class SampleRandomTask(MultiTask):
def __init__(self, env: MTEnv):
"""Wrapper that samples a new task everytime the environment is
reset.
Args:
env (MTEnv): Multitask environment to wrap over.
"""
super().__init__(env=env)
def reset(self) -> ObsType:
self.env.reset_task_state()
return self.env.reset()
|
# -*- python -*-
# Assignment: MathDojo
#
# HINT: To do this exercise, you will probably have to use 'return self'.
# If the method returns itself (an instance of itself), we can chain methods.
#
# PART I
#
# Create a Python class called MathDojo that has the methods:
# - add
# - subtract
# Have these 2 functions take at least 1 parameter.
#
# Then create a new instance called: md
# It should be able to do the following task:
#
# MathDojo().add(2).add(2, 5).subtract(3, 2).result
#
# Which should perform 0+2+(2+5)-(3+2) and return 4.
#
# PART II
#
# Modify MathDojo to take integers and/or lists as parameters, with any number of values
# passed into each list. It should now be able to perform the following tasks:
#
# MathDojo().add([1],3,4).add([3, 5, 7, 8], [2, 4.3, 1.25]).subtract(2, [2,3], [1.1, 2.3]).result
#
# Which should perform 0+1+3+4+(3+5+7+8)+(2+4.3+1.25)-2-(2+3)-(1.1+2.3) and return its result.
#
# PART III
#
# Make any needed changes in MathDojo in order to support tuples of values
# (in addition to lists and singletons).
class MathDojo( object ):
def __init__( self ):
self.result = 0
def add( self, *args ):
for i in args:
if isinstance( i, int ) or isinstance( i, float ):
self.result += i
elif isinstance( i, list ) or isinstance( i, tuple ):
for j in i:
self.add( j )
return( self )
def subtract( self, *args ):
for i in args:
if isinstance( i, int ) or isinstance( i, float ):
self.result -= i
elif isinstance( i, list ) or isinstance( i, tuple ):
for j in i:
self.subtract( j )
return( self )
# Testing
print MathDojo().add(2).add(2, 5).subtract(3, 2).result, "#=> 4"
print MathDojo().add(4,[1],(3)).add(([3, 5], 7, 8), [2, 4.3, 1.25]).subtract(2, (2,3), [1.1, 2.3]).result, "#=> 28.15"
|
from svg.path import parse_path
|
from pymel import core as pm
import maya.cmds as cmds
# TBS for the modern era
def tbs():
particles = check_selection()
if not particles: return
for p in particles:
print p, p.nodeType()
# check to see if particles are already TBS
if pm.objExists("{}.isBig".format(p)):
pm.displayWarning("{} is already TBS".format(p))
continue
transform = p.getParent()
if not pm.objExists("{}.radiusPP".format(p)):
pm.addAttr(p, ln="radiusPP", dt="doubleArray")
pm.addAttr(p, ln="radiusPP0", dt="doubleArray")
if not pm.objExists("{}.RGBPP".format(p)):
pm.addAttr(p, ln="rgbPP", dt="vectorArray")
pm.addAttr(p, ln="rgbPP0", dt="vectorArray")
# add attributes
pm.addAttr(p, ln = "isBig", dt = "doubleArray")
pm.addAttr(p, ln = "isBig0", dt = "doubleArray")
pm.addAttr(p, ln = "noiseMod", dt = "doubleArray")
pm.addAttr(p, ln = "noiseMod0", dt = "doubleArray")
pm.addAttr(p, ln = "isChild", dt = "doubleArray")
pm.addAttr(p, ln = "isChild0", dt = "doubleArray")
transform.addAttr("lifespanBig", at = "double", min = 0, dv = 2, k = True)
transform.addAttr("lifespanBigRand", at = "double", min = 0, dv = 1, k = True)
transform.addAttr("lifespanSmall", at = "double", min = 0, dv = .75, k = True)
transform.addAttr("lifespanSmallRand", at = "double", min = 0, dv = .37, k = True)
transform.addAttr("percentBig", at = "double", min = 0, max = 100, dv = 20, k = True)
transform.addAttr("twinkle", at = "double", min = 0, max = 1, dv = .5, k = True)
transform.addAttr("twinkleSpeed", at = "double", min = 0, dv = 4, k = True)
p.addAttr("lifespanBig", at = "double", min = 0, dv = 2, k = True)
p.addAttr("lifespanBigRand", at = "double", min = 0, dv = 1, k = True)
p.addAttr("lifespanSmall", at = "double", min = 0, dv = .75, k = True)
p.addAttr("lifespanSmallRand", at = "double", min = 0, dv = .37, k = True)
p.addAttr("percentBig", at = "double", min = 0, max = 100, dv = 20, k = True)
p.addAttr("twinkle", at = "double", min = 0, max = 1, dv = .5, k = True)
p.addAttr("twinkleSpeed", at = "double", min = 0, dv = 4, k = True)
#connect the attrs on transform to the attrs on the shape
transform.lifespanBig.connect(p.lifespanBig)
transform.lifespanBigRand.connect(p.lifespanBigRand)
transform.lifespanSmall.connect(p.lifespanSmall)
transform.lifespanSmallRand.connect(p.lifespanSmallRand)
transform.percentBig.connect(p.percentBig)
transform.twinkle.connect(p.twinkle)
transform.twinkleSpeed.connect(p.twinkleSpeed)
# set attributes
p.particleRenderType.set(4)
p.lifespanMode.set(3)
p.radiusScaleInput.set(2)
pm.setAttr("%s.radius"%p, .05)
p.radiusScale[0].radiusScale_Position.set(.85)
p.radiusScale[1].radiusScale_Position.set(1)
p.radiusScale[1].radiusScale_FloatValue.set(0)
p.radiusScaleRandomize.set(.25)
# Old Expressions
"""
pm.dynExpression(p,
s=".noiseMod = rand(2);\nseed(.particleId);\nfloat $twinkle = ((noise(.age * .twinkleSpeed * .noiseMod) + 1) * .twinkle) / 2;\n\nif (.percentBig && .particleId % floor(100/.percentBig) == 0 && !.isChild)\n{\n .isBig = 1;\n .lifespanPP = .lifespanBig + rand(0 - .lifespanBigRand,.lifespanBigRand);\n .rgbPP = <<1 - $twinkle, 0, 0 >>;\n}\nelse\n{\n .isBig = 0;\n .lifespanPP = .lifespanSmall + rand(0 - .lifespanSmallRand,.lifespanSmallRand);\n .rgbPP = <<0, 0, 1 - $twinkle >>;\n}",
c=True)
pm.dynExpression(p,
s="seed(.particleId);\nfloat $twinkle = ((noise(.age * .twinkleSpeed * .noiseMod) + 1) * .twinkle) / 2;\n\nif (.isBig){\n .rgbPP = <<1 - $twinkle, 0, 0 >>;\n}\nelse {\n .rgbPP = <<0, 0, 1 - $twinkle>>;\n}",
rbd=True)
"""
# expression
pm.dynExpression(p,
s=".noiseMod = rand(2);\nseed(.particleId);\nfloat $twinkle = ((noise(.age * .twinkleSpeed * .noiseMod) + 1) * .twinkle) / 2;\n\nif (.percentBig && .particleId % floor(100/.percentBig) == 0 && !.isChild)\n{\n .isBig = 1;\n .lifespanPP = .lifespanBig + rand(0 - .lifespanBigRand,.lifespanBigRand);\n .rgbPP = <<1 - $twinkle, 0, 0 >>;\n}\nelse if ( .particleId % 2 == 0)\n{\n .isBig = 0;\n .lifespanPP = .lifespanSmall + rand(0 - .lifespanSmallRand,.lifespanSmallRand);\n .rgbPP = <<0, 1 - $twinkle, 0 >>;\n}\nelse\n{\n .isBig = 0;\n .lifespanPP = .lifespanSmall + rand(0 - .lifespanSmallRand,.lifespanSmallRand);\n .rgbPP = <<0, 0, 1 - $twinkle >>;\n}",
c=True)
pm.dynExpression(p,
s="seed(.particleId);\nfloat $twinkle = ((noise(.age * .twinkleSpeed * .noiseMod) + 1) * .twinkle) / 2;\n\nif (.isBig){\n .rgbPP = <<1 - $twinkle, 0, 0 >>;\n}\nelse if ( .particleId % 2 == 0)\n{\n .rgbPP = <<0, 1 - $twinkle, 0>>;\n}\nelse {\n .rgbPP = <<0, 0, 1 - $twinkle>>;\n}" ,
rbd=True)
pm.setAttr(p + ".particleRenderType", 3)
pm.setAttr(p + ".pointSize", 1)
cmds.vray("addAttributesFromGroup", p, "vray_particle_export_attributes", 1)
pm.setAttr(p + ".vrayPPExportRGB", 1)
pm.setAttr(p + ".vrayPPExportOpacity", 1)
pm.setAttr(p + ".opacityScaleInput", 2)
pm.setAttr(p + ".opacityScale[0].opacityScale_Interp", 2)
pm.setAttr(p + ".opacityScale[0].opacityScale_Position", 0.6)
pm.setAttr(p + ".opacityScale[1].opacityScale_FloatValue", 0.0)
pm.setAttr(p + ".opacityScale[1].opacityScale_Position", 1.0)
pm.setAttr(p + ".opacityScale[1].opacityScale_Interp", 2)
pm.hyperShade(smn=True)
mat = None
for node in pm.ls(sl=1):
if node.type() == 'blinn':
SG = pm.listConnections(node, d=True, t='shadingEngine')[0]
pSamp = pm.listConnections(node, d=True, t='particleSamplerInfo')[0]
pm.delete(node)
ss = pm.shadingNode('surfaceShader', asShader=True)
pm.rename(ss, 'TBS_shader')
pm.connectAttr(ss.outColor, SG.surfaceShader)
pm.connectAttr(pSamp.rgbPP, ss.outColor)
pm.connectAttr(pSamp.opacityPP, ss.outMatteOpacity.outMatteOpacityR)
pm.connectAttr(pSamp.opacityPP, ss.outMatteOpacity.outMatteOpacityG)
pm.connectAttr(pSamp.opacityPP, ss.outMatteOpacity.outMatteOpacityB)
elif node.type() == 'particleCloud':
print 'Deleting volume shader: ', node
pm.delete(node)
def check_selection():
# Check to make sure at least one nParticle system is selected
sel = pm.ls(sl=True)
if not sel:
pm.displayWarning("Nothing selected, Select a nParticle system to make TBS")
return
particles = []
for s in sel:
shapes = s.listRelatives(shapes=True)
for shape in shapes:
print shape.nodeType()
if shape.nodeType() == "nParticle":
particles.append(shape)
if not particles:
pm.displayWarning("Selection is not an nParticle, Select a nParticle system to make TBS")
return
return particles |
import argparse
import json
from os.path import join
from typing import List
import numpy as np
import pandas as pd
from tqdm import tqdm
from docqa import trainer
from docqa.data_processing.document_splitter import MergeParagraphs, TopTfIdf, ShallowOpenWebRanker, FirstN
from docqa.data_processing.preprocessed_corpus import preprocess_par
from docqa.data_processing.qa_training_data import ParagraphAndQuestionDataset
from docqa.data_processing.span_data import TokenSpans
from docqa.data_processing.text_utils import NltkPlusStopWords
from docqa.dataset import FixedOrderBatcher
from docqa.eval.ranked_scores import compute_ranked_scores
from docqa.evaluator import Evaluator, Evaluation
from docqa.model_dir import ModelDir
from build_span_corpus import XQADataset
from docqa.triviaqa.read_data import normalize_wiki_filename
from docqa.triviaqa.training_data import DocumentParagraphQuestion, ExtractMultiParagraphs, \
ExtractMultiParagraphsPerQuestion
from docqa.triviaqa.trivia_qa_eval import exact_match_score as trivia_em_score
from docqa.triviaqa.trivia_qa_eval import f1_score as trivia_f1_score
from docqa.utils import ResourceLoader, print_table
from docqa.config import TRIVIA_QA  # needed below when reloading web-* source files for the official output
"""
Evaluate on XQA data
Modified from docqa/eval/triviaqa_full_document_eval.py
"""
class RecordParagraphSpanPrediction(Evaluator):
def __init__(self, bound: int, record_text_ans: bool):
self.bound = bound
self.record_text_ans = record_text_ans
def tensors_needed(self, prediction):
span, score = prediction.get_best_span(self.bound)
needed = dict(spans=span, model_scores=score)
return needed
def evaluate(self, data: List[DocumentParagraphQuestion], true_len, **kargs):
spans, model_scores = np.array(kargs["spans"]), np.array(kargs["model_scores"])
pred_f1s = np.zeros(len(data))
pred_em = np.zeros(len(data))
text_answers = []
for i in tqdm(range(len(data)), total=len(data), ncols=80, desc="scoring"):
point = data[i]
if point.answer is None and not self.record_text_ans:
continue
text = point.get_context()
pred_span = spans[i]
pred_text = " ".join(text[pred_span[0]:pred_span[1] + 1])
if self.record_text_ans:
text_answers.append(pred_text)
if point.answer is None:
continue
f1 = 0
em = False
for answer in data[i].answer.answer_text:
ans = answer
f1 = max(f1, trivia_f1_score(pred_text, ans))
if not em:
em = trivia_em_score(pred_text, ans)
pred_f1s[i] = f1
pred_em[i] = em
results = {}
results["n_answers"] = [0 if x.answer is None else len(x.answer.answer_spans) for x in data]
if self.record_text_ans:
results["text_answer"] = text_answers
results["predicted_score"] = model_scores
results["predicted_start"] = spans[:, 0]
results["predicted_end"] = spans[:, 1]
results["text_f1"] = pred_f1s
results["rank"] = [x.rank for x in data]
results["text_em"] = pred_em
results["para_start"] = [x.para_range[0] for x in data]
results["para_end"] = [x.para_range[1] for x in data]
results["question_id"] = [x.question_id for x in data]
results["doc_id"] = [x.doc_id for x in data]
return Evaluation({}, results)
def main():
    parser = argparse.ArgumentParser(description='Evaluate a model on XQA data')
parser.add_argument('model', help='model directory')
parser.add_argument('-p', '--paragraph_output', type=str,
help="Save fine grained results for each paragraph in csv format")
    parser.add_argument('-o', '--official_output', type=str, help="Build an official output file with the model's"
                                                                  " most confident span for each (question, doc) pair")
parser.add_argument('--no_ema', action="store_true", help="Don't use EMA weights even if they exist")
parser.add_argument('--n_processes', type=int, default=None,
help="Number of processes to do the preprocessing (selecting paragraphs+loading context) with")
parser.add_argument('-i', '--step', type=int, default=None, help="checkpoint to load, default to latest")
parser.add_argument('-n', '--n_sample', type=int, default=None, help="Number of questions to evaluate on")
    parser.add_argument('-a', '--async', dest='async_', type=int, default=10)  # 'async' is reserved in Python 3.7+, so store it under a safe attribute name
parser.add_argument('-t', '--tokens', type=int, default=400,
help="Max tokens per a paragraph")
parser.add_argument('-g', '--n_paragraphs', type=int, default=15,
help="Number of paragraphs to run the model on")
parser.add_argument('-f', '--filter', type=str, default=None, choices=["tfidf", "truncate", "linear"],
help="How to select paragraphs")
parser.add_argument('-b', '--batch_size', type=int, default=200,
help="Batch size, larger sizes might be faster but wll take more memory")
parser.add_argument('--max_answer_len', type=int, default=8,
help="Max answer span to select")
parser.add_argument('-c', '--corpus',
choices=["en_dev",
"en_test",
"fr_dev",
"fr_test",
"de_dev",
"de_test",
"ru_dev",
"ru_test",
"pt_dev",
"pt_test",
"zh_dev",
"zh_test",
"pl_dev",
"pl_test",
"uk_dev",
"uk_test",
"ta_dev",
"ta_test",
"fr_trans_en_dev",
"fr_trans_en_test",
"de_trans_en_dev",
"de_trans_en_test",
"ru_trans_en_dev",
"ru_trans_en_test",
"pt_trans_en_dev",
"pt_trans_en_test",
"zh_trans_en_dev",
"zh_trans_en_test",
"pl_trans_en_dev",
"pl_trans_en_test",
"uk_trans_en_dev",
"uk_trans_en_test",
"ta_trans_en_dev",
"ta_trans_en_test"],
required=True)
parser.add_argument('--dump_data_pickle_only', action="store_true", default=False)
args = parser.parse_args()
model_dir = ModelDir(args.model)
model = model_dir.get_model()
corpus_name = args.corpus[:args.corpus.rfind("_")]
eval_set = args.corpus[args.corpus.rfind("_")+1:]
dataset = XQADataset(corpus_name)
if eval_set == "dev":
test_questions = dataset.get_dev()
elif eval_set == "test":
test_questions = dataset.get_test()
else:
raise AssertionError()
corpus = dataset.evidence
splitter = MergeParagraphs(args.tokens)
    per_document = args.corpus.startswith("web")  # inherited from the TriviaQA script: only "web" corpora are per-document, so this is always False for the XQA corpora above
filter_name = args.filter
if filter_name is None:
# Pick default depending on the kind of data we are using
if per_document:
filter_name = "tfidf"
else:
filter_name = "linear"
print("Selecting %d paragraphs using method \"%s\" per %s" % (
args.n_paragraphs, filter_name, ("question-document pair" if per_document else "question")))
if filter_name == "tfidf":
para_filter = TopTfIdf(NltkPlusStopWords(punctuation=True), args.n_paragraphs)
elif filter_name == "truncate":
para_filter = FirstN(args.n_paragraphs)
elif filter_name == "linear":
para_filter = ShallowOpenWebRanker(args.n_paragraphs)
else:
raise ValueError()
n_questions = args.n_sample
if n_questions is not None:
test_questions.sort(key=lambda x:x.question_id)
np.random.RandomState(0).shuffle(test_questions)
test_questions = test_questions[:n_questions]
print("Building question/paragraph pairs...")
# Loads the relevant questions/documents, selects the right paragraphs, and runs the model's preprocessor
if per_document:
prep = ExtractMultiParagraphs(splitter, para_filter, model.preprocessor, require_an_answer=False)
else:
prep = ExtractMultiParagraphsPerQuestion(splitter, para_filter, model.preprocessor, require_an_answer=False)
prepped_data = preprocess_par(test_questions, corpus, prep, args.n_processes, 1000)
data = []
for q in prepped_data.data:
for i, p in enumerate(q.paragraphs):
if q.answer_text is None:
ans = None
else:
ans = TokenSpans(q.answer_text, p.answer_spans)
data.append(DocumentParagraphQuestion(q.question_id, p.doc_id,
(p.start, p.end), q.question, p.text,
ans, i))
# Reverse so our first batch will be the largest (so OOMs happen early)
questions = sorted(data, key=lambda x: (x.n_context_words, len(x.question)), reverse=True)
if args.dump_data_pickle_only:
# dump eval data for bert
import pickle
pickle.dump(questions, open("%s_%d.pkl" % (args.corpus, args.n_paragraphs), "wb"))
return
print("Done, starting eval")
if args.step is not None:
if args.step == "latest":
checkpoint = model_dir.get_latest_checkpoint()
else:
checkpoint = model_dir.get_checkpoint(int(args.step))
else:
checkpoint = model_dir.get_best_weights()
if checkpoint is not None:
print("Using best weights")
else:
print("Using latest checkpoint")
checkpoint = model_dir.get_latest_checkpoint()
test_questions = ParagraphAndQuestionDataset(questions, FixedOrderBatcher(args.batch_size, True))
evaluation = trainer.test(model,
[RecordParagraphSpanPrediction(args.max_answer_len, True)],
                              {args.corpus:test_questions}, ResourceLoader(), checkpoint, not args.no_ema, args.async_)[args.corpus]
if not all(len(x) == len(data) for x in evaluation.per_sample.values()):
raise RuntimeError()
df = pd.DataFrame(evaluation.per_sample)
if args.official_output is not None:
print("Saving question result")
fns = {}
if per_document:
            # The unnormalized filenames were not stored exactly, so unfortunately we have to reload
            # the source data to get the exact filenames for the official output file
print("Loading proper filenames")
if args.corpus == 'web-test':
source = join(TRIVIA_QA, "qa", "web-test-without-answers.json")
elif args.corpus == "web-dev":
source = join(TRIVIA_QA, "qa", "web-dev.json")
else:
raise AssertionError()
with open(join(source)) as f:
data = json.load(f)["Data"]
for point in data:
for doc in point["EntityPages"]:
filename = doc["Filename"]
fn = join("wikipedia", filename[:filename.rfind(".")])
fn = normalize_wiki_filename(fn)
fns[(point["QuestionId"], fn)] = filename
answers = {}
scores = {}
for q_id, doc_id, start, end, txt, score in df[["question_id", "doc_id", "para_start", "para_end",
"text_answer", "predicted_score"]].itertuples(index=False):
filename = dataset.evidence.file_id_map[doc_id]
if per_document:
if filename.startswith("web"):
true_name = filename[4:] + ".txt"
else:
true_name = fns[(q_id, filename)]
key = q_id + "--" + true_name
else:
key = q_id
prev_score = scores.get(key)
if prev_score is None or prev_score < score:
scores[key] = score
answers[key] = txt
with open(args.official_output, "w") as f:
json.dump(answers, f)
output_file = args.paragraph_output
if output_file is not None:
print("Saving paragraph result")
df.to_csv(output_file, index=False)
print("Computing scores")
if per_document:
group_by = ["question_id", "doc_id"]
else:
group_by = ["question_id"]
# Print a table of scores as more paragraphs are used
df.sort_values(group_by + ["rank"], inplace=True)
f1 = compute_ranked_scores(df, "predicted_score", "text_f1", group_by)
em = compute_ranked_scores(df, "predicted_score", "text_em", group_by)
table = [["N Paragraphs", "EM", "F1"]]
table += list([str(i+1), "%.4f" % e, "%.4f" % f] for i, (e, f) in enumerate(zip(em, f1)))
print_table(table)
if __name__ == "__main__":
main()
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-07-08
import json
with open("test.txt2") as f:
a = f.read()
print(json.dumps(json.loads(a)))
|
# Print the decomposition of a natural number n > 1 into prime factors. The prime factors must be listed in ascending order and separated by spaces.
# Sample Input:
# 75
# Sample Output:
# 3 5 5
# import java.util.Scanner;
# class Main {
# public static void main(String[] args) {
# int n = 0;
# Scanner sc = new Scanner(System.in);
# if (sc.hasNextInt())
# n = sc.nextInt();
# for (int i = 2; i <= n; i++)
# if (test(i))
# if (n % i == 0) {
# n = n / i;
# System.out.print(i + " ");
# i = 2;} }
# public static boolean test(long n) {
# for (long i = 2;i <= Math.sqrt(n);i++)
# if (n % i == 0)
# return false;
# return true;
# }
# }
n = 75
# n = int(input())
a = []
while n > 1:
i = 2
    while True:
if n%i==0:
a.append(i)
n = n // i
break
else:
i += 1
for i in a:
print(i,end=' ')
print() |
#!/usr/bin/env python3
import subprocess
cmd = "open"
arg = "-a"
prog = "intelliJ IDEA CE"
print('opening '+prog+'...')
subprocess.Popen([cmd, arg, prog])
|
import numpy as np
import tensorflow as tf
from draw import *
x = [1,2,3]
y = [[2,3,4]]
sess = tf.Session()
with tf.variable_scope(''):
y = tf.get_variable(name = 'sigh',initializer = tf.initializers.constant(3),shape = [1,2])
y2 = tf.get_variable(name = 'sigh2',initializer = tf.initializers.constant(3),shape = [3,2])
with tf.variable_scope('', reuse=True):
z = tf.get_variable(name = 'sigh' )
sess.run(tf.global_variables_initializer())
a = tf.get_default_graph().get_tensor_by_name('sigh2:0')
b = tf.size(a, out_type=tf.float32)  # element count as a float tensor (out_type, not the op name, controls the dtype)
b2 = tf.size(a)
c = b / 4
d = b2 / 4
print(sess.run((c,d))) |
start = int(input('enter the start number: '))
end = int(input('enter the end number: '))
for i in range(start, end + 1):
    print(i)
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class KuaishouItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class KuxuanKolUserItem(scrapy.Item):
# define the fields for your item here like:
spider_name = scrapy.Field()
id = scrapy.Field()
userId = scrapy.Field()
kwaiId = scrapy.Field()
principalId = scrapy.Field()
cityName = scrapy.Field()
fan = scrapy.Field()
headurl = scrapy.Field()
ku_value = scrapy.Field()
photo = scrapy.Field()
user_name = scrapy.Field()
user_sex = scrapy.Field()
user_text = scrapy.Field()
avg_view_count = scrapy.Field()
avg_like_count = scrapy.Field()
avg_comment_count = scrapy.Field()
categorys = scrapy.Field()
class KuaishouUserInfoIterm(scrapy.Item):
spider_name = scrapy.Field()
userId = scrapy.Field()
kwaiId = scrapy.Field()
principalId = scrapy.Field()
constellation = scrapy.Field()
cityName = scrapy.Field()
fan = scrapy.Field()
follow = scrapy.Field()
photo = scrapy.Field()
liked = scrapy.Field()
open = scrapy.Field()
playback = scrapy.Field()
nickname = scrapy.Field()
avatar = scrapy.Field()
sex = scrapy.Field()
description = scrapy.Field()
class KuaishouUserPhotoInfoIterm(scrapy.Item):
spider_name = scrapy.Field()
user_photo_info = scrapy.Field()
class KuaishouPhotoCommentInfoIterm(scrapy.Item):
spider_name = scrapy.Field()
photo_id = scrapy.Field()
photo_comment_info = scrapy.Field()
class KuaishouShopInfoIterm(scrapy.Item):
spider_name = scrapy.Field()
userId = scrapy.Field()
shopInfo = scrapy.Field()
class KuaishouShopProductItem(scrapy.Item):
spider_name = scrapy.Field()
userId = scrapy.Field()
productId = scrapy.Field()
productInfo = scrapy.Field()
class KuaishouShopProductDetailItem(scrapy.Item):
spider_name = scrapy.Field()
productId = scrapy.Field()
productDetail = scrapy.Field()
class KuaishouShopProductCommentItem(scrapy.Item):
spider_name = scrapy.Field()
productId = scrapy.Field()
productComment = scrapy.Field()
|
'''
This is an implementation of the Taylor series method of approximating
solutions of ordinary differential equations (here: x' = x / (1 + t),
x(0) = 1). Not quite as effective as the Runge-Kutta method.
'''
def T4():
# initial conditions x(0) = 1, step size of 1/100
x = 1
t = 0
h = 0.01
while t <= 1:
print("t = %2.3f\tx = %9.9f" % (t, x))
st = 1 + t # the successor of t
xp = x / st
xpp = xp / st - x / (st**2)
xppp = xpp / st - 2*xp / (st**2) + 2*x / (st**3)
        xpppp = xppp / st - 3*xpp / (st**2) + 6*xp / (st**3) - 6*x / (st**4)
x = x + h * (xp + h/2 * (xpp + h/3 * (xppp + h/4*xpppp)))
        t = t + h  # repeated floating-point addition slowly accumulates rounding error in t
T4()
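# Illustrative check (assumption: the ODE integrated above is x' = x / (1 + t),
# as implied by xp = x / st). It is separable, with exact solution x(t) = 1 + t,
# so the final value printed by T4() should be close to 2.0 at t = 1.
def exact_solution(t):
    return 1.0 + t

print("exact x(1) = %9.9f" % exact_solution(1.0))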
|
import numpy as np
from numpy.random import Generator, PCG64
import matplotlib.pyplot as plt
import time
a = np.arange(100000)
r = Generator(PCG64())
start = time.time()
for _ in a:  # loop variable unused; we only time repeated RNG draws plus exp
    np.exp(r.standard_normal()*10000)  # note: exp at this scale can overflow to inf with a RuntimeWarning
end = time.time()
print("duration : %.3f ms" % ((end - start)*1000))
|
# Implement stochastic gradient descent, i.e. the SGD (stochastic gradient descent) and update_mini_batch methods of the Neuron class. When you are ready to submit, simply copy the corresponding functions (the ones you wrote in the notebook) here. Copy them without the class indentation; a template will already be in the answer field, use it as a guide. The function signatures are given in the notebook and must stay unchanged.
# The task turned out to be very hard, especially for those with little programming experience. Read the comments in the provided code carefully to understand what is expected of your functions. Above all, do not rush while writing the code: haste leads to frustrating mistakes and enormous time costs.
# SGD implements the main loop of the algorithm. It must return 1 if gradient descent converged, and 0 if the maximum number of iterations was reached before the changes in the objective function became sufficiently small.
# update_mini_batch computes the gradient and updates the neuron's weights from the whole batch passed to it; in addition, it returns 1 if the algorithm converged (the absolute change of the objective function before and after the weight update is <
# eps), otherwise it returns 0.
# The required external helpers (compute_grad_analytically, J_quadratic) are already defined just below the Neuron class.
# Functions that may come in handy:
# np.arange - create a sequence (although plain list(range( ... )) also works)
# np.random.shuffle - shuffle a sequence
# np.random.choice - randomly pick the required number of elements from a sequence
# If the solution feels bulky (the SGD function is much longer than 10 lines) - it may help to revisit the numpy lesson. At the very least, remember that if X is a matrix (np.array with shape = (n, m)) and idx = [1, 5, 3], then X[idx] returns a new matrix with the three corresponding rows of X. Also, X[3:5] returns the rows with indices 3 and 4 (do not forget that there is also a row zero). Note that indexing past the end of the array this way raises no error - it returns an empty or shorter-than-expected set of rows.
# Most frequent mistakes:
# Building the batch incorrectly. A new batch must be formed before every call to update_mini_batch.
# Checking the loop-exit condition incorrectly (exceeding the allowed number of update_mini_batch calls)
# Checking the convergence condition in update_mini_batch incorrectly
# Rewriting (instead of reusing) the provided functions/methods
# Missing self. when accessing class attributes / methods
# Careless mistakes (an impressive variety, including: going out of array bounds, forming the batch from X only, shuffling X and y independently, mixed-up dimensions and indexing, and much more ... )
# P.S.
# If you cannot find a bug for a long time - write pseudocode on paper and rewrite the function "from scratch". That can be faster than hunting down some insidious little detail. Read the comments, they contain a lot of useful information. Finally, it sometimes helps to step away; the key idea may arrive quite unexpectedly "over a cup of tea". Above all - do not despair, and good luck!
import numpy as np
def SGD(self, X, y, batch_size, learning_rate=0.1, eps=1e-6, max_steps=200):
for e in range(max_steps):
epoch = list(range(len(X)))
np.random.shuffle(epoch)
for batch in [epoch[i:i + batch_size] for i in range(0, len(epoch), batch_size)]:
if self.update_mini_batch(X[batch], y[batch], learning_rate, eps) != 0:
return 1
return 0
def update_mini_batch(self, X, y, learning_rate, eps):
before = J_quadratic(self, X, y)
grad = compute_grad_analytically(self, X, y)
dw = -grad * learning_rate
self.w += dw
after = J_quadratic(self, X, y)
return 1 if abs(after - before) < eps else 0 |
"""
Channel resource implementation.
"""
from typing import Optional, Union
from pyyoutube.error import PyYouTubeException, ErrorMessage, ErrorCode
from pyyoutube.resources.base_resource import Resource
from pyyoutube.models import Channel, ChannelListResponse
from pyyoutube.utils.params_checker import enf_comma_separated, enf_parts
class ChannelsResource(Resource):
"""A channel resource contains information about a YouTube channel.
References: https://developers.google.com/youtube/v3/docs/channels
"""
def list(
self,
parts: Optional[Union[str, list, tuple, set]] = None,
for_username: Optional[str] = None,
channel_id: Optional[Union[str, list, tuple, set]] = None,
managed_by_me: Optional[bool] = None,
mine: Optional[bool] = None,
hl: Optional[str] = None,
max_results: Optional[int] = None,
on_behalf_of_content_owner: Optional[str] = None,
page_token: Optional[str] = None,
return_json: bool = False,
**kwargs: Optional[dict],
) -> Union[dict, ChannelListResponse]:
"""Returns a collection of zero or more channel resources that match the request criteria.
Args:
parts:
Comma-separated list of one or more channel resource properties.
Accepted values: id,auditDetails,brandingSettings,contentDetails,contentOwnerDetails,
localizations,snippet,statistics,status,topicDetails
for_username:
The parameter specifies a YouTube username, thereby requesting
the channel associated with that username.
channel_id:
The parameter specifies a comma-separated list of the YouTube channel ID(s)
for the resource(s) that are being retrieved.
managed_by_me:
Set this parameter's value to true to instruct the API to only return channels
managed by the content owner that the onBehalfOfContentOwner parameter specifies.
The user must be authenticated as a CMS account linked to the specified content
owner and onBehalfOfContentOwner must be provided.
mine:
Set this parameter's value to true to instruct the API to only return channels
owned by the authenticated user.
hl:
The hl parameter instructs the API to retrieve localized resource metadata for
a specific application language that the YouTube website supports.
The parameter value must be a language code included in the list returned by the
i18nLanguages.list method.
max_results:
                The parameter specifies the maximum number of items that should be returned
                in the result set.
Acceptable values are 0 to 50, inclusive. The default value is 5.
on_behalf_of_content_owner:
The onBehalfOfContentOwner parameter indicates that the request's authorization
credentials identify a YouTube CMS user who is acting on behalf of the content
owner specified in the parameter value. This parameter is intended for YouTube
                content partners that own and manage many different YouTube channels. It allows
content owners to authenticate once and get access to all their video and channel
data, without having to provide authentication credentials for each individual channel.
The CMS account that the user authenticates with must be linked to the specified YouTube content owner.
page_token:
The parameter identifies a specific page in the result set that should be returned.
return_json:
Type for returned data. If you set True JSON data will be returned.
**kwargs:
Additional parameters for system parameters.
Refer: https://cloud.google.com/apis/docs/system-parameters.
Returns:
Channel data
Raises:
PyYouTubeException: Missing filter parameter.
            Request not successful.
"""
params = {
"part": enf_parts(resource="channels", value=parts),
"hl": hl,
"maxResults": max_results,
"onBehalfOfContentOwner": on_behalf_of_content_owner,
"pageToken": page_token,
**kwargs,
}
if for_username is not None:
params["forUsername"] = for_username
elif channel_id is not None:
params["id"] = enf_comma_separated(field="channel_id", value=channel_id)
elif managed_by_me is not None:
params["managedByMe"] = managed_by_me
elif mine is not None:
params["mine"] = mine
else:
raise PyYouTubeException(
ErrorMessage(
status_code=ErrorCode.MISSING_PARAMS,
message=f"Specify at least one of for_username,channel_id,managedByMe or mine",
)
)
response = self._client.request(path="channels", params=params)
data = self._client.parse_response(response=response)
return data if return_json else ChannelListResponse.from_dict(data)
def update(
self,
part: str,
body: Union[dict, Channel],
on_behalf_of_content_owner: Optional[str] = None,
return_json: bool = False,
**kwargs,
) -> Union[dict, Channel]:
"""Updates a channel's metadata.
Note that this method currently only supports updates to the channel resource's brandingSettings,
invideoPromotion, and localizations objects and their child properties.
Args:
part:
The part parameter serves two purposes in this operation. It identifies the properties
that the write operation will set as well as the properties that the API response will include.
body:
Provide channel data in the request body. You can give dataclass or just a dict with data.
on_behalf_of_content_owner:
The onBehalfOfContentOwner parameter indicates that the request's authorization
credentials identify a YouTube CMS user who is acting on behalf of the content
owner specified in the parameter value. This parameter is intended for YouTube
content partners that own and manage many different YouTube channels. It allows
content owners to authenticate once and get access to all their video and channel
data, without having to provide authentication credentials for each individual channel.
The CMS account that the user authenticates with must be linked to the specified YouTube content owner.
return_json:
Type for returned data. If you set True JSON data will be returned.
**kwargs:
Additional parameters for system parameters.
Refer: https://cloud.google.com/apis/docs/system-parameters.
Returns:
Channel updated data.
"""
params = {
"part": part,
"onBehalfOfContentOwner": on_behalf_of_content_owner,
**kwargs,
}
response = self._client.request(
method="PUT",
path="channels",
params=params,
json=body,
)
data = self._client.parse_response(response=response)
return data if return_json else Channel.from_dict(data)
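# Illustrative usage sketch (assumption: the package's Client wires this resource
# up as `client.channels`; the API key and channel id below are placeholders):
#
#   from pyyoutube import Client
#   client = Client(api_key="API_KEY")
#   resp = client.channels.list(parts="snippet,statistics", channel_id="CHANNEL_ID")
#   print(resp.items[0].snippet.title)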
|
#!/system/bin/python
#Coder by jimmyromanticdevil
#Recoder by ./Mr.Java404
#Date & Time 10/07/2017 [06:09]
#Team N45HT (Exploiting and Creativity)
import urllib2
import urllib
import sys
import time
import random
import re
import os
os.system("clear")
#Colors
B = '\033[1m' #Bold
R = '\033[31m' #Red
G = '\033[32m' #Green
Y = '\033[33m' #Yellow
BL = '\033[34m' #Blue
P = '\033[35m' #Purple
W = '\033[37m' #White
U = '\033[2m' #Underline
N = '\033[0m' #Normal
#Make sure the proxy list file is in the same directory as this Python script
proxy_list = "proxylist.txt"
bacod = ['Mozilla/4.0 (compatible; MSIE 5.0; SunOS 5.10 sun4u; X11)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.2pre) Gecko/20100207 Ubuntu/9.04 (jaunty) Namoroka/3.6.2pre',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser;',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)',
'Microsoft Internet Explorer/4.0b1 (Windows 95)',
'Opera/8.00 (Windows NT 5.1; U; en)',
'amaya/9.51 libwww/5.4.0',
'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; ZoomSpider.net bot; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QihooBot 1.0 qihoobot@qihoo.net)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 5.11 [en]']
#Respect the author!.. Coding isn't easy!..
gblk = ['http://google.com','http://bing.com','http://facebook.com','http://twitter.com','http://yahoo.com']
print B+G+""
print " _ _ __"
print "| \ / |____ |_ |_____ __ __ _____"
print "| \_/ | - / | | |\ \ / /| |"
print "| | _/ | | [-] | \ \/ / | [-] |"
print "| |\_/| | \ _| | _ | \ / | _ |"
print "|_| |_|_|_\ [] |___|_| |_| \/ |_| |_|"
print B+R+""
print " __ ___ __"
print " / | | | / |"
print " / o | | | / o |"
print "/_ | | 0 | /_ |"
print " | | | | | |"
print " |__| |___| |__|"
time.sleep(2)
print ''
print B+BL+'#-----------------------------------------#'
print B+R+' \!/p-store.net/user/khaidiraje\!/'
print B+BL+'#-----------------------------------------#'
print B+W+'[x] Matikan Data Terlebih Dahulu dan Aktifkan Kembali'
print B+W+'[x] Pastikan Url Diawalinya Dengan http or https'
print B+W+'[x] Saat Memasukan [Url Visitor] Tidak Terlihat'
print B+W+'[x] Jika Kesalahan Dalam Url Silahkan Close Terminal and Open Kembali'
print B+W+'[x] terimakasih telah berbelanja di toko saya semoga anda puas ya dan jangan lupa tinggalkan rating nya bro biar kita sama sama enak dan ini no wa ane 0895418099333 kalau ada masalah silakan chat aja dengan ane ya salam good luck p-store.net/user/khaidiraje'
print B+BL+'#-----------------------------------------#'
print B+R+' \!/WARNING\!/'
print B+BL+'#-----------------------------------------#'
ini_url = raw_input (B+Y+"[+] Masukan Url Visitor : ")
print ''
print B+Y+'[+] Url Visitor Anda => '+B+BL+'|'+B+W,ini_url
print B+BL+'#-----------------------------------------#'
def Autoclicker(proxy1):
try:
proxy = proxy1.split(":")
print B+BL+"#-----------------------------------------#\n"+B+W+'[-]',proxy1, ""+B+P+"=> Process"+N
time.sleep(2)
proxy_set = urllib2.ProxyHandler({"http" : "%s:%d" % (proxy[0], int(proxy[1]))})
opener = urllib2.build_opener(proxy_set, urllib2.HTTPHandler)
opener.addheaders = [('User-agent', random.choice(bacod)),
('Refferer', random.choice(gblk))]
urllib2.install_opener(opener)
f = urllib2.urlopen(ini_url)
#187034
if "google.com" in f.read():
print B+G+"[*] 200 OK"+"\n"+B+BL+"#-----------------------------------------#\n"+N
else:
print B+R+"[*] Link Gagal Di Kunjungi !\n"+B+BL+"#-----------------------------------------#\n"+N
print B+R+"[!] Proxy / Connection Failed\n"+B+BL+"#-----------------------------------------#\n"+N
except:
print B+R+"[!] Proxy Error\n"+B+BL+"#-----------------------------------------#\n"+N
time.sleep(5)
pass
def loadproxy():
try:
get_file = open(proxy_list, "r")
proxylist = get_file.readlines()
count = 0
proxy = []
while count < len(proxylist):
proxy.append(proxylist[count].strip())
count += 1
for i in proxy:
Autoclicker(i)
except IOError:
print B+W+"\n[-] Error : Proxy List Tidak Ditemukan / Belum Dibuat\n"+N
sys.exit(1)
def main():
print """
"""+N
loadproxy()
if __name__ == '__main__':
main() |
import random
a = int(random.random() * 100) + 1
print(a)
b = int(random.random() * 900) + 100
print(b)
c = int(random.random() * (ord("Z")-ord("A")+1)) + ord("A")  # random letter, inclusive of 'Z'
print(chr(c))
d = int(random.random() * 99 ) + 1
print(d)
if d%2==0:
    print("True")
else:
    print("False")
import numpy as np
import sys
import math
from aresta import Aresta
from heapsort import heapSort
def makeSet(qtdeVertices):
conjunto = []
for i in range(qtdeVertices):
conjunto.append([])
conjunto[i].append(i)
return conjunto
def makeArestas(matriz,qtdeVertices):
arestas = []
for i in range((qtdeVertices-1)):
for j in range(i+1,qtdeVertices):
if(matriz[i][j] > 0):
arestas.append(Aresta(i,j,matriz[i][j]))
return arestas
def isDifferent(conjunto, verticeA, verticeB):
if(len(conjunto[verticeA]) == len(conjunto[verticeB])):
for i in range(len(conjunto[verticeA])):
if((conjunto[verticeA][i] in conjunto[verticeB]) == False ):
return True
else:
return True
return False
def union(conjunto,verticeA,verticeB):
for i in range(len(conjunto[verticeB])):
conjunto[verticeA].append(conjunto[verticeB][i])
for j in range(1,len(conjunto[verticeA])):
conjunto[conjunto[verticeA][j]] = conjunto[verticeA]
def kruskal(matriz,qtdeVertices):
mstp = 0
conjunto = makeSet(qtdeVertices)
arestas = makeArestas(matriz,qtdeVertices)
heapSort(arestas)
for i in range(len(arestas)):
if((isDifferent(conjunto, arestas[i].verticeA, arestas[i].verticeB)) == True):
mstp += arestas[i].w
union(conjunto,arestas[i].verticeA,arestas[i].verticeB)
print('MSTP: ',mstp)
#############################################
def criarMatrizQuadrada(n, matriz):
for i in range(0, n):
matriz.append([])
for j in range(0, n):
matriz[i].append(0)
def lerArquivo():
filename = 'instancias/' + sys.argv[1]
f = open(filename, 'r')
qtdeVertices = int(f.readline())
qtdeArestas = 0
matrizAdjacencia = []
criarMatrizQuadrada(qtdeVertices, matrizAdjacencia)
for i in range(qtdeVertices-1):
linha = f.readline()
elementos = linha.rsplit()
aux = 0
for j in range(i+1, qtdeVertices):
matrizAdjacencia[i][j] = int(elementos[aux])
matrizAdjacencia[j][i] = int(elementos[aux])
aux += 1
return matrizAdjacencia,qtdeVertices
if __name__ == '__main__':
entrada,qtdeVertices = lerArquivo()
kruskal(entrada,qtdeVertices)
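# Illustrative smoke test (assumptions: run without the instancias/ input file,
# and heapSort orders the edges by ascending weight):
#   matriz = [[0, 1, 3],
#             [1, 0, 2],
#             [3, 2, 0]]
#   kruskal(matriz, 3)   # prints 'MSTP:  3' (edges 0-1 and 1-2)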
|
import key_generator.key_generator as key
Sk = key.generate(num_of_atom=9,seed=1)
print(Sk.get_key())
|
from collections import Counter, defaultdict
def letter_frequency(text):
by_value = defaultdict(list)
for k, v in Counter(a.lower() for a in text if a.isalpha()).items():
by_value[v].append((k, v))
result = []
for key in sorted(by_value, reverse=True):
result.extend(sorted(by_value[key]))
return result
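# Illustrative example (not part of the original module):
#   letter_frequency("Hello World")
#   -> [('l', 3), ('o', 2), ('d', 1), ('e', 1), ('h', 1), ('r', 1), ('w', 1)]
# Counts are grouped by descending frequency, with ties broken alphabetically.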
|
"""add user banned
Revision ID: 4f28090745df
Revises: 1782e1af3cc2
Create Date: 2015-11-18 09:59:26.930530
"""
# revision identifiers, used by Alembic.
revision = '4f28090745df'
down_revision = '1782e1af3cc2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('banned', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'banned')
### end Alembic commands ###
|
# Import the numpy package to compute the image and objective function
import numpy as np
# Import the superclass (also called base class), which is an abstract class,
# to implement the subclasses LampGlobalFitnessFunction and LampLocalFitnessFunction
from ObjectiveFunction import *
from ImageMetrics import *
# The subclass that inherits of ObjectiveFunction
class LampGlobalFitnessFunction(ObjectiveFunction):
# Constructor
# aWidth, aLength: room size in pixels; aNumberOfLamps, aLampRadius: lamp count and radius
def __init__(self, aWidth, aLength, aNumberOfLamps, aLampRadius):
# Store the class attributes
self.room_width = aWidth; # cols of the image
self.room_length = aLength; # rows of the image
self.lamp_radius = aLampRadius;
self.number_of_lamps = aNumberOfLamps;
# Store the boundaries
self.boundaries = [];
for _ in range(self.number_of_lamps):
self.boundaries.append([0, self.room_width - 1]);
self.boundaries.append([0, self.room_length - 1]);
self.boundaries.append([0, 1]);
# Call the constructor of the superclass
super().__init__(3 * aNumberOfLamps,
self.boundaries,
self.objectiveFunction,
2);
# The name of the function
self.name = "Lamp Problem";
self.reference_image = np.ones((self.room_length, self.room_width));
self.metrics_function = getSSIM;
# objectiveFunction computes the global fitness: the SSIM between the image lit by the lamps and a fully lit reference room
def objectiveFunction(self, aSolution, aLogFlag = True):
metrics = self.metrics_function(self.reference_image, self.createImage(aSolution));
if aLogFlag:
self.global_fitness = metrics;
return metrics;
def createImage(self, aSolution):
# Create a black image
test_image = np.zeros((self.room_length, self.room_width));
# Process every lamp
for i in range(round(len(aSolution) / 3)):
# The lamp is on
if aSolution[i * 3 + 2] > 0.5:
# Add the lamp
test_image = np.add(test_image, create_circular_mask(self.room_width, self.room_length, aSolution[i * 3], aSolution[i * 3 + 1], self.lamp_radius));
# Return the new image
return test_image;
def saveImage(self, aSolution, aFileName):
test_image = self.createImage(aSolution.parameter_set);
np.savetxt(aFileName, test_image);
class LampLocalFitnessFunction(ObjectiveFunction):
def __init__(self, aGlobalFitnessFunction):
number_of_dimensions = 3;
self.boundaries = [];
for i in range(number_of_dimensions - 1):
self.boundaries.append(aGlobalFitnessFunction.boundaries[i]);
self.boundaries.append([1, 1]);
super().__init__(number_of_dimensions,
self.boundaries,
self.objectiveFunction,
2); # Maximisation
self.global_fitness_function = aGlobalFitnessFunction;
def objectiveFunction(self, aSolution):
# Global fitness with individual
global_fitness_with_individual = self.global_fitness_function.global_fitness;
global_fitness_without_individual = self.global_fitness_function.objectiveFunction(aSolution, False);
        # Leave-one-out cross-validation (marginal fitness)
if self.global_fitness_function.flag == 1:
marginal_fitness = global_fitness_without_individual - global_fitness_with_individual;
else:
marginal_fitness = global_fitness_with_individual - global_fitness_without_individual;
return (marginal_fitness);
def create_circular_mask(w, h, x, y, radius):
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - x)**2 + (Y-y)**2)
mask = dist_from_center <= radius
return mask
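# Illustrative check of create_circular_mask (standalone, not used above):
#   mask = create_circular_mask(10, 10, 5, 5, 3)   # 10x10 grid, circle of radius 3 centred at (5, 5)
#   mask.sum()   # number of pixels whose distance from the centre is <= 3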
|
# Author - Joshua Schoonmaker
# StarWarsName.py
# CIS 125 IWU
# Week 3 Star Wars Name Assignment
#
def main():
# Initial input requests
strFirstName = input("Please enter your first name: ")
strLastName = input("Please enter your last name: ")
strMaidenName = input("Please enter your mother's maiden name: ")
strBirthCity = input("Please enter the name of the city in which you were born: ")
print()
# Setting chopped inputs to new variables
a = strFirstName[0:2]
b = strLastName[0:3]
c = strMaidenName[0:2]
d = strBirthCity[0:3]
# Adding together new variables
print(b + a, c + d)
main() |
import random
from sys import argv
file_path = argv[1]
def open_and_read_file(file_path):
"""Takes file path as string; returns text as string.
Takes a string that is a file path, opens the file, and turns
the file's contents as one string of text.
"""
text_string = open(file_path).read()
return text_string
def make_chains(text_string):
"""Takes input as text string, returns dictionary or Markov chains"""
chains = {}
n = int(raw_input("Length of Markov chains: "))
#Split out the text string into a list of words
words = text_string.split()
    #Make a key out of n consecutive words of the text
    i = 0
    while i < len(words) - (n+1):
        key = tuple(words[i:n+i])
        value = words[i+n]
        #check if the key is in the dictionary, or put it in
        if key not in chains:
            chains[key] = []
        chains[key].append(value)
        i += 1
    return chains
def make_text(chains):
"""Takes dictionary of markov chains; returns random text."""
text = ""
key = random.choice(chains.keys())
text = (' ').join(key)
while key in chains.keys():
val_list = chains[key]
word3 = random.choice(val_list)
text = text + ' '+ word3
key = (key[1], word3)
return text
# Open the file and turn it into one long string
input_text = open_and_read_file(file_path)
# Get a Markov chain
chains = make_chains(input_text)
# # Produce random text
# random_text = make_text(chains)
# print random_text
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 08:49:07 2020
@author: scott
"""
# dataframe imports
import pandas as pd
# plotly
from plotly import subplots
from plotly import graph_objs as go
import plotly.express as px
# =============================================================================
# #### Import data
# =============================================================================
ejames_volume = pd.read_csv('ejames_volume.csv')
yesler_volume = pd.read_csv('yesler_volume.csv')
stream_volume = pd.read_csv('stream_volume.csv')
sunset_volume = pd.read_csv('sunset_volume.csv')
block11_volume = pd.read_csv('block11_volume.csv')
growA_volume = pd.read_csv('growA_volume.csv')
growB_volume = pd.read_csv('growB_volume.csv')
ejames_peakyness = pd.read_csv('ejames_peakyness.csv')
yesler_peakyness = pd.read_csv('yesler_peakyness.csv')
stream_peakyness = pd.read_csv('stream_peakyness.csv')
sunset_peakyness = pd.read_csv('sunset_peakyness.csv')
block11_peakyness = pd.read_csv('block11_peakyness.csv')
growA_peakyness = pd.read_csv('growA_peakyness.csv')
growB_peakyness = pd.read_csv('growB_peakyness.csv')
# =============================================================================
# #### Scatter Plots
# =============================================================================
fig = px.scatter(ejames_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("ejames_scatter.html")
fig = px.scatter(yesler_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("yesler_scatter.html")
fig = px.scatter(stream_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("stream_scatter.html")
fig = px.scatter(sunset_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("sunset_scatter.html")
fig = px.scatter(block11_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("block11_scatter.html")
fig = px.scatter(growA_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("growA_scatter.html")
fig = px.scatter(growB_peakyness, x='dates', y="value", hover_data=['dates'])
fig.write_html("growB_scatter.html")
# =============================================================================
# #### Cumulative Histograms
# =============================================================================
x = ejames_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("ejames_hist.html")
x = yesler_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("yesler_hist.html")
x = stream_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("stream_hist.html")
x = sunset_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("sunset_hist.html")
x = block11_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("block11_hist.html")
x = growA_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("growA_hist.html")
x = growB_peakyness['value']
fig = go.Figure(data=[go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')])
fig.write_html("growB_hist.html")
# =============================================================================
# #### Peakyness Plots
# =============================================================================
# peakyness scatterplots for individual buildings
fig = px.scatter(ejames_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("ejames_peakyness.html")
fig = px.scatter(block11_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("block11_peakyness.html")
fig = px.scatter(yesler_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("yesler_peakyness.html")
fig = px.scatter(stream_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("stream_peakyness.html")
fig = px.scatter(sunset_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("sunset_peakyness.html")
fig = px.scatter(growA_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("growA_peakyness.html")
fig = px.scatter(growB_peakyness, x='value', y="peak_norm", hover_data=['dates', 'peak_hours'])
fig.write_html("growB_peakyness.html")
# combo peakyness scatterplots
df = ejames_peakyness.append(yesler_peakyness).append(stream_peakyness).append(sunset_peakyness).append(block11_peakyness).append(growA_peakyness).append(growB_peakyness)
fig = px.scatter(df, x='value', y="peak_norm", color = 'site',
hover_data=['dates', 'peak_hours', 'site'])
fig.write_html("sites_peakyness.html")
# =============================================================================
# #### Peakyness Distributions
# =============================================================================
# ejames
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = ejames_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = ejames_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="ejames peakyness distributions")
fig.write_html("ejames_peakyness_distribution.html")
# sunset
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = sunset_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = sunset_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="sunset peakyness distributions")
fig.write_html("sunset_peakyness_distribution.html")
# yesler
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = yesler_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = yesler_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="yesler peakyness distributions")
fig.write_html("yesler_peakyness_distribution.html")
# stream
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = stream_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = stream_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="stream peakyness distributions")
fig.write_html("stream_peakyness_distribution.html")
# block11
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = block11_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = block11_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="block11 peakyness distributions")
fig.write_html("block11_peakyness_distribution.html")
# growA
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = growA_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = growA_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="growA peakyness distributions")
fig.write_html("growA_peakyness_distribution.html")
# growB
fig = subplots.make_subplots(rows=1,
cols=2,
start_cell="bottom-left",
subplot_titles=('Total Volume [gals]', 'Peak Norm'))
x = growB_peakyness['value']
trace1 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
x = growB_peakyness['peak_norm']
trace2 = go.Histogram(x=x, cumulative_enabled=True, histnorm='probability density')
fig.append_trace(trace1, row=1,col=1)
fig.append_trace(trace2, row=1,col=2)
fig.update_layout(title_text="growB peakyness distributions")
fig.write_html("growB_peakyness_distribution.html")
|
import itertools
class Game:
def __init__(self):
self.board = [[None] * 3 for _ in range(3)]
self.turns = itertools.cycle('OX')
self._switch_player()
def _switch_player(self):
self.current_player = next(self.turns)
def _has_winner(self):
def is_equal_row(row):
return all(row) and len(set(row)) == 1
        inverted_board = [[line[i] for line in self.board] for i in range(3)]  # transpose: the columns of the board
return any([
any(is_equal_row(line) for line in self.board),
any(is_equal_row(line) for line in inverted_board),
is_equal_row([self.board[0][0], self.board[1][1], self.board[2][2]]),
is_equal_row([self.board[2][0], self.board[1][1], self.board[0][2]])
])
def _is_full(self):
return all(all(line) for line in self.board)
def play(self, row, column):
if self.board[row][column]:
raise ValueError("invalid")
self.board[row][column] = self.current_player
if self._has_winner():
raise ValueError("winner")
elif self._is_full():
raise ValueError("loosers")
self._switch_player()
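# Illustrative usage sketch (not part of the original module): the class reports
# terminal states by raising ValueError from play().
if __name__ == '__main__':
    game = Game()  # 'O' moves first: the cycle 'OX' starts at 'O'
    for move in [(0, 0), (1, 1), (0, 1), (1, 2)]:
        game.play(*move)
    try:
        game.play(0, 2)  # 'O' completes the top row
    except ValueError as exc:
        print(exc)  # -> winner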
|
import argparse
from PIL import Image
from PIL.ImageSequence import Iterator as gifiter
import numpy as np
import math
def psnr(im1, im2):
    '''
    Peak Signal to Noise Ratio (PSNR)
    im1, im2: PIL Images (original and super-resolved)
    '''
im1,im2 = im1.convert('RGB'),im2.convert('RGB')
im1,im2 = np.array(im1,dtype=np.float64),np.array(im2,dtype=np.float64)
assert im1.shape == im2.shape
diff = (im1 - im2).flatten()
mse = np.mean(diff**2)
    if mse == 0:
        return np.inf  # identical images; np.infty was removed in NumPy 2.0
    else:
        return 10 * math.log10(255**2 / mse)
def ssim(im1, im2, K1=0.01, K2=0.03):
    '''
    Structural Similarity (SSIM)
    Average over the 3 RGB channels, in the range (0, 1]
    im1, im2: PIL Images (original and super-resolved)
    '''
im1,im2 = im1.convert('RGB'),im2.convert('RGB')
im1,im2 = np.array(im1,dtype=np.float64),np.array(im2,dtype=np.float64)
assert im1.shape == im2.shape
h,w,c = im1.shape
def channel_mean(ch):
return np.mean(ch.flatten())
def channel_error(ch):
m = channel_mean(ch)
e = np.sum(((ch-m).flatten())**2)/(h*w-1)
return m, e
def channel_conv(ch1, ch2):
(m1,e1),(m2,e2) = channel_error(ch1),channel_error(ch2)
conv = np.sum(((ch1-m1)*(ch2-m2)).flatten())/(h*w-1)
return m1,m2,e1,e2,conv
def channel_ssim(ch1, ch2, K1=K1, K2=K2):
C1 = (K1*255)**2
C2 = (K2*255)**2
C3 = C2/2
m1,m2,e1,e2,conv = channel_conv(ch1, ch2)
l = (2*m1*m2+C1)/(m1**2+m2**2+C1)
c = (2*np.sqrt(e1*e2)+C2)/(e1+e2+C2)
s = (conv+C3)/(np.sqrt(e1*e2)+C3)
return l*c*s
    score = 0  # avoid shadowing the enclosing function name
    for i in range(3):
        score += channel_ssim(im1[:, :, i], im2[:, :, i], K1=K1, K2=K2)
    return score / 3
def video_score(v1, v2):
    '''
    PSNR and SSIM of two GIF files, averaged over all frame pairs
    '''
    s1s, s2s = [], []
    # pair frames from both GIFs; zip stops at the shorter file
    for frame1, frame2 in zip(gifiter(v1), gifiter(v2)):
        frame1, frame2 = frame1.convert('RGB'), frame2.convert('RGB')
        s1s.append(psnr(frame1, frame2))
        s2s.append(ssim(frame1, frame2))
s1s,s2s = np.array(s1s),np.array(s2s)
return np.mean(s1s),np.mean(s2s)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='image or video', choices=['image','video'], required=True)
parser.add_argument('--file1', help='path of first file', required=True)
parser.add_argument('--file2', help='path of second file', required=True)
args = parser.parse_args()
mode, file1, file2 = args.mode, args.file1, args.file2
if mode == 'image':
im1,im2 = Image.open(file1),Image.open(file2)
        s1, s2 = psnr(im1, im2), ssim(im1, im2)
else:
v1,v2 = Image.open(file1),Image.open(file2)
s1,s2 = video_score(v1,v2)
    print('PSNR: %.2f, SSIM: %.2f' % (s1, s2))
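# Sanity-check sketch (assumption: run separately, since the CLI above requires
# --mode/--file1/--file2). Identical images should score PSNR = inf and
# SSIM = 1.0 under the formulas implemented here:
#
#   from PIL import Image
#   im = Image.new('RGB', (32, 32), color=(120, 60, 200))
#   print(psnr(im, im))             # inf
#   print(round(ssim(im, im), 6))   # 1.0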
|
import random
import pygame
from pygame.locals import *
class Base(object):
def __init__(self, screen_temp, x, y, image_name):
self.x = x
self.y = y
self.screen = screen_temp
        # load the sprite image for this object
        self.image = pygame.image.load(image_name)
class Plane(Base):
def __init__(self, screen_temp, x, y, image_name):
Base.__init__(self, screen_temp, x, y, image_name)
self.bullet_list = [] # bullet showing on screen
# self.bullet_list_left = []
# self.bullet_list_right = []
def display(self):
self.screen.blit(self.image, (self.x, self.y))
for bullets in self.bullet_list:
bullets.display_bullet()
bullets.move()
if bullets.judge(): # check if bullets out of margin
self.bullet_list.remove(bullets)
class BaseBullet(Base):
def display_bullet(self):
self.screen.blit(self.image, (self.x, self.y))
class HeroPlane(Plane):
def __init__(self, screen_temp):
Plane.__init__(self, screen_temp, 200, 700, './fighter/image/hero1.png')
def display(self):
# show player plane
self.screen.blit(self.image, (self.x, self.y))
for bullets in self.bullet_list:
bullets.display_bullet()
bullets.display_bullet_left()
bullets.display_bullet_right()
bullets.move()
if bullets.judge(): #check if bullets out of margin
self.bullet_list.remove(bullets)
        # Removing from a list while iterating can skip elements, because items
        # after the removed one shift forward. Here display() runs every frame,
        # so any skipped bullet is removed on a later pass.
        # In real projects, collect the items to delete into a second list first:
        #   a = [1, 2, 3, 4, 5, 6, 7]; b = [3, 4]
        #   for i in b:
        #       a.remove(i)
def move_left(self):
self.x -= 15
def move_right(self):
self.x += 15
def move_up(self):
self.y -= 15
def move_down(self):
self.y += 15
def fire(self):
self.bullet_list.append(Bullet(self.screen, self.x, self.y))
class Bullet(BaseBullet): # hero plane bullet
def __init__(self, screen_temp, x, y):
BaseBullet.__init__(self, screen_temp, x+40, y-25, './fighter/image/bullet.png')
self.left_x = x
self.left_y = y + 10
self.right_x = x + 80
self.right_y = y + 10
#def display_bullet_mid(self):
# self.screen.blit(self.image, (self.x, self.y))
def display_bullet_left(self):
self.screen.blit(self.image, (self.left_x, self.left_y))
def display_bullet_right(self):
self.screen.blit(self.image, (self.right_x, self.right_y))
def move(self):
self.y -= 8
self.right_y -= 8
self.left_y -= 8
self.right_x += 1
self.left_x -= 1
def judge(self):
if self.left_y < 10:
return True
else:
return False
class Enemy(Plane):
"""create enemy plane"""
def __init__(self, screen_temp):
Plane.__init__(self, screen_temp, 0, 0, './fighter/image/enemy0.png')
self.ran_left = random.randint(30, 130)
self.ran_right = random.randint(150, 250)
self.x = random.randint(50, 350)
#self.y = 0
self.direction = 'right'
self.enemy_list = []
def enemy_move(self):
"""image width is 480"""
self.y += 0.5
if self.x > self.ran_right:
self.direction = 'left'
elif self.x <= self.ran_left:
self.direction = 'right'
if self.direction == 'right':
self.x += 0.5
elif self.direction == 'left':
self.x -= 0.5
def fire(self):
        # The game runs inside a tight while loop, so sleep() would stall the
        # whole frame; instead, fire only when a random draw hits chosen values.
random_num = random.randint(1, 200)
if random_num == 120 or random_num == 90:
self.bullet_list.append(EnemyBullet(self.screen, self.x, self.y))
class EnemyBullet(BaseBullet):
def __init__(self, screen_temp, x, y):
BaseBullet.__init__(self, screen_temp, x+22, y+40, './fighter/image/bullet1.png')
def move(self):
self.y += 5
def judge(self):
if self.y > 830:
return True
else:
return False
def key_control(hero_temp):
key_state = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
print('Game Exit')
exit()
    if key_state[K_RIGHT]:
        print('move right')
        hero_temp.x += 10
    if key_state[K_LEFT]:
        print('move left')
        hero_temp.x -= 10
    if key_state[K_UP]:
        print('move up')
        hero_temp.y -= 10
    if key_state[K_DOWN]:
        print('move down')
        hero_temp.y += 10
if key_state[K_SPACE]:
print('space/shoot')
hero_temp.fire()
"""
# check if player press any key
elif event.type == pygame.KEYDOWN:
if event.key == K_a or event.key == K_LEFT: # K_LEFT = pygame.K_LEFT
print('move left')
hero_temp.move_left()
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
print('move right')
hero_temp.move_right()
elif event.key == pygame.K_w or event.key == pygame.K_UP:
print('move top')
hero_temp.move_up()
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
print('move down')
hero_temp.move_down()
elif event.key == pygame.K_SPACE:
print('space/shoot')
hero_temp.fire()
"""
def main():
    pygame.init()  # pygame modules must be initialized before set_mode()
    # create window
    screen = pygame.display.set_mode((480, 852), 0, 32)
    background = pygame.image.load('./fighter/image/background.png')
# create a plane object
hero = HeroPlane(screen)
enemy_0 = Enemy(screen)
screen_frame = 0
while True:
# show background image
screen.blit(background, (0, 0))
hero.display()
enemy_0.display()
enemy_0.enemy_move()
enemy_0.fire()
        # push the newly drawn frame to the screen
pygame.display.update()
key_control(hero)
screen_frame += 1
if __name__ == '__main__':
main()
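# A hedged refinement (not in the original): capping the frame rate with
# pygame.time.Clock makes movement speed and the random fire() rate
# machine-independent instead of CPU-bound.
#
#   clock = pygame.time.Clock()
#   while True:
#       ...
#       pygame.display.update()
#       clock.tick(60)  # limit the loop to 60 frames per second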
|
import csv
import boto3
# asg_list = [
# 'AWOR-PDMESCIO01-ASG',
# 'AWOR-PDORAAPX01-asg',
# 'AWOR-PDQLKGEO01-asg',
# 'AWOR-PDQMSWEB02-asg',
# 'AWOR-QALABAPP01-asg',
# 'AWOR-TSDWHAPP01-asg',
# 'AWOR-TSOLAPDB01-asg',
# 'AWOR-TSOLAPPB01-asg',
# 'AWOR-TSPDMLAS01-asg',
# 'AWOR-TSSASAPP01-asg',
# 'AwOr-PdMovXfr01-asg',
# 'AwOr-TsMovApp01-asg',
# 'IRVDIGWEB13-asg',
# 'IRVHFMFDM02-asg',
# 'IRVINE724-asg',
# 'IRVINE739-asg',
# 'IRVLBLPRG01-asg',
# 'IRVLBLPRG02-asg',
# 'IRVLBLSPC01-asg',
# 'IRVMETCAL02-asg',
# 'IRVOLBECM01-PRD-asg',
# 'IRVOLBECM01-QA-asg',
# 'IRVOLBSCH01-PRD-asg',
# 'IRVOLBSCH01-QA-asg',
# 'IRVORACLN11-asg',
# 'IRVORACLN12-asg',
# 'IRVORACLN14-asg',
# 'IRVORACLN15-asg',
# 'IRVQMSAPP01-asg',
# 'IRVTHVSAS01-asg',
# 'IRVVALGEN01-asg',
# 'IrvITRSPT04-asg',
# 'IrvLABAPP02-asg',
# 'IrvLABSCH02-asg',
# 'IrvQMSWEB01-asg',
# 'Irvine837-asg',
# 'Irvine882-asg',
# 'Irvine902-asg',
# 'asadmin-common-fe-proxy-asg-ProxyServerASG-Z68E5W7PLU43',
# 'awor-qalabapp02-asg',
# 'awor-tswplapp01-asg',
# 'edwards-prod-ibcm-fe-proxy-asg-ProxyServerASG-5JTOPR2O9CYS',
# 'edwards-prod-qlik-fe-proxy-asg-ProxyServerASG-K9NW3CEF93GH',
# 'edwards-test-exch-fe-proxy-asg-ProxyServerASG-OSAN5QUPF3BY',
# 'irvjdemex02-asg',
# 'irvjdepit07-asg',
# 'irvlabapp01-asg',
# 'irvlblspc01-oq-asg',
# 'AWOR-DVMESWRK01-ASG']
# test_list = ['lab1', 'dmlab-common-core-vpnserver-VPNServerASG-XUIAMNKXXFF']
# client = boto3.client('autoscaling')
# for asgs in test_list:
# client.put_notification_configuration(
# AutoScalingGroupName=asgs,
# # TopicARN='arn:aws:sns:us-west-2:590992000271:edwards-scaling-topic',
# TopicARN='arn:aws:sns:us-west-2:753955134882:Topic1',
# NotificationTypes=[
# 'autoscaling:EC2_INSTANCE_LAUNCH',
# 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
# 'autoscaling:EC2_INSTANCE_TERMINATE',
# 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
# ]
# )
# print(asgs)
# def get_asg_list(client):
client = boto3.client('autoscaling')
# with open('test.csv', 'r') as f:
with open('asg_no_notifications.csv', 'r') as f:
reader = csv.reader(f)
asg_no_notifications = list(reader)
# each CSV row can hold several ASG names, so iterate rows, then names
for asgs in asg_no_notifications:
    for asg in asgs:
client.put_notification_configuration(
AutoScalingGroupName=asg,
TopicARN='arn:aws:sns:us-west-2:590992000271:edwards-scaling-topic',
# TopicARN='arn:aws:sns:us-west-2:753955134882:Topic1',
NotificationTypes=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
]
)
print(asg)
# for asgs in asg_no_notifications:
# client.put_notification_configuration(
# AutoScalingGroupName=asgs,
# # TopicARN='arn:aws:sns:us-west-2:590992000271:edwards-scaling-topic',
# TopicARN='arn:aws:sns:us-west-2:753955134882:Topic1',
# NotificationTypes=[
# 'autoscaling:EC2_INSTANCE_LAUNCH',
# 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
# 'autoscaling:EC2_INSTANCE_TERMINATE',
# 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
# ]
# )
# print(asgs)
# return(asg_no_notifications)
# f = open('asg_2.txt', 'r')
# f = open('test.txt', 'r')
# output = f.read()
# print(output)
# def main():
# client = boto3.client('autoscaling')
# asg_list = get_asg_list(client)
# length = len(asg_list)
# for i in range(length):
# print(asg_list[i])
# asg_pnc = client.put_notification_configuration(
# AutoScalingGroupName=new_list,
# TopicARN='arn:aws:sns:us-west-2:590992000271:edwards-scaling-topic',
# # TopicARN='arn:aws:sns:us-west-2:753955134882:Topic1',
# NotificationTypes=[
# 'autoscaling:EC2_INSTANCE_LAUNCH',
# 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
# 'autoscaling:EC2_INSTANCE_TERMINATE',
# 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
# ]
# )
# print(asg_pnc)
# if __name__ == '__main__':
# main()
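# A hedged sketch (assumption: asg_no_notifications.csv is produced by a step
# like this, which the script does not show) listing every ASG that has no
# notification configuration yet:
#
#   paginator = client.get_paginator('describe_auto_scaling_groups')
#   all_asgs = [g['AutoScalingGroupName']
#               for page in paginator.paginate()
#               for g in page['AutoScalingGroups']]
#   configured = {n['AutoScalingGroupName']
#                 for n in client.describe_notification_configurations(
#                     AutoScalingGroupNames=all_asgs)['NotificationConfigurations']}
#   missing = [name for name in all_asgs if name not in configured]
#   with open('asg_no_notifications.csv', 'w', newline='') as f:
#       csv.writer(f).writerow(missing)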
|