text stringlengths 8 6.05M |
|---|
#
# Module Imports
import os
import configparser
#
class ConfigParser:
    """Thin wrapper around :mod:`configparser` for reading the main INI config file."""
    #################
    # Private members
    __config = None
    #################

    def __init__(self, config_file_path):
        """
        Constructor.
        :param config_file_path: Absolute path to the configuration file, expected
        to follow the standard INI file format.
        """
        self.reload_config_file(config_file_path)

    def reload_config_file(self, config_file_path):
        """
        Re-read the configuration file from disk, replacing any cached state.
        :param config_file_path: Absolute path to the configuration file (INI format).
        :return: None.
        """
        self.__validation(config_file_path)
        parser = configparser.ConfigParser()
        parser.read(config_file_path)
        self.__config = parser

    def get_value(self, section_name, key_name):
        """
        Retrieve a configuration value, or None when the section/key is absent.
        :param section_name: Config section name.
        :param key_name: Config key name.
        :return: Config value string, or None if not present.
        """
        try:
            return self.__config[section_name][key_name]
        except KeyError:
            # Missing section or missing key — both surface as KeyError.
            return None

    @staticmethod
    def __validation(config_file_path):
        """
        Ensure the configuration file exists on disk.
        :param config_file_path: Absolute path to the configuration file.
        :return: None.
        :raises ValueError: in case config file does not exist.
        """
        if not os.path.isfile(config_file_path):
            raise ValueError('File not found.')
#
class EnvVarLoader:
    """
    Process-wide singleton that stores loaded environment variables and
    serves them back by name.
    """
    #####################
    ## Private Members ##
    __instance = None
    __env_vars = {}
    #####################

    def __init__(self):
        """
        Guarded constructor: only the very first instantiation succeeds,
        keeping the class a singleton.
        """
        if EnvVarLoader.__instance is not None:
            raise Exception("This class is a singleton!")
        EnvVarLoader.__instance = self

    @staticmethod
    def getInstance():
        """
        Return the singleton instance, creating it on first use.
        :return: Singleton
        """
        if EnvVarLoader.__instance is None:
            EnvVarLoader()
        return EnvVarLoader.__instance

    @staticmethod
    def var_load(var_dict):
        """
        Load variables into the singleton from a dictionary; a name may only
        be loaded once.
        :param var_dict: key, value pairs pertaining to variable name + value
        :return: None
        """
        for name, value in var_dict.items():
            if name in EnvVarLoader.__env_vars:
                raise Exception("Variable [" + str(name) + "] already loaded!")
            EnvVarLoader.__env_vars[name] = value

    @staticmethod
    def var_get(var_name=None):
        """
        Return one variable by name, or the whole dictionary when no name is given.
        :param var_name: variable name, or None for all variables
        :return: variable value, or the full dict
        """
        if var_name is None:
            return EnvVarLoader.__env_vars  # Returns entire dictionary
        if var_name not in EnvVarLoader.__env_vars:
            raise LookupError("Environment variable name not found!")
        return EnvVarLoader.__env_vars[var_name]  # Returns variable value
#
# Module-level singleton handle: import `ev_loader` to read/load env vars.
ev_loader = EnvVarLoader.getInstance()
|
# Demo: iterate a dict's values, then show the keys view object.
squares = {1: 1, 2: 4, 3: 9}
print(" ".join(str(v) for v in squares.values()), end=" ")  # 1 4 9
print(squares.keys())
#!/usr/bin/env python
# import rospy
# from system_statuses.msg import Camera2Changer
# def camera_2_changer():
# pub = rospy.Publisher('camera_2_changer_chatter', Camera2Changer, queue_size=10)
# rospy.init_node('camera_2_changer_talker', anonymous=True)
# # r = rospy.sleep(10) # 10hz
# msg = Camera2Changer()
# msg.camera_2_value = 0
# while not rospy.is_shutdown():
# msg.camera_2_value = (msg.camera_2_value + 1) % 2
# rospy.loginfo(msg)
# pub.publish(msg)
# r.sleep()
## rospy.sleep(2.)
#if __name__ == '__main__':
# try:
# camera_2_changer()
# except rospy.ROSInterruptException:
# pass
|
# Concatenate three word lists into a single list and print it.
first = ["behind", "every", "great", "man"]
second = ["is", "a", "woman"]
third = ["rolling", "here", "eyes"]
joined = [*first, *second, *third]
print(joined)
|
"""
Created by Alex Wang on 2018-03-19
tf.data.Dataset
"""
import numpy as np
import tensorflow as tf
import cv2
def dataset_one_shot():
    """
    Traverse a Dataset once with a one-shot iterator: needs no explicit
    initialization, but does not support parameterization.
    :return: None
    """
    array = np.array(range(10))
    dataset = tf.data.Dataset.from_tensor_slices(array)
    iterator = dataset.make_one_shot_iterator()
    next_one = iterator.get_next()
    sess = tf.Session()
    for i in range(10):
        # Each run() advances the iterator by one element.
        next_value = sess.run([next_one])
        print(next_value)
def dataset_initialized():
    """
    Initializable Dataset that supports placeholder parameters: the same
    iterator is re-initialized with two different feeds.
    :return: None
    """
    array = np.array(range(10))
    array_two = np.array(range(10, 20))
    param = tf.placeholder(tf.int32, shape=[len(array)], name='param')
    dataset = tf.data.Dataset.from_tensor_slices(param)
    iterator = dataset.make_initializable_iterator()
    next_one = iterator.get_next()
    sess = tf.Session()
    # First pass: feed the 0..9 array.
    sess.run(iterator.initializer, feed_dict={param: array})
    for i in range(10):
        next_value = sess.run([next_one])
        print(next_value)
    # Second pass: re-initialize with the 10..19 array.
    sess.run(iterator.initializer, feed_dict={param: array_two})
    for i in range(10):
        next_value = sess.run([next_one])
        print(next_value)
def _parse_tfrecords_func(record):
    """
    Parse one serialized tf.train.Example record from a TFRecord file.
    :param record: scalar string tensor holding a serialized Example.
    :return: (flat uint8 image, width, height, channel, reshaped image)
    """
    features = {"img": tf.FixedLenFeature((),tf.string, default_value = ''),
                "label": tf.FixedLenFeature((), tf.int64, default_value=0),
                "width": tf.FixedLenFeature((), tf.int64, default_value=0),
                "height": tf.FixedLenFeature((), tf.int64, default_value=0),
                "channel": tf.FixedLenFeature((), tf.int64, default_value=0)}
    parsed_features = tf.parse_single_example(record, features)
    for key in parsed_features:
        print(key, type(parsed_features[key]))
    print(type(parsed_features['img']))
    img = tf.decode_raw(parsed_features['img'], tf.uint8)
    # NOTE(review): reshape target is (width, height, channel); image tensors
    # are conventionally (height, width, channel) — confirm against the writer.
    img_reshape = tf.reshape(img, (tf.stack([parsed_features['width'], parsed_features['height'], parsed_features['channel']])))
    return img, parsed_features['width'], parsed_features['height'], parsed_features['channel'], img_reshape
def dataset_tfrecords():
    """
    Read TFRecord files with tf.data.Dataset, decode each record with
    `_parse_tfrecords_func`, and display the first decoded image.
    :return: None
    """
    tfrecords_files = ['tfrecords_example']
    dataset = tf.data.TFRecordDataset(tfrecords_files)
    dataset = dataset.map(_parse_tfrecords_func)
    # BUG FIX: Dataset.repeat() returns a NEW dataset rather than mutating in
    # place; the original discarded the result, so the dataset never repeated.
    dataset = dataset.repeat()
    iterator = dataset.make_initializable_iterator()
    next_elem = iterator.get_next()
    sess = tf.Session()
    sess.run(iterator.initializer)
    for i in range(1):
        next_elem_value = sess.run(next_elem)
        print(type(next_elem_value))
        img, img_width, img_height, img_channel, img_reshape = next_elem_value
        cv2.imshow('img_reshape', img_reshape)
        cv2.waitKey(0)
    cv2.destroyAllWindows()
# Script entry point — runs the TFRecord demo; the other demos stay
# commented out for manual selection.
if __name__ == '__main__':
    # dataset_one_shot()
    # dataset_initialized()
    dataset_tfrecords()
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'jdroot'
from pylons.controllers import WSGIController
from pylons import app_globals, request, session
from pylons import config
from pycloud.manager.lib import auth
# Creating this just to have it for the future
class BaseController(WSGIController):
    """Base class for manager controllers: routes requests to
    '<HTTP METHOD>_<action>' handlers and enforces authentication."""

    def __call__(self, environ, start_response):
        # Rewrite the routed action to '<METHOD>_<action>' so controllers can
        # define e.g. GET_index / POST_index separately.
        action = request.environ['pylons.routes_dict'].get('action')
        if action:
            method = request.method.upper()
            # HEAD requests are answered by the GET handler.
            if method == 'HEAD':
                method = 'GET'
            handler_name = method + '_' + action
            #request.environ['pylons.routes_dict']['action_name'] = action
            request.environ['pylons.routes_dict']['action'] = handler_name
        self.environ = environ
        ret = WSGIController.__call__(self, environ, start_response)
        return ret

    def __before__(self):
        # Only check authentication if we are not the authentication controller.
        if type(self).__name__ != app_globals.cloudlet.auth_controller and app_globals.cloudlet.auth_enabled == 'true':
            # Ensure authentication.
            auth.ensure_authenticated()
        # Make the database available on every request
        self.cloudlet = app_globals.cloudlet
        self.pre()

    def __after__(self):
        # Run the subclass post-request hook.
        self.post()

    def pre(self):
        # Hook for subclasses: runs before each request. Default: no-op.
        pass

    def post(self):
        # Hook for subclasses: runs after each request. Default: no-op.
        pass
# Get a boolean from a request param
def bool_param(name, default=False):
    """Return request param `name` as a bool; truthy strings are
    'T'/'TRUE'/'Y'/'YES' (case-insensitive)."""
    value = request.params.get(name, default)
    if isinstance(value, bool):
        return value
    return value.upper() in ['T', 'TRUE', 'Y', 'YES']
from flask.ext.wtf import Form
from wtforms import TextField
from wtforms.validators import Required
class VerifyHandphoneForm(Form):
    """Single-field form collecting the handphone verification code."""
    handphone_hash = TextField('Enter verification code here', validators=[Required()])
import markdown
from pykwiki.core import conf, Post
POST_RE = r'(\[\[([a-zA-Z0-9]+.*?)\]\])'
SEC_START_RE = r'(\{section:(.*?)\})'
SEC_END_RE = r'(\{endsection\})'
class SectionStartPattern(markdown.inlinepatterns.Pattern):
    """ Clean {section:} start tag """
    def handleMatch(self, m):
        # Strip the marker entirely from the rendered output.
        return ''
class SectionEndPattern(markdown.inlinepatterns.Pattern):
    """ Clean {endsection} tag """
    def handleMatch(self, m):
        # Strip the marker entirely from the rendered output.
        return ''
class PostPattern(markdown.inlinepatterns.Pattern):
    """Expand [[page]], [[page:action]] and [[page:section:name]] wiki links."""

    def handleMatch(self, m):
        # The match body looks like "page", "page:action" or "page:action:extra".
        parts = m.group(3).split(':')
        post = parts[0]
        action = parts[1] if len(parts) > 1 else 'link'
        extra = parts[2] if len(parts) > 2 else None
        if not post.endswith(conf.source_ext):
            post += conf.source_ext
        pg = Post(post)
        # For [[page:link]]
        if action == 'link':
            url = '%s/%s'%(conf.web_prefix,
                post.replace(conf.source_ext, conf.target_ext))
            el = markdown.util.etree.Element("a")
            el.set('href', url)
            el.text = markdown.util.AtomicString(pg.title)
            return el
        # For [[page:url]]
        if action == 'url':
            return '%s/%s'%(conf.web_prefix,
                post.replace(conf.source_ext, conf.target_ext))
        # For [[page:description]]
        if action == 'description':
            return pg.description
        # For [[page:blurb]]
        if action == 'blurb':
            return pg.blurb
        # For [[page:title]]
        if action == 'title':
            return pg.title
        # For [[page:section:section_name]]
        if action == 'section':
            if not extra:
                return 'No section name given'
            sec = pg.get_section(extra, raw=False)
            if not sec:
                return 'Cannot find section: %s'%(extra)
            el = markdown.util.etree.Element("div")
            markdown.Markdown().parser.parseChunk(el, sec)
            return el
        return 'Invalid action: %s'%(action)
class PostExtension(markdown.Extension):
    """Markdown extension registering the pykwiki inline patterns."""
    def extendMarkdown(self, md, md_globals):
        # Register the wiki-link and section-marker patterns with the parser.
        md.inlinePatterns['pykwiki.post'] = PostPattern(POST_RE, md)
        md.inlinePatterns['pykwiki.post.section_start'] = SectionStartPattern(SEC_START_RE, md)
        md.inlinePatterns['pykwiki.post.section_end'] = SectionEndPattern(SEC_END_RE, md)
def makeExtension(**kwargs):
    """Entry point used by markdown.Markdown(extensions=[...]) to build the extension."""
    return PostExtension(**kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import sys
from pacmangame import constants
import arcade
class ArcadeOutputService(arcade.Window):
    """Outputs the game state by drawing it with the arcade library.

    Stereotype:
        Service Provider
    """

    def __init__(self):
        """The class constructor.

        BUG FIX: the original signature was ``def __init__(arcade)`` — the
        receiver was accidentally named ``arcade`` (shadowing the module) and
        ``self`` was missing. Call semantics for callers are unchanged.
        """
        # NOTE(review): arcade.Window.__init__ is intentionally not invoked
        # here, matching the original's bare ``pass`` — confirm the window is
        # initialized elsewhere before drawing.
        pass

    def clear_screen(self, score):
        """Start a fresh frame and draw the current score.

        Args:
            score: Value rendered at the bottom-right of the window.
        """
        arcade.start_render()
        arcade.draw_text(score, 510, 0, arcade.color.WHITE, 35)

    def draw_actor(self, actor):
        """Renders the given actor's text on the screen.

        Args:
            actor (Actor): The actor to render.
        """
        # It would be nice to get the image information from the Actor here and
        # then pass it along to the arcade service methods, but that doesn't jive
        # with the `Sprite` model very well.
        actor.draw()
|
import sys
import socket
import json
import pickle
from socket import error as socket_error
from client_functions import *
from server_functions import *
"""
function: init()
parameter: None
return: none
This function will get the value for clustercfg and ddlfile
then declare them as global values
"""
def init():
    """Read the cluster-config and csv file paths from argv into module globals."""
    global clustercfg
    global csvfile
    # argv[1]: cluster configuration file; argv[2]: csv data file to partition.
    clustercfg = sys.argv[1]
    csvfile = sys.argv[2]
"""
function: get_partmtd()
parameter: (string) partmtd_name: name of partition method
return: (int) partmtd: a number's version of the partition method name
This function will translate the partition method as a string
to an integer
"""
def get_partmtd(partmtd_name):
    """
    Translate a partition-method name into its numeric code.
    :param partmtd_name: name of partition method
    :return: 0 (notpartition), 1 (range), 2 (hash), or -1 for anything else
    """
    codes = {"notpartition": 0, "range": 1, "hash": 2}
    return codes.get(partmtd_name, -1)
"""
function: main()
parameter: none
return: none
Main function of the program
"""
def main():
    """Partition a csv file across cluster nodes according to the cluster config.

    Verifies the configured node count matches the catalog, tags each catalog
    entry with the partition parameters, ships the csv to each node, then
    updates the catalog.
    """
    init()
    cfg = parse_config(clustercfg)
    cat_db_name = parseUrl(cfg['catalog.hostname'])['db']
    # Catalog node descriptor; 'loop': True — semantics defined by do_connect.
    cat_node = {'url': cfg['catalog.hostname'], 'tName': cfg['tablename'].upper(), 'loop': True}
    cat_db = do_connect(cat_node, clustercfg, None, 'parse_cat_db')
    partmtd = get_partmtd(cfg['partition.method'])
    # For hash partitioning (2) the node count comes from the hash parameter.
    if(partmtd == 2):
        numnodes = int(cfg['partition.param1'])
    else:
        numnodes = int(cfg['numnodes'])
    numnodes_cat_db = count_db_nodes(cat_db)
    if(numnodes == numnodes_cat_db):
        #identify partition method
        returnVal = []
        for i in range(numnodes):
            #check if the table is already partitioned, if it's not then partition the table
            if(cat_db[i]['partmtd'] == None or cat_db[i]['partmtd'] == 0 or cat_db[i]['partmtd'] == partmtd):
                cat_db[i]['partmtd'] = partmtd
            else:
                print("Error: the table is already partitioned with a different method.")
                sys.exit()
            cat_db[i]['tNames'] = cfg['tablename']
            cat_db[i]['delimiter'] = cfg['delimiter']
            cat_db[i]['csvfile'] = csvfile
            # Range partitioning: per-node bounds on the partition column.
            if (partmtd == 1):
                cat_db[i]['partcol'] = cfg['partition.column']
                cat_db[i]['partparam1'] = cfg['partition.node%d.param1' % (i + 1)]
                cat_db[i]['partparam2'] = cfg['partition.node%d.param2' % (i + 1)]
            # Hash partitioning: one shared hash parameter.
            elif (partmtd == 2):
                cat_db[i]['partcol'] = cfg['partition.column']
                cat_db[i]['partparam1'] = cfg['partition.param1']
            else:
                pass
            do_connect(cat_db[i], csvfile, returnVal, 'csv')
        #update catalog
        cat_cp = {}
        cat_cp['url'] = cfg['catalog.hostname']
        cat_cp['data'] = cat_db
        do_connect(cat_cp, clustercfg, returnVal, 'catalog_csv' )
        for value in returnVal:
            print('[' + value['url'] + ']:', value['ddlfile'], value['status'])
    else:
        print("Error: Number of nodes in catalog and numnodes in {} doesn't match" \
            .format( clustercfg))
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.contrib import admin
from django.template import Template, Context, Library
from django.core.mail import EmailMessage
from profiles.models import Applicant, EmailTemplate, Event, EventLocation, Interest, Skillset
import json
from profiles.csv_export import CsvExport
from django.contrib.admin.views.main import ChangeList
def csv_export(admn, request, queryset):
    """Admin action: export the selected records as CSV."""
    return CsvExport().csv_export(queryset)
csv_export.short_description = "Export selected records as csv"
# Register the action globally so every ModelAdmin offers it.
admin.site.add_action(csv_export, 'csv_export')
def csv_export_all(admn, request, queryset):
    """Admin action: export ALL records matching the current filters as CSV."""
    # We want the original criteria from the query set, but with a much higher limit
    cl = ChangeList(request, admn.model, admn.list_display, admn.list_display_links, admn.list_filter, admn.date_hierarchy, admn.search_fields, admn.list_select_related, 100000, admn.list_editable, admn)
    # NOTE(review): get_query_set() is the pre-Django-1.6 spelling — confirm
    # the installed Django version supports it.
    return CsvExport().csv_export(cl.get_query_set())
csv_export_all.short_description = "Export ALL records as csv (select at least one record to use)"
admin.site.add_action(csv_export_all, 'csv_export_all')
class ApplicantAdmin(admin.ModelAdmin):
    """Admin for Applicant records: change-list configuration plus bulk
    templated-e-mail actions (invitations, declinations, reference checks)."""

    def linkedin_link(self, obj):
        """Change-list column: clickable LinkedIn profile icon."""
        return '<a href="%s" target="_new"><nobr><img src="/static/img/linkedin_icon.png">Profile</nobr></a>' % (obj.linkedin_url)
    linkedin_link.allow_tags = True
    linkedin_link.short_description = 'LinkedIn'

    class Media:
        js = (
            "js/fd_applicant_admin.js", # first because it puts jquery back into main name space
            "js/jquery-ui-1.8.13.min.js"
        )
        css = {
            "all": ("css/jquery-ui-1.8.13.custom.css", "css/admin.css",)
        }

    def references(self, obj):
        """Change-list column: the applicant's references as mailto links."""
        out = ''
        if len(obj.recommend_json) > 1:
            jrec = json.loads(obj.recommend_json)
            for rec in jrec:
                if rec['name'] != "":
                    # Truncate long names so the column stays readable.
                    if len(rec['name']) > 25:
                        name = rec['name'][:25] + "..."
                    else:
                        name = rec['name']
                    out += '<a href="mailto:' + rec['email'] + '">' + name + '</a><br />'
        return out
    references.allow_tags = True

    def bulk_email(self, request, queryset):
        """Render the posted subject/message templates per applicant and send."""
        emails_sent = 0
        try:
            subject_template = Template("{% load fd_tags %} " + request.POST.get("subject"))
            message_template = Template("{% load fd_tags %} " + request.POST.get("message"))
        except Exception as e:
            self.message_user(request, "Error parsing template: " + str(e))
            return
        email = EmailMessage(
            bcc = request.POST.get("bcc"),
            from_email = request.POST.get("from"))
        for applicant in queryset:
            c = Context({"applicant": applicant, "event": applicant.event})
            try:
                email.subject = subject_template.render(c)
                email.body = message_template.render(c)
            except Exception as e:
                # BUG FIX: was '"Error rendering message: " % str(e)' — applying
                # % to a string with no conversion specifier raises TypeError,
                # so the user never saw the error. Concatenate instead.
                self.message_user(request, "Error rendering message: " + str(e))
                break
            email.to = [request.POST.get("override_to", applicant.email)]
            email.send()
            emails_sent += 1
        self.message_user(request, "%s e-mails sent" % emails_sent)

    def email_references(self, request, queryset):
        """Mark applicants as 'checking references' and e-mail each reference."""
        queryset.update(event_status="checking references")
        emails_sent = 0
        try:
            subject_template = Template("{% load fd_tags %} " + request.POST.get("subject"))
            message_template = Template("{% load fd_tags %} " + request.POST.get("message"))
        except Exception as e:
            self.message_user(request, "Error parsing template: " + str(e))
            return
        email = EmailMessage(
            bcc = request.POST.get("bcc"),
            from_email = request.POST.get("from"))
        for applicant in queryset:
            references = json.loads(applicant.recommend_json)
            for reference in references:
                c = Context({"applicant": applicant, "event": applicant.event, "reference": reference })
                try:
                    email.subject = subject_template.render(c)
                    email.body = message_template.render(c)
                except Exception as e:
                    # BUG FIX: same bad %-format as bulk_email (raised TypeError).
                    self.message_user(request, "Error rendering message: " + str(e))
                    break
                email.to = [request.POST.get("override_to", applicant.email)]
                email.send()
                emails_sent += 1
        self.message_user(request, "%s reference e-mails sent" % emails_sent)
    email_references.short_description = "Email the references for the selected applicants"

    def email_declination(self, request, queryset):
        """Mark applicants denied and send them the declination template."""
        queryset.update(event_status="denied")
        self.bulk_email(request, queryset)
    email_declination.short_description = "Email a declination to selected applicants"

    def invite_to_event(self, request, queryset):
        """Send the invitation template to the selected applicants."""
        self.bulk_email(request, queryset)
    invite_to_event.short_description = "Invite the selected candidates to event"

    def email_applicant(self, request, queryset):
        """Send an arbitrary templated e-mail to the selected applicants."""
        self.bulk_email(request, queryset)
    email_applicant.short_description = "Email selected applicants"

    list_display = ('name', 'event_status', 'founder_type', 'event_group', 'linkedin_link', 'references', 'comments')
    list_filter = ['event', 'event_status', 'founder_type', 'event_group', 'can_start', 'idea_status']
    list_editable = ('founder_type', 'event_group', 'event_status', 'comments')
    date_hierarchy = "created_at"
    ordering = ["-created_at"]
    save_on_top = True
    list_select_related = True
    search_fields = ['name', 'email']
    actions = [email_references, email_declination, invite_to_event, email_applicant]
    radio_fields = {"founder_type": admin.HORIZONTAL}
    fieldsets = (
        ("Basic", {
            'fields': ('name', 'email', 'linkedin_url')
        }),
        ('Event Categorization', {
            'fields': ('event', 'event_status', 'founder_type', 'event_group')
        }),
        ('Bio', {
            'fields': ('can_start', 'idea_status', 'bring_skillsets_json', 'need_skillsets_json', 'recommend_json', 'interests_json', 'past_experience_blurb', 'bring_blurb', 'building_blurb')
        })
    )
admin.site.register(Applicant, ApplicantAdmin)

# Admin list configuration for events.
class EventAdmin(admin.ModelAdmin):
    list_display = ('event_location', 'event_date')
    list_filter = ['event_location']
    ordering = ["-event_date"]
    date_hierarchy = "event_date"
admin.site.register(Event, EventAdmin)

# Admin list configuration for event locations.
class EventLocationAdmin(admin.ModelAdmin):
    list_display = ('display', 'city', 'state', 'country')
    ordering = ["display"]
admin.site.register(EventLocation, EventLocationAdmin)

# Skillsets ordered by their explicit 'ord' column.
class SkillsetAdmin(admin.ModelAdmin):
    list_display = ('name', 'ord')
    ordering = ["ord"]
admin.site.register(Skillset, SkillsetAdmin)

# Interests ordered by their explicit 'ord' column.
class InterestAdmin(admin.ModelAdmin):
    list_display = ('name', 'ord')
    ordering = ["ord"]
admin.site.register(Interest, InterestAdmin)

# E-mail templates searchable by name, subject, and body text.
class EmailTemplateAdmin(admin.ModelAdmin):
    search_fields = ['name', 'subject', 'message']
admin.site.register(EmailTemplate, EmailTemplateAdmin)
|
# Read a vector of 5 numbers from the user and print the vector.
vetor = []
# Idiomatic fixed-count loop instead of a manual while-counter.
for _ in range(5):
    n = int(input("Digite um número: "))
    vetor.append(n)
print("Vetor lido: ", vetor)
|
import numpy as np
import matplotlib.pyplot as plt
# #Uncomment if posterior predictive is also inferred
# chain0, chain1, chain2, chain3, chain4, chain5 = np.loadtxt('pchain.dat', usecols = (0,1,2,3,4,5), unpack=True)
#Uncomment if posterior predictive is not inferred
chain0, chain1, chain2, chain3, chain4 = np.loadtxt('pchain.dat', usecols = (0,1,2,3,4), unpack=True)

# x-axis: one point per MCMC step.
MCMC_step = np.arange(len(chain0))

def _plot_chain(chain, ylabel, filename):
    """Plot one parameter's trace against MCMC step and save it to `filename`.

    Replaces five copy-pasted plotting stanzas from the original with a
    single helper (same figure layout, colors, and output names).
    """
    fig = plt.figure(figsize=(10,7))
    ax = fig.add_axes([0.10,0.15,0.85,0.75])
    plt.plot(MCMC_step, chain, color='red', linewidth=2, label='Mean prediction')
    ax.set_xlabel("MCMC Step",fontsize=22)
    ax.set_ylabel(ylabel,fontsize=22)
    plt.legend(loc='best')
    plt.savefig(filename)
    plt.clf()

for idx, chain in enumerate((chain0, chain1, chain2, chain3, chain4)):
    _plot_chain(chain, "Parameter %d" % idx, 'Chain%d.jpg' % idx)

# #Uncomment if posterior predictive is also inferred
# _plot_chain(chain5, "STD", 'ChainSTD.jpg')
"""empty message
Revision ID: 087d83f43bf5
Revises: d53d3f23bcfa
Create Date: 2021-08-07 09:16:49.219362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '087d83f43bf5'
down_revision = 'd53d3f23bcfa'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable string column 'ref' to the votes table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('votes', sa.Column('ref', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'ref' column from the votes table (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('votes', 'ref')
    # ### end Alembic commands ###
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 10:49:00 2019
@author: kj22643
"""
# BUG FIX: '%reset' is an IPython magic and is a SyntaxError when this file is
# run as a plain Python script; run the magic manually in IPython if needed.
import numpy as np
import pandas as pd
import os
import scanpy as sc
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import plot, show, draw, figure, cm
import matplotlib as plt
import random
from collections import OrderedDict
import copy
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
import plotnine as gg
import scipy as sp
import scipy.stats as stats
import sklearn as sk
import sklearn.model_selection as model_selection
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
import sklearn.feature_selection as feature_selection
import sklearn.linear_model as linear_model
import sklearn.pipeline as pipeline
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
os.chdir('/Users/kj22643/Documents/Documents/231_Classifier_Project/code')
from func_file import find_mean_AUC
from func_file import find_mean_AUC_SVM
path = '/Users/kj22643/Documents/Documents/231_Classifier_Project/data'
#path = '/stor/scratch/Brock/231_10X_data/'
os.chdir(path)
sc.settings.figdir = 'KJ_plots'
sc.set_figure_params(dpi_save=300)
sc.settings.verbosity = 3
adata = sc.read('daylin_anndata.h5ad')
adata.obs.head()
# current samples:
#BgL1K
#30hr
#Rel-1
#Rel-2
# We will change these to time points
#%% Assign survivor category in adata.obs
# Lineages seen in the relapse samples (Rel-1/Rel-2) are labelled 'res',
# everything else 'sens'.
longTreatLins = adata.obs.loc[(adata.obs['sample'].isin(['Rel-1','Rel-2']))&(adata.obs.lineage!='nan'),'lineage'].unique().tolist()
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==False,'survivor'] = 'sens'
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==True,'survivor'] = 'res'
# Map each sample to its treatment timepoint.
# NOTE(review): assumes samps comes back ordered BgL1K, 30hr, Rel-1, Rel-2 —
# confirm; unique() order depends on the data.
samps= adata.obs['sample'].unique()
timepoint = np.array(['t=0hr', 't=30hr', 't=1344hr'])
adata.obs.loc[adata.obs['sample']==samps[0], 'timepoint']='t=0hr'
adata.obs.loc[adata.obs['sample']==samps[1], 'timepoint']='t=30hr'
adata.obs.loc[adata.obs['sample']==samps[2], 'timepoint']='t=1344hr'
adata.obs.loc[adata.obs['sample']==samps[3], 'timepoint']='t=1344hr'
#%% Separately make dataframes for the pre-treatment, intermediate, and post treatment samples
# t=0 hr (pre-treatment), 3182 pre treatment cells
# We want to keep the info about the lineage so we can potentially
# use it to make evenly divided testing and training data sets
adata_pre = adata[adata.obs['timepoint']=='t=0hr', :]
dfpre = pd.concat([adata_pre.obs['survivor'], adata_pre.obs['lineage'],
                   pd.DataFrame(adata_pre.raw.X,index=adata_pre.obs.index,
                                columns=adata_pre.var_names),],axis=1)
# t = 30 hr (intermediate timepoint) 5169 int treatment cells
adata_int = adata[adata.obs['timepoint']=='t=30hr', :]
dfint = pd.concat([adata_int.obs['lineage'],
                   pd.DataFrame(adata_int.raw.X, index=adata_int.obs.index,
                                columns = adata_int.var_names),], axis=1)
# t=1344 hr (~roughly 8 weeks), 10332 post treatment cells
adata_post = adata[adata.obs['timepoint']=='t=1344hr', :]
dfpost = pd.concat([adata_post.obs['lineage'],
                    pd.DataFrame(adata_post.raw.X, index=adata_post.obs.index,
                                 columns = adata_post.var_names),],axis =1)
#%% Use sklearn to do principle component analysis on the entire pre-treatment sample
#X = dfpre.loc[:, dfpre.columns !='survivor', dfpre.columns !='lineage']
X = dfpre.drop(columns= ['survivor', 'lineage'])
# y: integer codes from factorizing the survivor label (0/1).
y= pd.factorize(dfpre['survivor'])[0]
# phi0: fraction of cells carrying factor code 1.
phi0 = sum(y)/len(y)
#%%
# Start by writing the pseudo-code to generate
# Step 1: Generate B bootstrap samples of size n with simple random sampling.
B=100 # The number of bootstrap data sets
ncells = len(y)
bootstrap_dict = {'Xbs':{}, 'ybs':{}, 'bs_indices':{}, 'mvec':{}, 'theta':{},}
Xbs = {}
ybs = {}
bs_indices = {}
mvec = {}
theta = {}
for i in range(B):
    # draw randomly from the indices 1:ncells to generate a boostrap sample from X and save it in a dictionary
    # NOTE(review): this rebinds the NAME bs_indices to a plain array each
    # iteration (not bs_indices[i] = ...), so the per-sample dict initialized
    # above is never filled and only the last draw survives — confirm intent.
    bs_indices= np.random.randint(ncells,size = ncells)
    # add these indices to the dictionary
    # make the m vector, which counts the number of 0s through ns observed in bs_indices
    #and add this to the dictionary
    mvec[i] = np.zeros((ncells,1)) # placeholder, this will be filled with the number of observations at each index in the BS sample
    # make the bootstrapped data set using the bootstrapindices
    Xbs[i] = X.iloc[bs_indices, :]
    # NOTE(review): y is a numpy array (output of pd.factorize) and has no
    # .iloc — this line will raise AttributeError; y[bs_indices] is presumably
    # what was intended.
    ybs[i] =y.iloc[bs_indices,:]
# Put all of the bootstrap samples into a dictionary that contains the B bootstrapped data sets
bootstrap_dict['Xbs'] = Xbs
bootstrap_dict['ybs']= ybs
bootstrap_dict['bs_indices'] = bs_indices
bootstrap_dict['mvec'] = mvec
# Within each bootstrap data set, need to perform LOOCV on each index, and keep track of the number of correctly predicted samples
for i in range(B):
    ct_corr = 0
    # declare the X for the bootstrap sample
    Xbs = bootstrap_dict['Xbs'][i]
    ybs = bootstrap_dict['ybs'][i]
    for j in range(ncells):
        # perform LOOCV on data set that excludes the jth index
        # need to remove all of the indices that contain 0, 1, 2.... n from training set
        # NOTE(review): bs_indices here is the single leftover array from the
        # first loop, so bs_indices[i]!=j is a scalar comparison, not a mask —
        # confirm this was meant to use per-sample index arrays.
        train_ind = bs_indices[i]!=j
        Xtrain = Xbs.iloc[train_ind,:]
        ytrain = ybs.iloc[train_ind,:]
|
import os
import sys
import shutil
def add_gc(datadir):
    """Rename `datadir` to include a "_gc0.0" tag just before its "_p0" part.

    No-op when the name already mentions "gc" or lacks the "_p0" marker.
    """
    print(datadir)
    if ("gc" in datadir):
        return
    key = "_p0"
    # BUG FIX: the original indexed sp[1] unconditionally, raising IndexError
    # for directory names without the "_p0" marker.
    if (key not in datadir):
        return
    sp = datadir.split(key)
    newdatadir = sp[0] + "_gc0.0" + key + sp[1]
    print(newdatadir)
    shutil.move(datadir, newdatadir)
def rm_gc(datadir):
    """Rename `datadir` with its "_gc0.0" tag removed; no-op when absent."""
    print(datadir)
    key = "_gc0.0"
    if (not key in datadir):
        return
    pieces = datadir.split(key)
    assert(len(pieces) > 1)
    renamed = pieces[0] + pieces[1]
    print(renamed)
    shutil.move(datadir, renamed)
if (__name__ == "__main__"):
if (len(sys.argv) < 2):
print("Syntax python " + os.path.basename(__file__) + " datasets")
sys.exit(1)
for datadir in sys.argv[1:]:
add_gc(datadir)
|
# BUG FIX: the standard-library module is 'statistics' — 'import statistic'
# raises ModuleNotFoundError unless a local module of that name shadows it.
import statistics

nnn = int(input())  # declared element count (read to consume the line; not otherwise used)
a = list(map(int, input().split()))
print(statistics.median(a))
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import glob
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.wrappers.scikit_learn import KerasRegressor
import tensorflow as tf
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input
from keras.layers import Embedding
from keras.layers.merge import add
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.utils import to_categorical
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# In[2]:
# BUG FIX: sys.exit() is called below but 'sys' was never imported in this file.
import sys

group_data=pd.read_csv('data.csv')
df=pd.read_csv('data.csv')

# Histogram the unique shower energies into log10(E) bins of width dlgE.
E = group_data.sdEnergy.unique()
lgE = np.log10(E)
lgEmin = 18.4
lgEmax = 20.1
dlgE = 0.1
lgECenters = np.arange(lgEmin + dlgE/2., lgEmax, dlgE)
ECenters = 10.**lgECenters
Counts = np.zeros(len(lgECenters))
c = np.zeros(len(lgE))  # per-energy bin index, reused later for the split
for i in range(0, len(lgE)):
    thisLgE = lgE[i]
    ibin = int((thisLgE - lgEmin)/dlgE)
    # Abort outright on an out-of-range energy.
    if(thisLgE < lgEmin or thisLgE > lgEmax):
        print ("Event energy out of range: lgE =", thisLgE)
        sys.exit()
    Counts[ibin] +=1
    c[i]=ibin
# In[4]:
train_count=[]
test_count=[]
val_count=[]
train=pd.DataFrame()
test=pd.DataFrame()
val=pd.DataFrame()
#group_data=df.copy()
# Hoisted out of the loop: unique() was recomputed on every inner iteration.
uniq_event_ids = df.eventId.unique()
# Split events into train/test per energy bin so both sets cover the spectrum.
for i in range(len(Counts)):
    # Event ids whose energy falls into bin i.
    id1 = [uniq_event_ids[j] for j in range(len(c)) if i == c[j]]
    if len(id1) >= 2:
        # IMPROVEMENT: the original '>2' and '==2' branches were byte-for-byte
        # identical, so they are merged here (behavior unchanged).
        train_id,test_id=train_test_split(id1, test_size=0.1, random_state=42)
        #val_id,test_id=train_test_split(temp, test_size=0.5, random_state=42)
        train_temp = group_data.loc[group_data['eventId'].isin(train_id)]
        test_temp = group_data.loc[group_data['eventId'].isin(test_id)]
        #val_temp = group_data.loc[group_data['eventId'].isin(val_id)]
        train=train.append(train_temp,ignore_index=True)
        test=test.append(test_temp,ignore_index=True)
        #val=val.append(val_temp,ignore_index=True)
        train_count.append(train_temp.shape[0])
        test_count.append(test_temp.shape[0])
        #val_count.append(val_temp.shape[0])
    elif len(id1)==1:
        # A lone event cannot be split; put it in the test set.
        test_temp = group_data.loc[group_data['eventId'].isin(id1)]
        test=test.append(test_temp,ignore_index=True)
        train_count.append(0)
        test_count.append(test_temp.shape[0])
        #val_count.append(0)
    else:
        train_count.append(0)
        test_count.append(0)
        #val_count.append(0)
print('training data count',sum(train_count))
print('testing data count',sum(test_count))
#print('validation data count',sum(val_count))
print('train data',train.shape)
print('test data',test.shape)
#print('val data',val.shape)
print(sum(train_count)+sum(test_count)+sum(val_count))
assert (sum(train_count)+sum(test_count)+sum(val_count))==group_data.shape[0]
train_count = pd.DataFrame(train_count)
train_count.to_csv('train_count.csv')
test_count = pd.DataFrame(test_count)
test_count.to_csv('test_count.csv')
#val_count = pd.DataFrame(val_count)
#val_count.to_csv('val_count.csv')
df_train=train.drop(columns=[ 'eventId', 'sId', 'sPMT','sTimeSec', 'sTimeNsec',
'sSignalStart', 'sSignalEnd','fdXmaxErr','sVEM','sStatus','sXrel','sYrel','sZrel'])
df_test=test.drop(columns=['eventId', 'sId', 'sPMT','sTimeSec', 'sTimeNsec',
'sSignalStart', 'sSignalEnd','fdXmaxErr','sVEM','sStatus','sXrel','sYrel','sZrel'])
#df_val=val.drop(columns=['eventId', 'sId', 'sPMT','sTimeSec', 'sTimeNsec',
#'sSignalStart', 'sSignalEnd','fdXmaxErr','sVEM','sStatus','sXrel','sYrel','sZrel'])
df.shape
train.to_csv('train.csv')
test.to_csv('test.csv')
#val.to_csv('val.csv')
# In[5]:
print(198+188+71)
# In[6]:
len(train_count)
# In[7]:
# Target is the FD Xmax; features are everything else.
y_train=np.array(df_train['fdXmax'].to_list())
y_train=np.reshape(y_train, (-1,1))
X_train=df_train.drop(columns=['fdXmax'])
y_test=np.array(df_test['fdXmax'].to_list())
y_test=np.reshape(y_test, (-1,1))
X_test=df_test.drop(columns=['fdXmax'])
#y_val=np.array(df_val['fdXmax'].to_list())
#y_val=np.reshape(y_val, (-1,1))
#X_val=df_val.drop(columns=['fdXmax'])
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# In[8]:
y_train
# In[9]:
X_train.head()
# In[10]:
# Split features into two branches: the first 17 scalar columns go to the
# dense branch, the remainder (presumably the signal trace) to the 1D-CNN
# branch -- TODO confirm column layout.
X_train1=X_train.iloc[: ,0:17].copy()
X_train2=X_train.iloc[: ,17:].copy()
X_test1=X_test.iloc[: ,0:17].copy()
X_test2=X_test.iloc[: ,17:].copy()
#X_val1=X_val.iloc[: ,0:17].copy()
#X_val2=X_val.iloc[: ,17:].copy()
# In[11]:
X_train.columns
# In[12]:
#y_train=np.array(df_train['fdXmax'].to_list())
#y_train=np.reshape(y_train, (-1,1))
# In[13]:
#X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.1)
# In[14]:
y_train.shape[0]
# In[15]:
# Min-max scale each branch and the target; scalers are fitted on the
# training split only and reused for test.
scaler_x1 = MinMaxScaler()
scaler_x2 = MinMaxScaler()
scaler_y = MinMaxScaler()
fit_X1=scaler_x1.fit(X_train1)
X_train1=fit_X1.transform(X_train1)
X_test1=fit_X1.transform(X_test1)
#X_val1=fit_X1.transform(X_val1)
fit_X2=scaler_x2.fit(X_train2)
X_train2=fit_X2.transform(X_train2)
X_test2=fit_X2.transform(X_test2)
#X_val2=fit_X2.transform(X_val2)
fit_y=scaler_y.fit(y_train)
y_train=fit_y.transform(y_train)
y_test=fit_y.transform(y_test)
#y_val=fit_y.transform(y_val)
# In[16]:
Y_train=list(y_train.ravel())
Y_test=list(y_test.ravel())
# In[ ]:
# In[17]:
X_train['sdEnergy'].to_list()
# In[18]:
# Rebuild the targets as two-column frames [unscaled energy, scaled Xmax];
# the custom loss below reads the energy column for its flux weight.
# (X_train itself was never transformed, so sdEnergy here is still in eV.)
d = {'sdEnergy':X_train['sdEnergy'].to_list(),'fdXmax':Y_train}
y_train=pd.DataFrame(d)
# In[19]:
d = {'sdEnergy':X_test['sdEnergy'].to_list(),'fdXmax':Y_test}
y_test=pd.DataFrame(d)
# In[20]:
y_test
# In[ ]:
# In[21]:
X_train1.shape
# In[22]:
# Conv1D expects (samples, timesteps, channels); add the channel axis.
X_train2 = np.expand_dims(X_train2,2)
#X_val2 = np.expand_dims(X_val2,2)
X_test2 = np.expand_dims(X_test2,2)
# In[23]:
def J(E):
# J01234 equation 3.1 from arXiv:1909.09073, ICRC19 Auger fit by Valerio Verzi
# does not set overall normalization
# parameters reflect central best-fit parameters
E01 = 0.15e18 # eV
E12 = 6.2e18
E23 = 12e18
E34 = 50e18
gamma0 = 2.92
gamma1 = 3.27
gamma2 = 2.2
gamma3 = 3.2
gamma4 = 5.4
j = 1
j *= E**(-gamma0)
j *= (1+(E/E01)**gamma0) / (1+(E/E01)**gamma1)
j *= (1+(E/E12)**gamma1) / (1+(E/E12)**gamma2)
j *= (1+(E/E23)**gamma2) / (1+(E/E23)**gamma3)
j *= (1+(E/E34)**gamma3) / (1+(E/E34)**gamma4)
#print(j)
return j
# In[ ]:
# In[24]:
1/J(10**21)
# In[ ]:
# import random
# y_pred=np.random.rand(26)
# print('y_pred',y_pred)
# ptweighted_mse(y_train, y_pred)
# In[25]:
import numpy as np
#from keras.layers import LSTM
np.random.seed(1337)
# Two-branch network: dense branch for the 17 scalar features...
inputs1 = Input(shape=(X_train1.shape[1],))
# NOTE(review): fe1 is created but never wired in -- fe2 is fed from inputs1
# directly, so this Dropout has no effect.  Presumably fe2 was meant to take
# fe1; confirm intent.
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(128, activation='relu')(inputs1)
#model = Sequential()
#model.add(Dense(10, input_dim=X_train.shape[1], kernel_initializer='normal', activation='relu'))
#model.add(Dense(300, kernel_initializer='normal', activation='relu'))
#model.add(Dense(100, kernel_initializer='normal', activation='relu'))
#model.add(Dense(30, kernel_initializer='normal', activation='relu'))
# NOTE(review): this Sequential is dead code -- `model` is rebound to the
# functional Model below.
model = Sequential()
# ...and a Conv1D branch for the trace input.
inputs2 = Input(shape=(X_train2.shape[1],X_train2.shape[2]))
se1=Conv1D(filters=64, kernel_size=3, activation='relu')(inputs2)
se2=Conv1D(filters=64, kernel_size=3, activation='relu')(se1)
# NOTE(review): se3 is also unused (se4 pools se2), so this Dropout is a
# no-op as well.
se3 = Dropout(0.5)(se2)
se4 = MaxPooling1D(pool_size=2)(se2)
se5 = Flatten()(se4)
se6 = Dense(128, activation='relu')(se5)
# Merge the branches by elementwise addition and regress a single
# sigmoid output (target is min-max scaled to [0, 1]).
decoder1 = add([fe2, se6])
decoder2 = Dense(128, activation='relu')(decoder1)
outputs = Dense(1, activation='sigmoid')(decoder2)
# merge the two input models
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
#model = Sequential()
#model.add(LSTM(50, input_shape=(X_train.shape[1], X_train.shape[2])))
#model.add(Dropout(0.2))
#model.add(Dense(1))
model.summary()
# In[40]:
import sys
def ptweighted_mse(y_true, y_pred):
    """Flux-weighted squared-error loss for Xmax regression.

    Expected dimensions:
      y_true -- [Nevents, 2]: column 0 is the unscaled SD energy in eV,
                column 1 the (scaled) true Xmax.
      y_pred -- [Nevents, 1]: network output.

    Each squared error is weighted by J(1e19)/J(E), i.e. inversely to the
    cosmic-ray flux, so rare high-energy events contribute more.
    """
    E = y_true[:, 0]          # primary energy, eV
    xmax_true = y_true[:, 1]  # target Xmax
    # BUG FIX: flatten the [N, 1] prediction to [N] before subtracting.
    # The original `y_true[:,1] - y_pred` broadcast [N] against [N, 1]
    # into an [N, N] matrix, silently summing N*N weighted errors.
    xmax_pred = tf.reshape(y_pred, [-1])
    # Debug traces (kept from the original implementation).
    tf.print("J(E):", tf.convert_to_tensor(J(E), dtype=tf.float32),
             output_stream=sys.stdout)
    diff2 = tf.square(xmax_true - xmax_pred)
    weight = J(10**19) / J(E)  # unit weight at 10^19 eV
    tf.print('weight', tf.convert_to_tensor(weight, dtype=tf.float32),
             output_stream=sys.stdout)
    loss = tf.reduce_sum(tf.multiply(diff2, weight))
    tf.print('loss', loss, output_stream=sys.stdout)
    sys.stdout.flush()
    return loss
# In[ ]:
# In[41]:
# Compile with the custom flux-weighted loss; also track it as a metric.
model.compile(loss=ptweighted_mse, optimizer='adam', metrics=[ptweighted_mse,'mae'])
# In[42]:
print(dir(tf.feature_column))
# In[43]:
# Train on both branches; note no validation_data is supplied.
history=model.fit([X_train1, X_train2], y_train, epochs=2, verbose=2, batch_size=8,shuffle=True)
# In[128]:
print(y_train)
# In[ ]:
# In[ ]:
# In[38]:
print(history.history.keys())
# "Loss"
plt.plot(history.history['loss'])
# NOTE(review): fit() above was called without validation data, so
# history.history has no 'val_loss' key -- this line would raise KeyError.
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig("model_loss.png")
plt.show()
# In[39]:
y_pred= model.predict([X_test1, X_test2])
# In[40]:
y_pred.shape
# In[41]:
from sklearn.metrics import mean_squared_error
from math import sqrt
# NOTE(review): ptweighted_mse returns a TF tensor; math.sqrt on it only
# works if eager execution converts it -- confirm this runs as intended.
print('normal mse',sqrt(ptweighted_mse(y_test, y_pred)))
#print('normal mae',mean_absolute_error(y_test, y_pred))
# In[ ]:
# In[78]:
y_test.shape
# NOTE(review): y_test at this point is the two-column DataFrame built in
# In[19]; reshaping (N, 2) into (N, 1) cannot work -- presumably only the
# fdXmax column was meant to be used here.  Verify.
y_test=np.reshape(y_test, (y_test.shape[0],1))
y_pred=np.reshape(y_pred, (y_test.shape[0],1))
# Undo the min-max scaling to get Xmax back in physical units.
y_test_normalized=scaler_y.inverse_transform(y_test)
y_pred_normalized=scaler_y.inverse_transform(y_pred)
y_test=np.reshape(y_test, (y_test.shape[0],))
y_pred=np.reshape(y_pred, (y_test.shape[0],))
y_test_normalized=np.reshape(y_test_normalized, (y_test.shape[0],))
y_pred_normalized=np.reshape(y_pred_normalized, (y_test.shape[0],))
# In[44]:
print('mse',sqrt(mean_squared_error(y_test_normalized, y_pred_normalized)))
# In[45]:
print('mae',mean_absolute_error(y_test_normalized, y_pred_normalized))
# In[46]:
# Persist scaled and unscaled truth/prediction pairs for offline analysis.
df_result=pd.DataFrame()
df_result['Normalized True Value']=y_test
df_result['Normalized Predicted Value']=y_pred
df_result['True Value']=y_test_normalized
df_result['Predicted Value']=y_pred_normalized
df_result.to_csv('results.csv')
# In[47]:
df1 = df_result[['True Value','Predicted Value']]
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# In[38]:
test.to_csv('test.csv')
# In[54]:
# Scatter of predicted vs true Xmax with the diagonal for reference.
_, ax = plt.subplots()
x = [ 400, 500, 600, 700,800,900]
y = [400, 500, 600, 700,800,900]
ax.scatter(x = df_result['True Value'], y=df_result['Predicted Value'], c = 'blue', label = 'Actual', alpha = 0.3)
#ax.scatter(x = range(0, y_pred.size), y=y_pred, c = 'red', label = 'Predicted', alpha = 0.3)
plt.plot(x,y)
plt.title('Actual and predicted values')
plt.xlabel('True')
plt.ylabel('Predicted')
plt.legend()
plt.savefig("actual_vs_predicted.png")
plt.show()
# In[40]:
# Histogram of the residuals.
diff = df_result['True Value'] - df_result['Predicted Value']
diff.hist(bins = 40)
plt.title('Histogram of prediction errors')
plt.xlabel('MPG prediction error')
plt.ylabel('Frequency')
plt.savefig('prediction_histogram.png')
# In[55]:
_, ax = plt.subplots()
ax.scatter(x = range(0, y_test.size), y=y_test, c = 'blue', label = 'Actual', alpha = 0.3)
ax.scatter(x = range(0, y_pred.size), y=y_pred, c = 'red', label = 'Predicted', alpha = 0.3)
plt.title('Actual and predicted values')
plt.xlabel('Observations')
plt.ylabel('xmax')
plt.legend()
plt.savefig("xmax_vs_observation.png")
plt.show()
# In[ ]:
# partial caption sequence model
# NOTE(review): the lines below look like leftovers from an image-captioning
# template (Embedding/LSTM over inputs2); they rebuild layers but are never
# compiled or fitted here.
inputs2 = Input(shape=(X_train2.shape[1],))
se1 = Embedding(X_train2.shape[0], 256, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
se3 = LSTM(256)(se2)
# decoder (feed forward) model
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
|
class BST:
    """A minimal binary-search-tree node: a key plus two child links."""

    def __init__(self, key):
        """Create a leaf node; both children start out empty."""
        self.key = key
        self.leftchild = None
        self.rightchild = None
# Demo: build a root node, inspect it, then attach a left child.
root = BST(10)
print(root.key)
print(root.leftchild)
print(root.rightchild)
root.leftchild = BST(5)
print(root.key)
print(root.leftchild)   # prints the child object's repr
print(root.leftchild.key)
print(root.rightchild)
|
from pyramid.httpexceptions import HTTPFound, HTTPUnauthorized
from pyramid.view import view_config
from blog.repo import errors
from blog.repo.mongodb import db
from blog.utils import validate_signup
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
    """Render the home template; the template needs no view data."""
    return dict()
@view_config(route_name='hello', renderer='templates/hello.pt')
def hello_view(request):
    """Greet a logged-in user, or redirect to signup when no session exists."""
    # NOTE(review): this reads the cookie named 'session', while signup_post
    # and login_post store the session id under 'user' -- one of the two key
    # names is presumably wrong; confirm and unify.
    session_id = request.cookies.get('session')
    username = db.sessions.get_username(session_id)
    if not username:
        print("no previous session. redirecting to /signup")
        return HTTPFound(location='/signup')
    else:
        try:
            user = db.users.get_user(username)
        except errors.EntryNotFound:
            # Session references a user that no longer exists.
            return HTTPUnauthorized(detail='Your session seems to be corrupted. Try restarting your session.')
        else:
            # Fall back to the username when no display name is stored.
            return {
                'name': user['name'] if 'name' in user else username,
                'username': username
            }
@view_config(route_name='signup', renderer='templates/signup.pt', request_method='GET')
def signup_get(request):
    """Return an empty signup-form context."""
    context = {field: '' for field in ('username', 'name', 'email')}
    context['errors'] = []
    return context
@view_config(route_name='signup', renderer='templates/signup.pt', request_method='POST')
def signup_post(request):
    """Validate the signup form, create the user and open a session."""
    res = validate_signup(request.params['username'], request.params['passw'], request.params['repeat'],
    request.params['name'], request.params['email'])
    if res['ok']:
        try:
            db.users.create_user(res['username'], res['passw'], res['name'], res['email'])
        except errors.EntryExists:
            res['errors'] = ["user with same name exists. try something else."]
            return res
        except errors.SomethingWentWrong as e:
            print("[!] ERROR:", e)
            res['errors'] = ["something went wrong. please try again."]
            return res
        else:
            try:
                # NOTE(review): assigning into request.cookies does not emit a
                # Set-Cookie header in Pyramid -- presumably
                # request.response.set_cookie(...) was intended; verify.
                request.cookies['user'] = db.sessions.start_session(res['username'])
            except errors.SomethingWentWrong as e:
                print("[!] ERROR:", e)
                res['errors'] = ["your signup succeeded, but there was an error later on. please, try reloging."]
                return res
            else:
                return HTTPFound(location='/hello')
    else:
        # Validation failed: re-render the form with its error list.
        return res
@view_config(route_name='login', renderer='templates/login.pt', request_method='GET')
def login_get(request):
    """Return an empty login-form context."""
    return dict(errors=[], username='')
@view_config(route_name='login', renderer='templates/login.pt', request_method='POST')
def login_post(request):
    """Check credentials and start a session, re-rendering the form on error."""
    res = {
        'username': request.params['username']
    }
    try:
        db.users.validate_user(request.params['username'], request.params['passw'])
    except errors.EntryNotFound:
        res['errors'] = ["user not found. please, make sure the username is correct."]
        return res
    except errors.WrongCredentials:
        res['errors'] = ["wrong credentials. make sure your password is correct."]
        return res
    except errors.SomethingWentWrong as e:
        print("[!] ERROR:", e)
        res['errors'] = ["something went wrong. please try again."]
        return res
    else:
        try:
            # NOTE(review): writing to request.cookies does not send a cookie
            # to the client (see signup_post); confirm intended mechanism.
            request.cookies['user'] = db.sessions.start_session(res['username'])
            return HTTPFound(location='/hello')
        except errors.SomethingWentWrong as e:
            print("[!] ERROR:", e)
            res['errors'] = ["something went wrong. please try again."]
            return res
@view_config(route_name='logout')
def logout_view(request):
    """End the current session.

    Success redirects to /hello; a missing session cookie goes straight to
    /login; a backend failure retries via /logout.
    """
    try:
        db.sessions.end_session(request.cookies['user'])
    except KeyError:
        # BUG FIX: in the original the `return HTTPFound('/login')` lived in
        # an unreachable try/else branch (the try block already returned),
        # and a missing 'user' cookie raised an uncaught KeyError.  Treat
        # "no cookie" as "not logged in" and send the user to the login page.
        return HTTPFound(location='/login')
    except errors.SomethingWentWrong as e:
        print("[!] ERROR:", e)
        return HTTPFound(location='/logout')
    else:
        return HTTPFound(location='/hello')
import click
from sqlalchemy.orm.exc import NoResultFound
from tabulate import tabulate
from .db import Domains, get_db_connection
@click.group()
def domain():
    # Parent command group; the domain subcommands (show/add/remove) are
    # registered on it at the bottom of this module.
    pass
@click.command()
@click.pass_obj
def show(obj):
    """Lists all domains."""
    session = get_db_connection(obj)
    rows = session.query(Domains).order_by(Domains.id)
    table = [{"id": entry.id, "domain": entry.domain} for entry in rows]
    click.echo(tabulate(table, headers="keys"))
@click.command()
@click.argument("domain", type=click.STRING)
@click.pass_obj
def add(obj, domain):
    """Add a domain."""
    session = get_db_connection(obj)
    session.add(Domains(domain=domain))
    try:
        session.commit()
    except Exception as err:
        # Surface the DB error to the user and abort with a non-zero exit.
        click.echo(err)
        raise click.Abort
    click.echo(f"Domain '{domain}' was successfully added.")
@click.command()
@click.argument("domain", type=click.STRING)
@click.option(
    "--yes",
    "confirmation",
    type=click.BOOL,
    is_flag=True,
    default=False,
    help="Delete domain without confirmation.",
)
@click.pass_obj
def remove(obj, domain, confirmation):
    """Delete a domain.""" # TODO: Check for existing accounts / aliases
    conn = get_db_connection(obj)
    try:
        del_domain = conn.query(Domains).filter_by(domain=domain).one()
    except NoResultFound:
        click.echo(f"Domain '{domain}' does not exist.")
        raise click.Abort
    if not confirmation:
        # BUG FIX: the prompt said "account" although this command deletes a
        # domain.
        click.confirm(
            f"Are you sure you want to delete the domain '{domain}'?", abort=True
        )
    conn.delete(del_domain)
    try:
        conn.commit()
    except Exception as e:
        click.echo(e)
        raise click.Abort
    # BUG FIX: "succesfully" -> "successfully" (matches add()'s wording).
    click.echo(f"Domain '{domain}' was successfully deleted.")
    return
# Attach the subcommands to the `domain` group.
domain.add_command(show)
domain.add_command(add)
domain.add_command(remove)
|
# Generated by Django 3.1.2 on 2020-10-31 14:01
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Task path fields to *_name (they now store names, not paths).
    dependencies = [
        ('testingSystem', '0003_auto_20201031_1349'),
    ]
    operations = [
        migrations.RenameField(
            model_name='task',
            old_name='checker_path',
            new_name='checker_name',
        ),
        migrations.RenameField(
            model_name='task',
            old_name='post_processor_path',
            new_name='post_processor_name',
        ),
    ]
|
import logging
from app import login_manager
from flask_login import UserMixin
from main import *
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: rebuild the User object for the session's user id.
    # NOTE(review): user_id is not validated against any store before use.
    return User(user_id)
class User(UserMixin):
    """Minimal Flask-Login user whose name is read from the database."""
    def __init__(self, id):
        self.id = id
        self.set_username()
    def set_username(self):
        # `cur` comes from the wildcard `from main import *` -- presumably an
        # open DB cursor shared module-wide; confirm it is request-safe.
        # NOTE(review): the query reads a table literally named "username",
        # and fetchone() returning None would raise TypeError on the
        # subscript below.
        cur.execute("SELECT username FROM username")
        username = cur.fetchone()
        self.username = username[0]
|
import sys
def lookfor(value, table, size, number):
    """Top-down memoized 0/1 knapsack.

    value  -- sequence of (profit, weight) pairs
    table  -- memo dict keyed by (remaining capacity, items considered);
              mutated in place
    size   -- remaining knapsack capacity
    number -- number of leading items of `value` still available
    Returns the maximum achievable profit.
    """
    if size == 0 or number == 0:
        table[(size, number)] = 0
        return table[(size, number)]
    elif (size, number) in table:
        return table[(size, number)]
    else:
        # Best profit when item number-1 is skipped.
        skip = lookfor(value, table, size, number - 1)
        # BUG FIX: an item whose weight exactly equals the remaining capacity
        # must be admissible, so the test is `>=`, not the original strict `>`
        # (which wrongly excluded exact fits and understated the optimum).
        if size >= value[number - 1][1]:
            take = lookfor(value, table, size - value[number - 1][1], number - 1) + value[number - 1][0]
        else:
            take = 0
        table[(size, number)] = max(skip, take)
        return table[(size, number)]
if __name__ == '__main__':
    # Python 2 driver: first line of the file is "<capacity> <item count>",
    # each following line "<profit> <weight>".
    f=open("knapsack_big.txt",'r')
    line=f.readline()
    # Deep memoized recursion needs a larger limit than the default 1000.
    sys.setrecursionlimit(2100)
    # NOTE(review): the input file is opened three separate times and `f`
    # above is never reused or closed; a single read would suffice.
    [size,number]=map(int,open("knapsack_big.txt",'r').readline().split())
    value=[map(int,x.split(' ')) for x in open("knapsack_big.txt",'r').read().split('\n')[1:-1]]
    print size
    print number
    print value
    table={}
    x=lookfor(value,table,size,number)
    print x
|
#!/usr/bin/env python
import os
import argparse
import uproot
import ROOT as r
import pandas as pd
import joblib
import time
import fastjet as fj
import pythia8
import fjcontrib as rt
import fjext
def fj_parts_from_tracks(tracks):
    """Build fastjet PseudoJets from a tracks DataFrame, row by row.

    Tracks are treated as massless (M = 0).  Each PseudoJet is tagged via
    set_user_index with its DataFrame row index so it can be traced back.
    Slow reference implementation; see fj_parts_from_tracks_numpy.
    """
    # NOTE: the original also bound tracks['ParticlePt'/'Eta'/'Phi'] to
    # unused locals; those dead assignments were removed.
    fjparts = []
    for index, row in tracks.iterrows():
        lv = r.Math.PtEtaPhiMVector(row['ParticlePt'], row['ParticleEta'], row['ParticlePhi'], 0.0)
        psj = fj.PseudoJet(lv.Px(), lv.Py(), lv.Pz(), lv.E())
        psj.set_user_index(index)
        fjparts.append(psj)
    return fjparts
def fj_parts_from_tracks_numpy(tracks):
    """Vectorized PseudoJet construction via fjext (massless tracks).

    Much faster than fj_parts_from_tracks, but does NOT set user indices.
    """
    # Minor cleanup: the original initialised `fjparts = []` and immediately
    # overwrote it -- the dead assignment was dropped.
    return fjext.vectorize_pt_eta_phi(tracks['ParticlePt'].values,
                                      tracks['ParticleEta'].values,
                                      tracks['ParticlePhi'].values)
#----------------------------------------------------------------------
# this is from examples/python/02-area.py
def print_jets(jets):
    """Print a fixed-width table (index, pt, rapidity, phi, area) of `jets`.

    Each element must provide pt(), rap(), phi() and area() accessors.
    """
    print("{0:>5s} {1:>10s} {2:>10s} {3:>10s} {4:>10s}".format(
        "jet #", "pt", "rap", "phi", "area"))
    for ijet in range(len(jets)):
        jet = jets[ijet]
        # BUG FIX: the row format referenced field 3 (phi) twice and never
        # showed the area; it also passed the bound method `jet.area`
        # instead of calling it.  Field 4 now prints jet.area().
        print("{0:5d} {1:10.3f} {2:10.4f} {3:10.4f} {4:10.4f}".format(
            ijet, jet.pt(), jet.rap(), jet.phi(), jet.area()))
def fj_example_02_area(event):
    """Cluster `event` with anti-kt R=0.4 including jet areas, estimate the
    background density with a grid-median estimator, and print the jets
    before and after background subtraction (fastjet example 02-area)."""
    # cluster the event
    jet_def = fj.JetDefinition(fj.antikt_algorithm, 0.4)
    area_def = fj.AreaDefinition(fj.active_area, fj.GhostedAreaSpec(5.0))
    cs = fj.ClusterSequenceArea(event, jet_def, area_def)
    # Keep jets above 5 GeV, hardest first.
    jets = fj.SelectorPtMin(5.0)(fj.sorted_by_pt(cs.inclusive_jets()))
    print("jet def:", jet_def)
    print("area def:", area_def)
    print("#-------------------- initial jets --------------------")
    print_jets(jets)
    #----------------------------------------------------------------------
    # estimate the background
    maxrap = 4.0
    grid_spacing = 0.55
    gmbge = fj.GridMedianBackgroundEstimator(maxrap, grid_spacing)
    gmbge.set_particles(event)
    print("#-------------------- background properties --------------------")
    print("rho = ", gmbge.rho())
    print("sigma = ", gmbge.sigma())
    print()
    #----------------------------------------------------------------------
    # subtract the jets
    subtractor = fj.Subtractor(gmbge)
    subtracted_jets = subtractor(jets)
    print("#-------------------- subtracted jets --------------------")
    print_jets(subtracted_jets)
#----------------------------------------------------------------------
# MP note: read more about uproot unpacking at https://github.com/scikit-hep/uproot#filling-pandas-dataframes
# MP note: there may be more efficient way to do this...
def main(args):
    """Read ALICE PWGHF trees, cluster tracks per event with anti-kt, apply a
    grid-median rho*area subtraction, and dump the jet table via joblib.

    Also benchmarks the SWIG-vectorized track conversion against the
    per-row Python loop for every event.
    """
    fname = args.fname
    file = uproot.open(fname)
    # NOTE(review): this uses the uproot 3 API (allitems/TTreeMethods,
    # .pandas.df()); it will not run against uproot 4+ -- confirm pinning.
    all_ttrees = dict(file.allitems(filterclass=lambda cls: issubclass(cls, uproot.tree.TTreeMethods)))
    tracks = all_ttrees[b'PWGHF_TreeCreator/tree_Particle;1']
    pds_trks = tracks.pandas.df() # entrystop=10)
    events = all_ttrees[b'PWGHF_TreeCreator/tree_event_char;1']
    pds_evs = events.pandas.df()
    # print the banner first
    fj.ClusterSequence.print_banner()
    # signal jet definition
    maxrap = 0.9
    jet_R0 = args.jetR
    jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
    jet_selector = fj.SelectorPtMin(0.0) & fj.SelectorPtMax(1000.0) & fj.SelectorAbsEtaMax(1)
    jet_area_def = fj.AreaDefinition(fj.active_area, fj.GhostedAreaSpec(maxrap))
    print(jet_def)
    # background estimation
    grid_spacing = maxrap/10.
    gmbge = fj.GridMedianBackgroundEstimator(maxrap, grid_spacing)
    print()
    output_columns = ['evid', 'pt', 'eta', 'phi', 'area', 'ptsub']
    e_jets = pd.DataFrame(columns=output_columns)
    for i, e in pds_evs.iterrows():
        iev_id = int(e['ev_id'])
        # Tracks belonging to this event.
        _ts = pds_trks.loc[pds_trks['ev_id'] == iev_id]
        start = time.time()
        _tpsj = fj_parts_from_tracks_numpy(_ts)
        end = time.time()
        dt_swig = end - start
        start = time.time()
        _tpsj_for = fj_parts_from_tracks(_ts)
        end = time.time()
        dt_for = end - start
        # print ('len {} =?= {}'.format(len(_tpsj_for), len(_tpsj)))
        print ('[i] timing (ntracks={}): dt_for: {} dt_swig: {} ratio: {}'.format(len(_tpsj), dt_for, dt_swig, dt_for / dt_swig))
        # print('maximum particle rapidity:', max([psj.rap() for psj in _tpsj]))
        _cs = fj.ClusterSequenceArea(_tpsj, jet_def, jet_area_def)
        _jets = jet_selector(fj.sorted_by_pt(_cs.inclusive_jets()))
        gmbge.set_particles(_tpsj)
        # print("rho = ", gmbge.rho())
        # print("sigma = ", gmbge.sigma())
        # _jets = jet_selector(jet_def(_tpsj))
        # _jets_a = [[iev_id, j.perp(), j.eta(), j.phi()] for j in _jets]
        # _jets_a = pd.DataFrame(np.array([[iev_id, j.perp(), j.eta(), j.phi()] for j in _jets]), columns=['evid', 'pt', 'eta', 'phi'])
        # ptsub = pt - rho * area is the background-subtracted jet pt.
        _jets_a = pd.DataFrame( [[iev_id, j.perp(), j.eta(), j.phi(), j.area(), j.perp() - gmbge.rho() * j.area()] for j in _jets],
        columns=output_columns)
        # , columns=['evid, pt, eta, phi']
        # NOTE(review): DataFrame.append in a loop is deprecated and O(n^2);
        # collecting frames and pd.concat once at the end would be better.
        e_jets = e_jets.append(_jets_a, ignore_index=True)
        # print('event', i, 'number of parts', len(_tpsj), 'number of jets', len(_jets))
        # print(_jets_a.describe())
        if args.fjsubtract:
            fj_example_02_area(_tpsj)
    # print(e_jets.describe())
    joblib.dump(e_jets, args.output)
if __name__ == '__main__':
    # CLI entry point for the jet-reco script.
    parser = argparse.ArgumentParser(description='jet reco on alice data', prog=os.path.basename(__file__))
    # NOTE(review): --nevents is accepted but never read by main().
    parser.add_argument('-n', '--nevents', help='number of events', default=1000, type=int)
    parser.add_argument('-f', '--fname', help='input file name', type=str, default=None, required=True)
    parser.add_argument('-R', '--jetR', help='jet radius', default=0.4, type=float)
    parser.add_argument('-o', '--output', help='output file name', default='{}_output.joblib'.format(os.path.basename(__file__)), type=str)
    parser.add_argument('--fjsubtract', help='do and show fj subtraction', action='store_true', default=False)
    args = parser.parse_args()
    main(args)
    #fname = '/Users/ploskon/data/HFtree_trains/13-06-2019/488_20190613-0256/unmerged/child_1/0001/AnalysisResults.root'
|
"""
A set of turtles are made to race.
User sets the number of turtles to be racing.
User can place a bet on a turtle to win.
"""
import turtle
import random
import tkinter as tk
from tkinter import messagebox as mb
# Pool of colours assigned randomly to the racing turtles.
color_lst = ["red","blue","yellow","green","violet","gold",
"orange", "magenta"]
def fn_start_finish_line():
    """Draw the white start (y = -200) and finish (y = 200) lines.

    The line width scales with the number of racers (global num_t).
    """
    width = (num_t * 50) + 50
    # A throwaway, invisible pen turtle does the drawing.
    pen = turtle.Turtle()
    pen.color("white")
    pen.speed(1000)
    pen.hideturtle()
    for y_pos in (-200, 200):
        pen.penup()
        pen.goto(-300, y_pos)
        pen.pendown()
        pen.forward(width)
def fn_create_turtles(num_t):
    """Create `num_t` racer turtles with random colours and append them to
    the global list `t`."""
    # BUG FIX: the original first bound the loop name to the string
    # 't{}'.format(i) and immediately rebound it to a Turtle, so the string
    # was dead code; the dead assignment (and the unused 1-based range) were
    # removed.
    for _ in range(num_t):
        racer = turtle.Turtle()
        t.append(racer)
        racer.color(random.choice(color_lst))
def fn_startline():
    """Place each racer on the start line, facing up, labelled T1..Tn.

    Racers are spaced 50 px apart starting at x = -250.
    """
    # Improvement: the original called t.index(my_turtle) three times per
    # racer (O(n^2) overall); enumerate gives the 1-based position directly.
    for pos, racer in enumerate(t, start=1):
        racer.penup()
        racer.speed(1000)
        racer.goto(-300 + (50 * pos), -200)
        racer.pendown()
        racer.left(90)
        racer.write("T{}".format(pos), move=False,
                    font=('times new roman', 15, 'bold'))
def fn_move_forward():
    # Move each turtle ahead by a random distance at a random speed,
    # looping until one crosses the finish line (y >= 200).
    while (True):
        for item in t:
            dist = random.randint(0,100)
            item.speed(random.randint(0,1000))
            item.forward(dist)
            if (item.ycor() >= 200): # Check if winner
                # 1-based racer number, matching the T1..Tn labels.
                winner_name = "{}".format(t.index(item)+1)
                fn_announce_winner(winner_name)
                return None
scores=[] # Empty list to store players wins and losses
def fn_announce_winner(my_winner):
    """Show a message box naming the winner and record the player's result.

    Compares against the global `user_bet` read in the main loop and
    appends "WIN"/"LOSS" to the global `scores` list.
    """
    root = tk.Tk()
    root.withdraw()  # hide the empty root window; only the dialog shows
    if user_bet == my_winner:
        conclu = "You WIN!!!"
        scores.append("WIN")
        print("Win")
    else:
        conclu = "You LOSE!!!"
        scores.append("LOSS")
        print("Loss")
    my_msg = "Winner is Turtle: T{}! \n{}".format(my_winner, conclu)
    tk.messagebox.showinfo(title="Turtle Racing", message = my_msg)
def print_scores():
    """Print the win/loss tally accumulated in the global `scores` list."""
    num_match = len(scores)
    # BUG FIX: the original divided by num_match unconditionally and crashed
    # with ZeroDivisionError when the player quit before finishing a race.
    if num_match == 0:
        print("\nNo matches were played.")
        return
    num_win = scores.count("WIN")
    num_loss = scores.count("LOSS")
    per_win = 100 * (num_win / num_match)
    per_loss = 100 * (num_loss / num_match)
    print("\n\n**********************************")
    print("You played {} matche(s).".format(num_match))
    print("You Won {} matche(s). ({:.1f}%)".format(num_win, per_win))
    print("You Lost {} matche(s). ({:.1f}%)".format(num_loss, per_loss))
    print("**********************************")
    if num_win > num_loss:
        print("\nYOU WIN!!")
    elif num_win < num_loss:
        print("\nYOU LOSE!!")
    else:
        print("\nIts a DRAW! :( ")
# Main game loop: one race per iteration until the player quits, then show
# the aggregate scores and wait for a click to close the window.
while True:
    num_t = int(input("\nPlease input the number of turtles to race: "))
    user_bet = str(input("Place a bet on the winning Turtle: T"))
    t = [] # list of turtles to be racing
    t_screen = turtle.Screen()
    t_screen.clear()
    t_screen.bgcolor("black")
    fn_create_turtles(num_t)
    fn_start_finish_line()
    fn_startline()
    fn_move_forward()
    user_choice = input("\nDo you want to Quit (Y/N)? ")
    if user_choice.upper() == "Y":
        break
print_scores()
t_screen.exitonclick()
|
from django.urls import path
from UserManagementApp.views import *
# Account-management routes; the view callables come from the wildcard
# import of UserManagementApp.views above.
urlpatterns = [
    path('login/', login),
    path('logout/', logout),
    path('signup/', signup),
    # Email-activation link: uidb64 is the encoded user id, token the one-time token.
    path('activate/<uidb64>/<token>/', activate, name='activate'),
]
|
from uuid import uuid4
import logging
from django.utils.deprecation import MiddlewareMixin
class LogMiddleware(MiddlewareMixin):
    """Log every incoming request and outgoing response at the terminal."""

    def process_request(self, request):
        # Root-logger WARNING so the line appears under default logging config.
        logging.warning("Request is " + str(request))

    def process_response(self, request, response):
        logging.warning("Response is " + str(response))
        return response
class RawDataMiddleware(MiddlewareMixin):
    """Tag each incoming request with a unique id stored in request.META."""

    def process_request(self, request):
        request_id = uuid4()
        request.META['id'] = request_id
        # Log the freshly assigned id for correlation.
        logging.warning(request_id)
class IdentifyResponseMiddleware(MiddlewareMixin):
    """Stamp every outgoing response with a unique 'id' header."""

    def process_response(self, request, response):
        unique_id = uuid4()
        response['id'] = unique_id
        return response
|
# Product class
class Product(object):
def __init__(self, price, item_name, weight, brand, cost):
self.price = price
self.item_name = item_name
self.weight = weight
self.brand = brand
self.cost = cost
self.status = "for sale"
self.displayInfo()
def displayInfo(self):
print "Price : ", str(round(self.price,2))
print "Item name: ", self.item_name
print "Weight: ", str(self.weight)
print "Brand: ", self.brand
print "Cost: ", str(self.cost)
print "Status: ", self.status
print "______________________________"
def sell(self):
self.status = "sold"
return self
def add_tax(self, tax):
self.price *= (1+tax)
return price
def return_product(self, reason):
if reason == "defective":
self.status = "defective"
self.price = 0
elif reason == "opened":
self.status = "used"
self.price = self.price * 0.80
else:
self.status = "for sale"
return self
# Instance1 = product || defective
# Constructing a Product prints its card; return_product mutates it and the
# second displayInfo shows the updated status/price.
prod = Product(38.81, "KCM1202OB 12-Cup", 6.2, "KitchenAid", 18.34)
prod.return_product("defective")
prod.displayInfo()
# # Instance2 = product || in box
# "in box" is not a recognised reason, so the product goes back on sale.
prod = Product(34.99, "Beach 46205 12-Cup", 5.29 , "Hamilton", 12.97)
prod.return_product("in box")
prod.displayInfo()
# # # Instance3 = product || opened
prod = Product(79.99, "KCM1204OB 12-Cup ", 7, "KitchenAid ", 32.87)
prod.return_product("opened")
prod.displayInfo()
|
def fixSortedList(L):
    """(Unfinished) walk a linked list L with trailing pointers.

    NOTE(review): this function looks incomplete -- the while body is `pass`,
    so L never advances and the loop spins forever on lists with >= 3 nodes;
    `L.next.next` also assumes at least two nodes, and head_L/prev_prev/prev
    are set up but never used.  Presumably meant to repair/deduplicate a
    sorted linked list; needs an implementation.
    """
    head_L = L
    prev_prev = L
    prev = L.next
    L = L.next.next
    while L != None:
        pass
# GCP Pub/Sub settings for the streaming pipeline.
project_id='dataengg-streamdatatogcs'
topic_id='streamingApiData'
# SECURITY(review): hard-coded API credential committed to source; move it
# to an environment variable or a secret manager, and rotate this key.
api_key="e14fe56dc94dd3168ca34077c3025dd28d8bda1e"
|
from collections import deque
# Python 2 demo: a bounded deque silently drops items from the opposite
# end once maxlen is reached...
q = deque(maxlen=3)
q.append(1)
q.append(2)
q.append(3)
print q
q.append(4)   # evicts 1
print q
q.append(5)   # evicts 2
print q
# ...while an unbounded deque supports O(1) appends/pops at both ends.
q = deque()
q.append(1)
q.append(2)
q.append(3)
print q
q.appendleft(4)
print q
print q.pop()      # right end
print q
print q.popleft()  # left end
import json
import logging
import boto3
import requests
import os
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
# Commands the bot reacts to; anything else gets the default reply.
texts = ['/start','/stop']
class ResponseMessages():
    """Canned user-facing bot replies, keyed by outcome."""
    registered = "your path to meme-lord has begun! welcome"
    already_re = "you are already on the journey"
    # Typo fixes in the user-facing strings below: "aplogizes"->"apologizes",
    # "occured"->"occurred", "primarly"->"primarily", "delievery"->"delivery".
    error_re = "meme-supreme apologizes, we couldn't start your journey"
    deregistered = "good-bye my friend"
    error_occured = "I don't feel so good! an error occurred"
    default = "Hi! MemeSupreme is primarily a meme delivery bot and doesn't yet support conversations. Thanks!"
# Shared singleton used by the handlers below.
reply = ResponseMessages()
def lambda_handler(event, context):
    """API Gateway entry point: parse the Telegram webhook body and process it.

    Always answers 200 when processing succeeds; exceptions are logged and
    re-raised so the invocation is marked failed.
    """
    logger.info(f"Event {event}")
    logger.info(f"#Event Body {event['body']} ")
    body = json.loads(event['body'])
    try:
        process(body["message"])
    except Exception:
        # BUG FIX: narrowed the original bare `except:` (which also caught
        # SystemExit/KeyboardInterrupt) and log the traceback before
        # re-raising.
        logger.exception("Exception when invoking process")
        raise
    return {
        'statusCode': 200
    }
def process(request):
    """Dispatch one Telegram message.

    '/start' and '/stop' map to the perform_start/perform_stop handlers via
    globals(); any other text gets the default reply.
    """
    try:
        logger.info(f"request: {request}")
        user_id = request['from']['id']
        chat_id = request['chat']['id']
        username = request['from']['first_name']
        message = request['text']
        logger.info(f"user_id {user_id}, chat_id {chat_id}, message {message}")
        result_msg = reply.default
        if message.lower() in texts:
            # '/start' -> perform_start, '/stop' -> perform_stop
            method_name = "perform_" + message[1:].lower()
            result_msg = globals()[method_name](chat_id, user_id, username)
        send_reply(chat_id, result_msg)
    except Exception:
        # BUG FIX: narrowed the original bare `except:`; also fixed the
        # "exeception occured" typos in the log line.
        logger.error("exception occurred while trying to match message with function")
        raise
# if already exists skip and send different message else insert
def perform_start(chat_id,user_id,username):
    """Register the chat for meme delivery; idempotent via a conditional put.

    Returns the appropriate canned reply string.
    """
    chats = dynamodb.Table("Chats")
    try:
        # attribute_not_exists makes the insert fail cleanly when the chat
        # is already registered, instead of overwriting it.
        chats.put_item(Item={
            'chatid':str(chat_id),
            'userid':str(user_id),
            'username':str(username)
            },
            ConditionExpression='attribute_not_exists(chatid)'
        )
        logger.info("Successfully inserted the chatid in the db")
        return reply.registered
    except ClientError as e:
        if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
            # Already registered -- not an error.
            logger.info(f'already registered {e}')
            return reply.already_re
        else:
            logger.error(f"Error while inserting into table Chats {e.response['Error']['Message']}")
            return reply.error_re
def perform_stop(chat_id,user_id,username):
    """Deregister the chat (delete its row) and return the goodbye reply.

    Deleting a non-existent chat is a no-op for DynamoDB, so this is
    idempotent as well.
    """
    chats = dynamodb.Table("Chats")
    try:
        deleted = chats.delete_item(Key={
            'chatid':str(chat_id)
            },
            ReturnValues='ALL_OLD')
        logger.info(f"{deleted} deleted successfully")
        return reply.deregistered
    except ClientError as e:
        logger.error(e.response['Error']['Message'])
        return reply.error_occured
# decouple this and put it in seperate lambda?
def send_reply(chat_id, message):
    """POST `message` to the Telegram chat via the Bot API sendMessage call.

    The bot token is read from the `accesscode` environment variable.
    Failures are logged, not raised.
    """
    accesscode = os.environ['accesscode']
    try:
        url = f"https://api.telegram.org/bot{accesscode}/sendMessage"
        logger.info(f"url formed is {url}")
        response = requests.post(url, data={'chat_id': chat_id, 'text': message})
        logger.info(f"Successfully sent message! {message}")
    except requests.exceptions.RequestException as e:
        # BUG FIX: `RequestException` was referenced unqualified but never
        # imported, so the handler itself raised NameError whenever the POST
        # failed; qualify it via the already-imported requests module.
        logger.error(f"Couldn't send reply {e}")
from django.urls import path
from .views import *
# Product/category list and detail routes; the view callables come from the
# wildcard import above.  <int:idx> is the object's primary key.
urlpatterns = [
    path('products/', show_products),
    path('products/<int:idx>/', show_product),
    path('categories/', show_categories),
    path('categories/<int:idx>/', show_category)
]
# handle -help or -h
# 3+ flag arguments: -flag (like -f -rf)
# 3+ options: -arg - value
# positional argument: occurs at the end of all the -arg arguments
import sys
# Echo the raw argv for debugging.
print(sys.argv)
"""
45 minute drill
"""
def handle_args(args):
    """Print an interpretation of a simple CLI argument list.

    Recognised forms, checked in order for each token:
      * "-h" / "-help"              -> prints "HELP"
      * "-name value"               -> prints "name : value"
      * "-xyz" (bundled flag chars) -> prints a line per known flag (r, f, x)
    Tokens not starting with '-' (including positionals) are ignored.
    """
    total = len(args)
    for index, token in enumerate(args):
        if token in ("-h", "-help"):
            print("HELP")
        elif (token.startswith('-')
              and index + 1 < total
              and not args[index + 1].startswith('-')):
            # Option with a value: the next token is its argument.
            print(token[1:], ":", args[index + 1])
        elif token.startswith('-'):
            # Bundled single-character flags, e.g. "-rf".
            flag_chars = set(token[1:])
            if 'r' in flag_chars:
                print("Recursive")
            if 'f' in flag_chars:
                print("Force")
            if 'x' in flag_chars:
                print('Handles X flag')
handle_args(sys.argv)

"""
Post 45 minute drill
"""
# Fix: the original `def handle_args2(sys.argv):` was a SyntaxError -- a
# parameter name cannot be a dotted expression -- and its body called itself
# unconditionally (infinite recursion). The stub now takes a plain argument
# list and delegates to handle_args.
def handle_args2(args):
    """Parse `args` by delegating to handle_args (post-drill stub)."""
    handle_args(args)
|
#!/usr/bin/env python3
"""Show the current job queue.
"""
import qmk_redis
print('*** There are %s jobs on the queue.' % (len(qmk_redis.rq.jobs)))

# Group compile jobs by the client IP that submitted them.
# Each entry is (job, keyboard/args[0]).
job_ips = {}
for job in qmk_redis.rq.jobs:
    if job.func_name != 'qmk_compiler.compile_json':
        continue
    client_ip = job.args[1]
    job_ips.setdefault(client_ip, []).append((job, job.args[0]))

for client_ip, jobs in job_ips.items():
    print(f'\nClient IP {client_ip} has {len(jobs)} jobs:')
    # Fix: the original printed `i` left over from the enumeration of the
    # *first* loop, so every job line showed the same stale index.
    for i, (job, args) in enumerate(jobs):
        print('\t%s: %s' % (i, job.id))
        print('\t  %s(%s)' % (job.func_name, args))
|
from demo import app
def test_get_index():
    """Integration check: GET /v1/ returns 200 with the hello-world payload."""
    _request, response = app.test_client.get("/v1/")
    assert response.status == 200
    assert response.json == {"data": "hello world!"}
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @File:dssm_model.py
# @Author: Michael.liu
# @Date:2020/6/11 15:08
# @Desc: this code is ....
import tensorflow as tf
from .helper import *
TRIGRAM_D = 100  # word-embedding dimensionality
# negative samples per query
NEG = 4
# query batch size
query_BS = 100
# total doc batch size (one block of negatives per query)
BS = query_BS * NEG
class DssmModel(object):
    """DSSM-style ranking model with a bidirectional-GRU encoder (TF1 graph).

    Embeds a query, one positive doc and NEG negative docs, encodes each
    with a BiGRU, scores query/doc pairs with a scaled cosine similarity and
    trains with a softmax loss over the positive column.

    NOTE(review): the input placeholders referenced below
    (self.query_batch, self.doc_pos_batch, self.doc_neg_batch,
    self.query_seq_length, self.pos_seq_length, self.neg_seq_length,
    self.on_train, self.drop_out_prob) are never created in this file --
    the 'input' scope only prints. Presumably defined elsewhere; confirm.
    """

    def __init__(self, config):
        """Load the vocabulary, read hyper-parameters and build the graph.

        Expected config keys: vocab_path, use_stack_rnn, hidden_size_rnn,
        optimization, max_seq_len (and learning_rate for get_optimizer).
        """
        self.config = config
        self.vocab_map = DssmData.load_vocab(self.config["vocab_path"])
        self.nwords = len(self.vocab_map)
        # NOTE(review): use_stack_rnn is compared against the *strings*
        # "True"/"False" in model_structure(), so the config value must be a
        # string, not a bool.
        self.use_stack_rnn = self.config["use_stack_rnn"]
        self.hidden_size_rnn = self.config["hidden_size_rnn"]
        self.optimization = self.config["optimization"]
        self.max_seq_len = self.config["max_seq_len"]
        # create graph
        self.model_structure()
        # init saver
        self.init_saver()

    def model_structure(self):
        """Assemble the graph: embeddings -> BiGRU -> cosine -> softmax loss."""
        with tf.name_scope('input'):
            # At inference time only the query needs to be fed and embedded.
            # NOTE(review): no placeholders are actually created here.
            print("input")
        with tf.name_scope('word_embeddings_layer'):
            # Shared embedding table for query and docs.
            _word_embedding = tf.get_variable(name="word_embedding_arr", dtype=tf.float32,
                                              shape=[self.nwords, TRIGRAM_D])
            query_embed = tf.nn.embedding_lookup(_word_embedding, self.query_batch, name='query_batch_embed')
            doc_pos_embed = tf.nn.embedding_lookup(_word_embedding, self.doc_pos_batch, name='doc_positive_embed')
            doc_neg_embed = tf.nn.embedding_lookup(_word_embedding, self.doc_neg_batch, name='doc_negative_embed')
        with tf.name_scope('RNN'):
            # Abandon bag of words, use GRU, you can use stacked gru
            if self.use_stack_rnn == "True":
                # NOTE(review): unfinished -- the backward stack reuses
                # cell_fw (likely should be cell_bw) and the
                # bidirectional_dynamic_rnn call is missing its inputs.
                cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size_rnn, reuse=tf.AUTO_REUSE)
                stacked_gru_fw = tf.contrib.rnn.MultiRNNCell([cell_fw], state_is_tuple=True)
                cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size_rnn, reuse=tf.AUTO_REUSE)
                stacked_gru_bw = tf.contrib.rnn.MultiRNNCell([cell_fw], state_is_tuple=True)
                (output_fw, output_bw), (_, _) = tf.nn.bidirectional_dynamic_rnn(stacked_gru_fw, stacked_gru_bw)
                # not ready, to be continue ...
            if self.use_stack_rnn == "False":
                cell_fw = tf.contrib.rnn.GRUCell(self.hidden_size_rnn, reuse=tf.AUTO_REUSE)
                cell_bw = tf.contrib.rnn.GRUCell(self.hidden_size_rnn, reuse=tf.AUTO_REUSE)
                # query: final fw/bw states concatenated, then dropout.
                (_, _), (query_output_fw, query_output_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                                                             query_embed,
                                                                                             sequence_length=self.query_seq_length,
                                                                                             dtype=tf.float32)
                query_rnn_output = tf.concat([query_output_fw, query_output_bw], axis=-1)
                query_rnn_output = tf.nn.dropout(query_rnn_output, self.drop_out_prob)
                # doc_pos
                (_, _), (doc_pos_output_fw, doc_pos_output_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                                                                 doc_pos_embed,
                                                                                                 sequence_length=self.pos_seq_length,
                                                                                                 dtype=tf.float32)
                doc_pos_rnn_output = tf.concat([doc_pos_output_fw, doc_pos_output_bw], axis=-1)
                doc_pos_rnn_output = tf.nn.dropout(doc_pos_rnn_output, self.drop_out_prob)
                # doc_neg
                (_, _), (doc_neg_output_fw, doc_neg_output_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                                                                 doc_neg_embed,
                                                                                                 sequence_length=self.neg_seq_length,
                                                                                                 dtype=tf.float32)
                doc_neg_rnn_output = tf.concat([doc_neg_output_fw, doc_neg_output_bw], axis=-1)
                doc_neg_rnn_output = tf.nn.dropout(doc_neg_rnn_output, self.drop_out_prob)
        with tf.name_scope('merge_negative_doc'):
            # Merge the negative samples behind the positives; tile can
            # optionally expand the positives.
            doc_y = tf.tile(doc_pos_rnn_output, [1, 1])
            for i in range(NEG):
                for j in range(query_BS):
                    # slice(input_, begin, size) -- take one negative row at a time
                    doc_y = tf.concat([doc_y, tf.slice(doc_neg_rnn_output, [j * NEG + i, 0], [1, -1])], 0)
        with tf.name_scope('cosine_similarity'):
            # Cosine similarity
            # query_norm = sqrt(sum(each x^2)), tiled to match the doc rows
            query_norm = tf.tile(tf.sqrt(tf.reduce_sum(tf.square(query_rnn_output), 1, True)), [NEG + 1, 1])
            # doc_norm = sqrt(sum(each x^2))
            doc_norm = tf.sqrt(tf.reduce_sum(tf.square(doc_y), 1, True))
            prod = tf.reduce_sum(tf.multiply(tf.tile(query_rnn_output, [NEG + 1, 1]), doc_y), 1, True)
            norm_prod = tf.multiply(query_norm, doc_norm)
            # cos_sim_raw = query * doc / (||query|| * ||doc||)
            cos_sim_raw = tf.truediv(prod, norm_prod)
            # gamma = 20: fixed smoothing factor applied before the softmax
            cos_sim = tf.transpose(tf.reshape(tf.transpose(cos_sim_raw), [NEG + 1, query_BS])) * 20
        with tf.name_scope('loss'):
            # Train Loss
            # Convert the similarities to a softmax probability matrix.
            prob = tf.nn.softmax(cos_sim)
            # Keep only the first column: the positive-sample probabilities.
            hit_prob = tf.slice(prob, [0, 0], [-1, 1])
            self.loss = -tf.reduce_sum(tf.log(hit_prob))
            tf.summary.scalar('loss', self.loss)
        with tf.name_scope('train_op'):
            # Optimizer
            # NOTE(review): `conf` is undefined in this module -- this should
            # probably read self.config["learning_rate"] (cf. get_optimizer).
            self.train_step = tf.train.AdamOptimizer(conf.learning_rate).minimize(self.loss)
        with tf.name_scope('vali'):
            # Placeholder so an externally-averaged validation loss can be logged.
            self.average_loss = tf.placeholder(tf.float32)
            self.loss_summary = tf.summary.scalar('average_loss', self.average_loss)
        with tf.name_scope('Train'):
            self.train_average_loss = tf.placeholder(tf.float32)
            self.train_loss_summary = tf.summary.scalar('train_average_loss', self.train_average_loss)

    def variable_summaries(self, var, name):
        """Attach a lot of summaries to a Tensor."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean/' + name, mean)
            with tf.name_scope('stddev'):
                # NOTE(review): this is the root *sum* of squared deviations,
                # not a true standard deviation (no division by N).
                stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
            tf.summary.scalar('sttdev/' + name, stddev)
            tf.summary.scalar('max/' + name, tf.reduce_max(var))
            tf.summary.scalar('min/' + name, tf.reduce_min(var))
            tf.summary.histogram(name, var)

    def get_optimizer(self):
        """
        Return the optimizer selected by config["optimization"]
        ("adam" / "rmsprop" / "sgd"), or None for any other value.
        :return: a tf.train Optimizer instance or None.
        """
        optimizer = None
        if self.config["optimization"] == "adam":
            optimizer = tf.train.AdamOptimizer(self.config["learning_rate"])
        if self.config["optimization"] == "rmsprop":
            optimizer = tf.train.RMSPropOptimizer(self.config["learning_rate"])
        if self.config["optimization"] == "sgd":
            optimizer = tf.train.GradientDescentOptimizer(self.config["learning_rate"])
        return optimizer

    def init_saver(self):
        # Saver over all global variables (checkpointing).
        self.saver = tf.train.Saver(tf.global_variables())

    def gen_data(self, data_map, batch_id):
        """Slice batch `batch_id` out of the pre-tokenised data arrays.

        Queries/positives use query_BS rows; negatives use query_BS * NEG.
        Returns (query, doc_pos, doc_neg, query_len, pos_len, neg_len).
        """
        query_in = data_map['query'][batch_id * query_BS:(batch_id + 1) * query_BS]
        query_len = data_map['query_len'][batch_id * query_BS:(batch_id + 1) * query_BS]
        doc_positive_in = data_map['doc_pos'][batch_id * query_BS:(batch_id + 1) * query_BS]
        doc_positive_len = data_map['doc_pos_len'][batch_id * query_BS:(batch_id + 1) * query_BS]
        doc_negative_in = data_map['doc_neg'][batch_id * query_BS * NEG:(batch_id + 1) * query_BS * NEG]
        doc_negative_len = data_map['doc_neg_len'][batch_id * query_BS * NEG:(batch_id + 1) * query_BS * NEG]
        return query_in, doc_positive_in, doc_negative_in, query_len, doc_positive_len, doc_negative_len

    def feed_dict(self, on_training, data_set, batch_id, drop_prob):
        """Build the feed_dict for one batch.

        NOTE(review): the real sequence lengths returned by gen_data are
        immediately overwritten with max_seq_len below -- every sequence is
        treated as full length. Confirm whether that is intended.
        """
        query_in, doc_positive_in, doc_negative_in, query_seq_len, pos_seq_len, neg_seq_len = self.gen_data(data_set,
                                                                                                            batch_id)
        query_len = len(query_in)
        query_seq_len = [self.config["max_seq_len"]] * query_len
        pos_seq_len = [self.config["max_seq_len"]] * query_len
        neg_seq_len = [self.config["max_seq_len"]] * query_len * NEG
        return {self.query_batch: query_in, self.doc_pos_batch: doc_positive_in, self.doc_neg_batch: doc_negative_in,
                self.on_train: on_training, self.drop_out_prob: drop_prob, self.query_seq_length: query_seq_len,
                self.neg_seq_length: neg_seq_len, self.pos_seq_length: pos_seq_len}

    def train(self, sess, data_train, batch_id):
        """Run one optimisation step on batch `batch_id`; returns the loss.

        NOTE(review): on_training is passed as False here -- confirm.
        """
        _, loss = sess.run([self.train_step, self.loss], feed_dict=self.feed_dict(False, data_train, batch_id, 0.5))
        return loss
|
from dataclasses import dataclass
from sqlite3 import Connection
from typing import List, Optional
@dataclass
class Billing:
    """Billing settings for one (user, model) pair."""
    id: int
    model_enabled: bool
    billing_enabled: bool
    bill_from: int
    token_credit: int
    cost_multiplier: int
    user_id: str
    model_id: int


class BillingRepository:
    """sqlite3-backed data access for the `billing` table.

    Rows must be mapping-style: set ``con.row_factory = sqlite3.Row``.
    """

    def __init__(self, con: Connection) -> None:
        self._con = con

    @staticmethod
    def _to_billing(row) -> Billing:
        """Map one mapping-style row to a Billing (shared by all readers).

        Previously this mapping was duplicated verbatim in three methods.
        """
        return Billing(
            id=row['id'],
            model_enabled=bool(row['model_enabled']),
            billing_enabled=bool(row['billing_enabled']),
            bill_from=row['bill_from'],
            token_credit=row['token_credit'],
            cost_multiplier=row['cost_multiplier'],
            user_id=row['user_id'],
            model_id=row['model_id'],
        )

    def get_all(self) -> List[Billing]:
        """Return every billing row."""
        rows = self._con.cursor().execute('''
            SELECT *
            FROM billing
        ''').fetchall()
        return [self._to_billing(r) for r in rows]

    def get_by_id(self, billing_id: int) -> Billing:
        """Return the billing row with the given id.

        Raises:
            Exception: if no such row exists.
        """
        row = self._con.cursor().execute('''
            SELECT *
            FROM billing
            WHERE id = ?
        ''', (
            billing_id,
        )).fetchone()
        if not row:
            raise Exception(f'Billing account {billing_id} was not found')
        return self._to_billing(row)

    def get_by_user_model(self, user_id: str, model_name: str) -> Optional[Billing]:
        """Return the billing row for a user and model internal name, or None."""
        row = self._con.cursor().execute('''
            SELECT b.*
            FROM billing b
            JOIN openai_model m on m.id = b.model_id
            WHERE user_id = ? AND m.internal_name = ?
        ''', (
            user_id,
            model_name,
        )).fetchone()
        if not row:
            return None
        return self._to_billing(row)

    def create(self, billing: Billing) -> Billing:
        """Insert a new billing row and return it with the database-assigned id.

        billing.id is ignored on insert; the table assigns it.
        """
        with self._con as con:
            cur = con.cursor()
            cur.execute('''
                INSERT INTO billing (model_enabled, billing_enabled, bill_from, token_credit, cost_multiplier, user_id, model_id)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            ''', (
                billing.model_enabled,
                billing.billing_enabled,
                billing.bill_from,
                billing.token_credit,
                billing.cost_multiplier,
                billing.user_id,
                billing.model_id,
            ))
            return Billing(
                id=cur.lastrowid,  # type: ignore
                model_enabled=billing.model_enabled,
                billing_enabled=billing.billing_enabled,
                bill_from=billing.bill_from,
                token_credit=billing.token_credit,
                cost_multiplier=billing.cost_multiplier,
                user_id=billing.user_id,
                model_id=billing.model_id,
            )

    def update(self, billing: Billing):
        """Persist the mutable fields of `billing` by id.

        NOTE(review): user_id and model_id are not part of the SET clause --
        they appear intended to be immutable after creation; confirm.
        """
        with self._con as con:
            con.cursor().execute('''
                UPDATE billing
                SET
                    model_enabled = ?,
                    billing_enabled = ?,
                    bill_from = ?,
                    token_credit = ?,
                    cost_multiplier = ?
                WHERE id = ?
            ''', (
                billing.model_enabled,
                billing.billing_enabled,
                billing.bill_from,
                billing.token_credit,
                billing.cost_multiplier,
                billing.id,
            ))
# Print a triangle of even numbers: row i holds 2, 4, ..., 2*i,
# each row preceded by a line containing a single space.
n = int(input("Enter any number"))
for i in range(n + 1):
    print(" ")
    for j in range(i):
        print((j + 1) * 2, end=' ')
|
def bigger(number1, number2):
    """Return the larger of the two numbers (either one on a tie)."""
    return number1 if number1 > number2 else number2
# NOTE(review): Python 2 print statements -- this script targets Python 2.
print bigger(2,7)
print bigger(3,2)
print bigger(3,3)
def is_friend(name):
    """True when `name` starts with 'D' or 'N' (raises IndexError on '')."""
    return name[0] in ('D', 'N')
# Expected: True then True (both names start with a friend initial). Python 2.
print is_friend('Diane')
print is_friend('NFred')
def biggest(number1, number2, number3):
    """Return the largest of the three numbers."""
    return max((number1, number2, number3))
# Smoke check plus a count-to-ten loop (Python 2 print syntax).
print biggest(6,2,3)
i = 0
while i != 10:
    i = i + 1
    print i
def solution(lines):
    """Sum the overlap between consecutive intervals.

    Sorts `lines` in place by start point, then for each adjacent pair adds
    max(0, end_i - start_{i+1}) to the total.

    NOTE(review): only *adjacent* pairs are compared, so overlap spanning
    non-adjacent intervals is not counted -- confirm against the problem spec.

    Fix: removed a leftover debug print of the sorted list.
    """
    answer = 0
    lines.sort(key=lambda x: x[0])
    for i in range(len(lines) - 1):
        overlap = lines[i][1] - lines[i + 1][0]
        if overlap > 0:
            answer += overlap
    return answer
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# Originally contributed by Check Point Software Technologies, Ltd.
def choose_package(file_type, file_name):
    """Choose analysis package due to file type and file extension.
    @param file_type: file type.
    @param file_name: file name.
    @return: package name, or None when no file type was given.
    """
    if not file_type:
        return None

    file_type = file_type.lower()
    file_name = file_name.lower()

    if "apk" in file_name:
        return "apk"
    if "zip" in file_type:
        return "apk"
    # elif "DEX" in file_type:
    #     return "dex"
    # Default: everything else is analysed as an apk as well.
    return "apk"
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given two integers n and k, return all possible combinations of k numbers out of 1 ... n.
#
# For example,
# If n = 4 and k = 2, a solution is:
#
# [
# [2,4],
# [3,4],
# [2,3],
# [1,2],
# [1,3],
# [1,4],
# ]
# Your runtime beats 100.00 % of python3 submissions.
# 27 / 27 test cases passed.
# Status: Accepted
# Runtime: 128 ms
from itertools import combinations
class Solution:
    def combine(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: List[List[int]]

        Delegate to itertools.combinations; yields the k-combinations of
        1..n in lexicographic order (as tuples).
        """
        return [*combinations(range(1, n + 1), k)]
# Your runtime beats 48.86 % of python3 submissions.
# 27 / 27 test cases passed.
# Status: Accepted
# Runtime: 964 ms
class Solution:
    def combine(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: List[List[int]]

        Iteratively prepend one smaller element per round: after r rounds,
        `combs` holds every strictly increasing r-prefix. A new element must
        be smaller than the current head (or anything in 1..n when empty).
        """
        combs = [[]]
        for _ in range(k):
            grown = []
            for partial in combs:
                upper = partial[0] if partial else n + 1
                for value in range(1, upper):
                    grown.append([value] + partial)
            combs = grown
        return combs
if __name__ == '__main__':
    # Quick manual check against the example in the header comment.
    print(Solution().combine(4, 2))
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
'''
your salary:5000
1. Iphone 5800
2. Mac Pro 12000
3. Starbuck Latte 31
4. Alex Python 81
5. Bike 800
added [Iphone] to your Shopping Car!!
You
1. Iphone 5800
2. Mac Pro 12000
3. Starbuck Latte 31
4. Alex Pythn 81
5. Bike 800
have bought below:
[[iphone,5800],[bike,800]]
your balance : ...
'''
# (name, price) catalogue shown in the menu; sorted alphabetically at startup.
goodslist = [('Iphone',5800),('Mac Pro',12000),('Starbuck Latte',31),('Alex Python',81),('Bike',800)]
goods = []     # transient one-item buffer used by joinownlist()
own_list = []  # everything purchased this session
def showlist(L):
    """Print a 1-based numbered menu line per (name, price) pair in L."""
    for position, (label, price) in enumerate(L, start=1):
        print("%d. %-15s %-5.0d" % (position, label, price))
def joinownlist(chose):
    """Record the purchase of 1-based menu choice `chose`.

    Net effect matches the original goods-buffer shuffle (append to `goods`,
    extend `own_list`, clear `goods`): the chosen (name, price) tuple lands
    in own_list and `goods` is left empty.
    """
    own_list.append(goodslist[chose - 1])
def count(chose, salary):
    """Return salary minus the item's price when affordable, else unchanged.

    The caller detects "could not afford" by comparing the returned value
    with the original salary, so an unchanged balance is the failure signal.
    """
    price = goodslist[chose - 1][1]
    if salary >= price:
        return salary - price
    return salary
# Interactive shopping loop: show the menu, buy by number, quit with 'q'.
own_salary = int(input("Input your salary:"))
goodslist.sort()
while True:
    print("Welcome to Shop!")
    showlist(goodslist)
    chose = input("Input what your want:")
    if chose.isdigit():
        chose = int(chose)
        if chose <= len(goodslist) and chose > 0:
            salary = count(chose,own_salary)
            # NOTE(review): the aaaaa/bbbbb/ccccc/1233333321 prints below look
            # like leftover debug output.
            print("aaaaa", salary)
            print("bbbbb", own_salary)
            # An unchanged balance means the purchase was unaffordable.
            if salary == own_salary:
                print("1233333321")
                print("你的余额只剩[%-4d]啦"%own_salary)
            else:
                own_salary = salary
                print("ccccc", own_salary)
                joinownlist(chose)
                print("added \033[31;1m[%s]\033[0m to your shopping car,your current balance is \033[31;1m%4d\033[0m"%(goodslist[chose-1][0],own_salary))
    elif chose == 'q':
        break
    else:
        print("invalid option")
# On exit, summarise the purchases and the remaining balance.
print('Have bought below:')
showlist(own_list)
print("Your balance : %-4d"%own_salary)
|
"""
This module contains the Login Menu class "LoginMenu".
@author: Shaun Hamelin-Owens
@author: Jason Cohen
@author: Sasithra Thanabalan
@author: Andrew Walker
"""
from display import LoginFrame
class LoginMenu():
    """
    This class deals with the Login Menu components: it builds the login
    window, wires up mouse/keyboard bindings, and dispatches to the game
    menu, new-user flow, forgot-password flow, or exit.
    """
    def __init__(self, menu_manager):
        """
        Class constructor for the Login Menu Class. \n
        Initializes the window and menu components. \n
        Sets up keybindings.
        @param menu_manager: The game object (from org/Pacman.py)
        """
        self.menu_manager = menu_manager
        self.uiCoordinator = menu_manager.uiCoordinator
        # True while the error popup is showing.
        self.open_window = False
        self.setupWindow()
        self.setupBinds()
    # Setting up keybindings
    def setupBinds(self):
        """
        Sets up keybindings: every button answers both mouse click and Return,
        and the entry widgets submit on Return.
        """
        self.login_frame.guest.bind("<Button-1>", self.guestPlay)
        self.login_frame.guest.bind("<Return>", self.guestPlay)
        self.login_frame.logon.bind("<Button-1>", self.userLogon)
        self.login_frame.logon.bind("<Return>", self.userLogon)
        self.login_frame.exit.bind("<Button-1>", self.closeGame)
        self.login_frame.exit.bind("<Return>", self.closeGame)
        self.login_frame.new_user.bind("<Button-1>", self.newUser)
        self.login_frame.new_user.bind("<Return>", self.newUser)
        self.login_frame.forgot.bind("<Button-1>", self.forgotPass)
        self.login_frame.forgot.bind("<Return>", self.forgotPass)
        self.login_frame.iuname.bind("<Return>", self.userLogon)
        self.login_frame.ipw.bind("<Return>", self.userLogon)
        self.error_frame.error_button.bind("<Button-1>", self.closeError)
        self.error_frame.error_button.bind("<Return>", self.closeError)
    # Undo Keybinds
    def unBind(self):
        """
        Unbind keybindings (mirror of setupBinds; called before the window is
        destroyed so no callbacks fire on dead widgets).
        """
        self.login_frame.guest.unbind("<Button-1>")
        self.login_frame.logon.unbind("<Button-1>")
        self.login_frame.exit.unbind("<Button-1>")
        self.login_frame.new_user.unbind("<Button-1>")
        self.login_frame.forgot.unbind("<Button-1>")
        self.login_frame.iuname.unbind("<Return>")
        self.login_frame.ipw.unbind("<Return>")
        self.error_frame.error_button.unbind("<Button-1>")
        self.error_frame.error_button.unbind("<Return>")
        self.login_frame.guest.unbind("<Return>")
        self.login_frame.logon.unbind("<Return>")
        self.login_frame.exit.unbind("<Return>")
        self.login_frame.new_user.unbind("<Return>")
        self.login_frame.forgot.unbind("<Return>")
    # Setting up menu GUI
    def setupWindow(self):
        """
        Sets up the window: creates the login frame, focuses the username
        entry, and keeps a handle to the (initially hidden) error frame.
        """
        self.main_login_frame = LoginFrame.MLoginFrame(self.uiCoordinator)
        self.login_frame = self.main_login_frame._lf
        self.login_frame.iuname.focus_set()
        self.error_frame = self.main_login_frame._ef
    # Logging on
    def userLogon(self, e):
        """
        Fetches username and password from entry widget. \n
        With database, check if password corresponds to user. \n
        If they don't match, it opens the error window. \n
        If the username and password match, login to the game, start game with
        the username, destroy login window, unbind keys.
        @param e: Passed by keybinding, not actually used.
        """
        username = self.login_frame.iuname.get()
        password = self.login_frame.ipw.get()
        ### Check if password and username exist and match
        if self.menu_manager.con.checkPassword(username, password):
            self.menu_manager.user.username = username
            self.menu_manager.user.highscore = self.menu_manager.con.getScore(username)
            self.menu_manager.runMenu()
            self.unBind()
            self.main_login_frame.root.destroy()
        ### show Error window
        else:
            self.error_frame.error_message.configure(text = "Wrong Username"
                                                     " or Password")
            self.main_login_frame.liftFrame(self.error_frame.error_frame)
            self.error_frame.error_button.focus_set()
            self.open_window = True
    ## hide Error window
    def closeError(self, e):
        """
        Close the error window, clear the entries, and set focus on the
        username entry.
        @param e: Passed by keybinding, not actually used.
        """
        self.main_login_frame.lowerFrame(self.error_frame.error_frame)
        self.login_frame.eraseEntry()
        self.login_frame.iuname.focus_set()
        self.open_window = False
    # Open forgot password
    def forgotPass(self, e):
        """
        Open forgot password window/menu, destroy logon window, unbind keys.
        @param e: Passed by keybinding, not actually used.
        """
        self.menu_manager.runForgot()
        self.unBind()
        self.main_login_frame.root.destroy()
    # Create new user
    def newUser(self, e):
        """
        Open new user window/menu, unbind keys, destroy window.
        @param e: Passed by keybinding, not actually used.
        """
        self.menu_manager.runNewUser()
        self.unBind()
        self.main_login_frame.root.destroy()
    # Play as a guest
    def guestPlay(self, e):
        """
        Start game menu as guest, destroy login window, unbind keys, setup
        guest values (username "Guest", highscore 0).
        @param e: Passed by keybinding, not actually used.
        """
        self.menu_manager.user.username = "Guest"
        self.menu_manager.user.highscore = 0
        self.menu_manager.runMenu()
        self.unBind()
        self.main_login_frame.root.destroy()
    # Exit the game
    def closeGame(self, e):
        """
        Close the game.
        @param e: Passed by keybinding, not actually used.
        """
        raise SystemExit
|
def select_rules(test_labels, test_transaction,combined_rule,default_label):
test_transactions=open(test_transaction,"r")
lines=test_transactions.readlines()
transactions=[]
for line in lines:
transactions.append(line.split())
combined_rules=open(combined_rule,"r")
lines=combined_rules.readlines()
rules=[]
for line in lines:
rules.append(line.split())
alllabels=[]
for transaction in transactions:
labels=[]
covered=0
applied_rules=[]
for j in range(0,len(rules)):
#if every feature of rules are in transaction
fit=1
for k in range(1,len(rules[j])-2):
if rules[j][k] not in transaction:
fit=0
break
if fit==1:
covered=1
confidence=float(rules[j][len(rules[j])-1])
support=float(rules[j][len(rules[j])-2])
label=rules[j][0]
applied_rules.append((confidence,support,label))
if covered==0:
labels=default_label
elif covered==1:
applied_rules.sort(reverse=True)
for i in range(0,5):
if i < len(applied_rules):
if applied_rules[i][2] not in labels:
if applied_rules[i][0]>90 or i==0:
labels.append(applied_rules[i][2])
alllabels.append(labels)
print "number of rules: "+str(len(rules))
f=open(test_labels,'w')
for item in alllabels:
f.write(" ".join(x for x in item)+"\n") |
"""
Under the assumption that both input sequences a and b stem from the same origin, a global alignment tries to identify
matching parts and the changes needed to transfer one sequence into the other. The changes are scored and an optimal set
of changes is identified, which defines an alignment. The dynamic programming approach tabularizes optimal subsolutions
in matrix D, where an entry Di,j represents the best score for aligning the prefixes a1..i with b1..j.
To better clarify how global alignment works, take a look here:
http://rna.informatik.uni-freiburg.de/Teaching/index.jsp?toolName=Needleman-Wunsch
Then, write a Python program that given two sequences (passed as first and second argument from command line) and match,
mismatch and gap costs (passed as third, fourth, fifth argument from command line):
1. Compute matrix D and output it on the terminal, along with the final alignment score
2. Output the final alignment (if two sequences have more than one alignment with the same score, provide one of them
e.g. check website for ‘AACCG’ and ‘AACG’)
3. Check your alignment on Freiburg website
Usage should be something like this:
python global_alignment.py AACCG AACG 1 -1 -2
Output:
Global alignment score: 2.0 [[ 0. -2. -4. -6. -8.]
[ -2. 1. -1. -3. -5.]
[ -4. -1. 2. 0. -2.]
[ -6. -3. 0. 3. 1.]
[ -8. -5. -2. 1. 2.]
[-10. -7. -4. -1. 2.]]
Final alignment: AACCG ||| | AAC-G
"""
import numpy as np
import pandas as pd
import sys
# --- CLI handling: seq1 seq2 match mismatch gap -----------------------------
if len(sys.argv) < 6:
    print("Error with the number of arguments in command line!\n")
    exit(-1)
sequence_1 = sys.argv[1]
sequence_2 = sys.argv[2]
match = int(sys.argv[3])     # score for a matching pair
mismatch = int(sys.argv[4])  # score for a mismatching pair
gap = int(sys.argv[5])       # score for inserting a gap
# DP matrix is (len(a)+1) x (len(b)+1); row/column 0 are the gap-only prefixes.
n_rows = len(sequence_1) + 1
n_cols = len(sequence_2) + 1
gl_alignment = []
print("match=", match, " mismatch=", mismatch, "gap=", gap)
# Initialise only the first row and first column with cumulative gap costs.
# NOTE(review): for rows i>0 only the j==0 cell is appended, so the rows are
# ragged; pandas pads the missing cells with NaN, and each is overwritten in
# the fill loop below before it is ever read. Fragile but functional.
for i in range(n_rows):
    tmp = []
    tot_i = 0
    tot_j = 0
    for j in range(0, n_cols):
        if i == 0 and j == 0:
            tmp.append(0)
        else:
            if i == 0:
                tot_i = gap * j
                tmp.append(tot_i)
            elif j == 0:
                # tot_j starts at 0 each row, so this evaluates to gap * i.
                tot_j = tot_j + gap * i
                tmp.append(tot_j)
    gl_alignment.append(tmp)
sequence_1 = list(sequence_1)
sequence_2 = list(sequence_2)
print(sequence_1)
print(sequence_2)
gl_alignment = pd.DataFrame(data=gl_alignment)
# Needleman-Wunsch fill: each cell takes the best of diagonal (match or
# mismatch), up (gap) and left (gap).
for i, base_1 in zip(range(1, n_rows), sequence_1):
    for j, base_2 in zip(range(1, n_cols), sequence_2):
        candidates = []
        # sim(i-1, j-1) + sim(X,Y)
        if base_1 == base_2:
            candidates.append(gl_alignment.iloc[i-1, j-1] + match)
        else:
            candidates.append(gl_alignment.iloc[i-1, j-1] + mismatch)
        # sim(i-1, j) + gap
        candidates.append(gl_alignment.iloc[i-1, j] + gap)
        # sim(i, j-1) + gap
        candidates.append(gl_alignment.iloc[i, j-1] + gap)
        max_score = max(candidates)
        gl_alignment.iloc[i, j] = max_score
# show matrix
print(gl_alignment)
# to finish for best alignment score
# NOTE(review): the module docstring promises the final score and a traceback
# alignment; neither is implemented yet.
|
''' #==============================================================================
Dungeon Setup
''' #==============================================================================
class Room:
    """One location in the dungeon map."""

    def __init__(self):
        # Rooms start blank; create_room() fills them in.
        self.name = ""
        self.description = ""
        self.heroes = []
        self.connected_rooms = {}

    def create_room(self, room_name, rooms_dict, description):
        """Populate the room's name, exits (direction -> room name) and text."""
        self.name, self.connected_rooms, self.description = (
            room_name, rooms_dict, description)
class DungeonRooms:
    """Container for a named dungeon: a dict of Room objects keyed by name."""

    def __init__(self):
        self.name = ""
        self.rooms_dict = {}
        self.heroes = []
    # Creates a bespoke Dungeon
    def setup_dungeon(self, DungeonName):
        """Build the fixed 16-room town map and store it in rooms_dict.

        Exit dicts map a direction word to the destination room's name,
        which is also its key in rooms_dict.
        """
        self.name = DungeonName
        room_1 = Room()
        room_1.create_room("Entry Gate",
                           {"north": "South Street"},
                           "\nYou are at the Entry Gate.\n"
                           "You see a street to the north.\n")
        room_2 = Room()
        room_2.create_room("Large Well",
                           {"north": "The Davy Lamp"},
                           "\nYou are at the Large Well.\n"
                           "You see a Tavern north of you.\n")
        room_3 = Room()
        room_3.create_room("Helpington's General Store",
                           {"east": "South Street"},
                           "\nYou are at Helpington's General Store.\n"
                           "There is a street east of you. \n")
        room_4 = Room()
        room_4.create_room("South Street",
                           {"north": "Clock Tower", "south": "Entry Gate", "east": "The Davy Lamp", "west": "Helpington's General Store"},
                           "\nYou are at the south end of a long street.\n"
                           "You see a Clock Tower to the north of you in the centre of town. \n"
                           "There is a store to the west. \n"
                           "There is a Tavern to the east. \n"
                           "There is the entrance to town in the south. \n")
        room_5 = Room()
        room_5.create_room("The Davy Lamp",
                           {"south": "Large Well",
                            "west": "South Street"},
                           "\nYou are at the Tavern, 'The Davy Lamp'.\n"
                           "There is a Large Well south of you. \n"
                           "You see a street to the west. \n")
        room_6 = Room()
        room_6.create_room("Bank",
                           {"east": "Clock Tower"},
                           "\nYou are at the Bank.\n"
                           "You see a Clock Tower to the east of you. \n"
                           "There is a street south of you. \n")
        room_7 = Room()
        room_7.create_room("Clock Tower",
                           {"north": "North Street", "south": "South Street", "east": "Sherrif's Office", "west": "Bank"},
                           "\nYou are at the Clock Tower.\n"
                           "You see a street to the north. \n"
                           "There is a street to the south. \n"
                           "The Sherrif's Office is east of you. \n"
                           "There is a Bank to the west. \n")
        room_8 = Room()
        room_8.create_room("Sherrif's Office",
                           {"west": "Clock Tower"},
                           "\nYou are at the Sherrif's Office.\n"
                           "You see a Clock Tower west of you. \n")
        room_9 = Room()
        room_9.create_room("Fallen Temple",
                           {"north": "Cave", "east": "North Street"},
                           "\nYou are at the Fallen Temple.\n"
                           "You see a street east of you. \n"
                           "You see a Cave someways north of your position. \n")
        room_10 = Room()
        room_10.create_room("North Street",
                            {"north": "Elder's Manor", "south": "Clock Tower", "east": "Quarry", "west": "Fallen Temple"},
                            "\nYou are at the Northern Street.\n"
                            "You see a large Manor north of you. \n"
                            "There is a Clock Tower south of you. \n"
                            "You see a Quarry far off in the east. \n"
                            "You can see a Temple far off in the distance west of you. \n")
        room_11 = Room()
        room_11.create_room("Quarry",
                            {"west": "North Street"},
                            "\nYou are at the Quarry.\n"
                            "You see a street far off to the west of you. \n")
        room_12 = Room()
        room_12.create_room("Cave",
                            {"south": "Fallen Temple", "east": "Witch's Hut"},
                            "\nYou are at the cave.\n"
                            "You see a Temple south of you. \n"
                            "You see a small hut to the east in the forest. \n")
        room_13 = Room()
        room_13.create_room("Witch's Hut",
                            {"north": "Stonefruit Farm","west": "Cave"},
                            "\nYou are deep in the forest outside a small hut.\n"
                            "You see a farm in the clearing north of you \n"
                            "The cave is westward. \n")
        room_14 = Room()
        room_14.create_room("Elder's Manor",
                            {"south": "North Street"},
                            "\nYou are at the Elder's Manor.\n"
                            "You see a street to the south. \n")
        room_15 = Room()
        room_15.create_room("Stonefruit Farm",
                            {"south": "Witch's Hut"},
                            "\nYou are at Stonefruit Farm.\n"
                            "You see a hut in the forest to the south. \n")
        room_16 = Room()
        # Special room: every exit leads back to the Entry Gate.
        room_16.create_room("The Void",
                            {"north": "Entry Gate", "south": "Entry Gate", "east": "Entry Gate", "west": "Entry Gate"},
                            "\nYou see white light in every direction and a frail figure in the middle of the room.\n"
                            "'...Find Me...'")
        self.rooms_dict = {"Entry Gate": room_1, "Large Well": room_2, "Helpington's General Store": room_3,
                           "South Street": room_4, "The Davy Lamp": room_5, "Bank": room_6, "Clock Tower": room_7,
                           "Sherrif's Office": room_8, "Fallen Temple": room_9, "North Street": room_10,
                           "Quarry": room_11, "Cave": room_12, "Witch's Hut": room_13, "Elder's Manor": room_14,
                           "Stonefruit Farm": room_15, "The Void": room_16}
|
'''Task 0. A string is entered. Count the number of
uppercase letters in it. For example:
[in]--> Привет, Андрей!
[out]--> 2
'''
message = input('Введите что-то: ')
# running count and the collected uppercase letters themselves
big = 0
bl = []
for letter in message:
    if letter.isupper():
        big += 1
        bl.append(letter)
print(f'В строке {big} больших букв.')
print(f'Большие буквы: {bl}')
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 00:18:20 2020
"""
from PollStorage import PollStorage
from StoreModel import StoreModel
from SendReport import SendReport
from ModelSearch import ModelSearch
# poll the google storage bucket for user input
poll = PollStorage()
poll.start_polling()
# start searching for model / training
ms = ModelSearch()
ms.getLabels()
ms.createDataDirs()
modelType, epochAtMx = ms.findModel() #returns best model and epoch at max val acc
# NOTE(review): mixes str.format and %-formatting in one template; works
# because .format fills {:03d} first, then % substitutes %s.
model_file = '%s_model.{:03d}.h5'.format(epochAtMx) % modelType
# Upload the saved model and email the report with its public link.
storeModel = StoreModel()
storeModel.store_model(local_file='./saved_models/{}'.format(model_file))
print(storeModel.get_model_link())
sendReport = SendReport(model_link=storeModel.get_model_link(),email_add=poll.get_email() \
    ,cm_img='confusion_matrix_bestModel.png' \
    ,loss_img='history_bestModel_loss.png' \
    ,acc_img='history_bestModel_accuracy.png' \
    ,model_sum='bestModel_summary.txt')
sendReport.sendEmail()
|
# Sorting + binary search approach.
# O(n^2 log n) is fast enough to pass.
class Solution:
    def triangleNumber(self, nums: List[int]) -> int:
        """Count index triplets whose values can form a valid triangle.

        After sorting (in place, as before), for each pair (first, second)
        every later element strictly smaller than their sum closes a valid
        triangle; bisect_left counts those elements.
        """
        size = len(nums)
        if size < 3:
            return 0
        nums.sort()
        total = 0
        for first in range(size):
            for second in range(first + 1, size - 1):
                total += bisect_left(nums[second + 1:], nums[first] + nums[second])
        return total
# Side-effect import: executes prog's module-level code before printing.
import prog
print('lul')
|
class FunctionConverterParams(object):
    def __init__(
            self, func=None, opset_version=None, input_names=None,
            output_names=None, context=None):
        """Wrapper of converter parameters

        Exporter set this parameters to the target converter's argument.

        >>> def own_converter(params):
        ...     # params is FunctionConverterParams
        ...     # so enable to get each attributes:
        ...     func_name = params.func.__class__.__name__

        Arguments:
            func (~chainer.FunctionNode): Target function.
            opset_version (int): Target opset version.
            input_names (list): List of input names.
            output_names (list): List of output names.
            context (~onnx_chainer.context.Context): Context for Exporting
        """
        self.func = func
        self.opset_version = opset_version
        self.input_names = input_names
        self.output_names = output_names
        self.context = context
class FunctionConverter(object):
    """Wrapper of ONNX-Chainer converter.

    Exporter calls this with a single ``FunctionConverterParams``; the
    wrapper breaks it down into the positional arguments the underlying
    converter function expects.
    """

    def __init__(self, converter):
        """Store the target converter function."""
        self.converter = converter

    def __call__(self, params):
        """Forward the unpacked fields of ``params`` to the converter."""
        return self.converter(
            params.func,
            params.opset_version,
            params.input_names,
            params.output_names,
            params.context)
|
# Generated by Django 3.1.5 on 2021-02-12 17:04
# Schema migration: creates the Artworks table for the products app.
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('products', '0002_auto_20210212_1151'),
    ]
    operations = [
        migrations.CreateModel(
            name='Artworks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=254)),
                # NOTE(review): plain IntegerField rather than a ForeignKey --
                # presumably artists live outside this app; confirm intended.
                ('artist_id', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('sold', models.BooleanField(default=False)),
                # Two-letter category codes with human-readable labels.
                ('category', models.CharField(choices=[('PA', 'Painting'), ('DR', 'Drawing'), ('CE', 'Ceramics'), ('SC', 'Sculpture'), ('PR', 'Prints'), ('CA', 'Cards'), ('BO', 'Books'), ('MX', 'Mixed')], default='PA', max_length=2)),
                # NOTE(review): no auto_now_add here -- callers must set
                # created_at explicitly; confirm that is intended.
                ('created_at', models.DateTimeField()),
                ('image_path', models.URLField(blank=True, max_length=1024, null=True)),
                # Physical dimensions; depth defaults to 0 for flat works.
                ('height', models.DecimalField(decimal_places=2, max_digits=5)),
                ('width', models.DecimalField(decimal_places=2, max_digits=5)),
                ('depth', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
            ],
        ),
    ]
|
# Find the smallest multiple of 7 (searching 7*1 .. 7*101) whose remainders
# are 1 mod 2, 2 mod 3, 4 mod 5 and 5 mod 6 (classic CRT-style puzzle).
x = 7
i = 1
flag = 0
while i <= 100:
    # Fixed: the original tested (x % 6 == 5) twice; once is enough.
    if (x % 2 == 1) and (x % 3 == 2) and (x % 5 == 4) and (x % 6 == 5):
        flag = 1
        # Fixed: stop at the first solution instead of re-testing the same
        # x on every remaining iteration (which could loop forever when the
        # counter increment lived in the else branch).
        break
    x = 7 * (i + 1)  # advance to the next multiple of 7
    i += 1  # the loop bound on i is what guarantees termination
if flag == 1:
    print("阶级数是:", x)
else:
    print("不在程序限定范围内!")
|
from uc.itm import ProtocolWrapper, WrappedProtocolWrapper, WrappedPartyWrapper, wrappedPartyWrapper, wrappedProtocolWrapper
from uc.adversary import DummyWrappedAdversary
from uc.syn_ours import Syn_FWrapper, Syn_Channel
from prot_bracha import Syn_Bracha_Protocol
from f_bracha import Syn_Bracha_Functionality, RBC_Simulator, brachaSimulator
from broken_prot_bracha import Broken_Bracha_Protocol
from uc.execuc import execWrappedUC
from uc.utils import waits
import logging
import gevent
from numpy.polynomial.polynomial import Polynomial
log = logging.getLogger(__name__)
# Level 50 == CRITICAL: effectively silences all lower-severity logging.
logging.basicConfig( level=50)
def env_equivocation(k, static, z2p, z2f, z2a, z2w, a2z, p2z, f2z, w2z, pump):
    """Environment in which a corrupt sender equivocates.

    Corrupts party 1 and has it send VAL 1 to party 2 but VAL 2 to party 3,
    then steps the synchronous wrapper so the honest parties process the
    conflicting messages.  Returns the transcript of everything written back
    to the environment on a2z/p2z, for comparison by the distinguisher.

    NOTE(review): the trailing integers in the z2a/z2w writes look like
    import-token budgets consumed by the wrapped execution -- confirm
    against uc.execuc.execWrappedUC.
    """
    delta = 3
    n = 4
    sid = ('one', tuple(range(1,n+1)), delta)
    # Static config: session id plus the corrupt set.  NOTE(review): (sid,10)
    # names a party outside 1..n -- presumably a harmless placeholder; confirm.
    static.write( (('sid', sid), ('crupt',(sid,1), (sid,10))) )
    transcript = []
    # Background readers: record adversary->Z and party->Z messages, then
    # hand control back through the pump.
    def _a2z():
        while True:
            m = waits(a2z)
            transcript.append('a2z: ' + str(m.msg))
            pump.write('dump')
    def _p2z():
        while True:
            m = waits(p2z)
            transcript.append('p2z: ' + str(m.msg))
            pump.write('dump')
    g1 = gevent.spawn(_a2z)
    g2 = gevent.spawn(_p2z)
    def t(s):
        # Helper to log issued commands (currently unused below).
        transcript.append('cmd: ' + str(s))
    def channel_id(fro, to, r):
        # Session id of the point-to-point channel fro->to for round r.
        s = ('one', (sid,fro), (sid,to), r, delta)
        return (s,'F_chan')
    # Round 1: the corrupt party 1 sends conflicting VAL messages.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,1)), ('send', ('VAL', 1))))), 4*n), 4*n)
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,1)), ('send', ('VAL', 2))))), 4*n), 4*n)
    waits(pump)
    z2a.write( ('A2W', ('get-leaks',), n*(4*n+1)), n*(4*n+1))
    waits(pump)
    # Let the honest parties poll the wrapper and advance.
    for _ in range(4):
        z2w.write( ('poll',), 1)
        waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2W', ('clock-round',))), 0) )
    waits(pump)
    # Force delivery of queued codeblocks through the wrapper.
    z2a.write( ('A2W', ('exec', 4, 0), 0) )
    waits(pump)
    for _ in range(6):
        z2a.write( ('A2W', ('exec', 7, 0), 0) )
        waits(pump)
    # Round 7: conflicting ECHO messages from the corrupt party.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,7)), ('send', ('ECHO',1))))), 3), 3)
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,7)), ('send', ('ECHO',2))))), 3), 3)
    waits(pump)
    for _ in range(3):
        z2w.write( ('poll',), 1)
        waits(pump)
    for _ in range(8):
        z2a.write( ('A2W', ('exec', 7 , 0), 0) )
        waits(pump)
    # Conflicting READY messages.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,7)), ('send', ('READY',1))))), 0), 0)
    waits(pump)
    z2a.write( ('A2W', ('exec', 7 , 0), 0) )
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,7)), ('send', ('READY',2))))), 0), 0)
    waits(pump)
    z2a.write( ('A2W', ('exec', 7 , 0), 0) )
    waits(pump)
    gevent.kill(g1)
    gevent.kill(g2)
    return transcript
def env_liveness(static, z2p, z2f, z2a, z2w, a2z, p2z, f2z, w2z, pump):
    """Liveness variant of the equivocation environment.

    Same message schedule as env_equivocation, but the z2a payload tuples
    omit the embedded token counts (only write()'s second argument carries
    them).  NOTE(review): this environment is defined but never exercised
    from __main__ below; confirm whether it is still needed.
    """
    delta = 3
    n = 4
    sid = ('one', tuple(range(1,n+1)), delta)
    # Corrupt party 1; see the note in env_equivocation about (sid,10).
    static.write( (('sid', sid), ('crupt',(sid,1), (sid,10))) )
    transcript = []
    # Background readers for adversary->Z and party->Z traffic.
    def _a2z():
        while True:
            m = waits(a2z)
            transcript.append('a2z: ' + str(m.msg))
            pump.write('dump')
    def _p2z():
        while True:
            m = waits(p2z)
            transcript.append('p2z: ' + str(m.msg))
            pump.write('dump')
    g1 = gevent.spawn(_a2z)
    g2 = gevent.spawn(_p2z)
    def t(s):
        # Helper to log issued commands (currently unused below).
        transcript.append('cmd: ' + str(s))
    def channel_id(fro, to, r):
        # Session id of the point-to-point channel fro->to for round r.
        s = ('one', (sid,fro), (sid,to), r, delta)
        return (s,'F_chan')
    # Round 1: corrupt party 1 sends conflicting VAL messages.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,1)), ('send', ('VAL', 1)))))), 4*n)
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,1)), ('send', ('VAL', 2)))))), 4*n)
    waits(pump)
    z2a.write( ('A2W', ('get-leaks',)), n*(4*n+1))
    waits(pump)
    for _ in range(4):
        z2w.write( ('poll',), 1)
        waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2W', ('clock-round',)))) )
    waits(pump)
    z2a.write( ('A2W', ('exec', 4, 0)) )
    waits(pump)
    for _ in range(6):
        z2a.write( ('A2W', ('exec', 7, 0)) )
        waits(pump)
    # Round 7: conflicting ECHO messages.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,7)), ('send', ('ECHO',1)))))), 3)
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,7)), ('send', ('ECHO',2)))))), 3)
    waits(pump)
    for _ in range(3):
        z2w.write( ('poll',), 1)
        waits(pump)
    for _ in range(8):
        z2a.write( ('A2W', ('exec', 7 , 0)) )
        waits(pump)
    # Conflicting READY messages.
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,2,7)), ('send', ('READY',1)))))), 0)
    waits(pump)
    z2a.write( ('A2W', ('exec', 7 , 0)) )
    waits(pump)
    z2a.write( ('A2P', ((sid,1), ('P2F', ((channel_id(1,3,7)), ('send', ('READY',2)))))), 0)
    waits(pump)
    z2a.write( ('A2W', ('exec', 7 , 0)) )
    waits(pump)
    gevent.kill(g1)
    gevent.kill(g2)
    return transcript
def distinguisher(t_ideal, t_real):
    """Print both transcripts and report whether they are identical."""
    print('\n\t\033[93m \033[1m[Ideal Transcript] \033[0m')
    for entry in t_ideal:
        print(entry)
    print('\n\t\033[93m \033[1m [Real Transcript] \033[0m')
    for entry in t_real:
        print(entry)
    # Identical transcripts mean the simulator fools this environment.
    if t_ideal == t_real:
        print("\033[92m[Distinguisher] They're the same\033[0m")
    else:
        print("\033[91m[Distinguisher] They're different\033[0m")
if __name__=='__main__':
    # Ideal world: the Bracha broadcast functionality with a simulator built
    # from the (deliberately) broken protocol.
    print('\n\t\t\033[93m [IDEAL WORLD] \033[0m\n')
    t1 = execWrappedUC(
        128,
        env_equivocation,
        [('F_bracha',Syn_Bracha_Functionality)],
        wrappedPartyWrapper('F_bracha'),
        Syn_FWrapper,
        brachaSimulator(Broken_Bracha_Protocol),
        poly=Polynomial([100,2,3,4,5,6,7])
    )
    # Real world: the broken protocol itself under the dummy adversary.
    # NOTE(review): env_liveness is defined above but never run here.
    print('\n\t\t\033[93m [REAL WORLD] \033[0m\n')
    t2 = execWrappedUC(
        128,
        env_equivocation,
        [('F_chan',Syn_Channel)],
        wrappedProtocolWrapper(Broken_Bracha_Protocol),
        Syn_FWrapper,
        DummyWrappedAdversary,
        poly=Polynomial([100,2,3,4,5,6,7])
    )
    # Equal transcripts => the environment cannot distinguish the worlds.
    distinguisher(t1, t2)
|
#I pledge my honor that I have abided by the Stevens Honor System. Jill McDonald
#This function will accept a list of numbers and square each element of the
#list.
def squarelist(lst):
    """Square every element of *lst* in place (returns None)."""
    for index, value in enumerate(lst):
        lst[index] = value * value
def main():
    """Demo: square a sample list in place and print the result."""
    sample = [0, 1, 2, 3, 4, 5]
    squarelist(sample)
    print(sample)
main()
|
import os, subprocess
def disable_mouse_by_id(id):
    # Disable the X input device with the given xinput id.
    # NOTE(review): `id` shadows the builtin, and the command goes through a
    # shell string; id comes from xinput's own output here, so it is tolerated.
    subprocess.call('xinput -disable ' + str(id), shell=True)
def get_mouse_filename_from_id(id):
    """Map an xinput device id to its /dev/input/mouseN node.

    Reads the device's event node from `xinput list-props`, then matches it
    against /proc/bus/input/devices to find the corresponding mouseN entry.
    """
    # Fixed: parenthesized print is valid under both Python 2 and Python 3
    # (the original bare print statement is a syntax error on Python 3).
    print("guessed mouse id: " + str(id))
    event_address_all = subprocess.check_output("xinput list-props " + str(id) + " | grep -Eo '/dev/input/.*[0-9]{1,2}'", shell=True).rstrip()
    event_address = event_address_all.split('/')[-1] # get last item
    # look at /proc/bus/input/devices to see which mouse this event maps to
    mouse_address = subprocess.check_output("cat /proc/bus/input/devices | grep -Eo 'mouse[0-9]{1,2} " + event_address + "'", shell=True).split()[0]
    return '/dev/input/'+mouse_address
def make_mouse_readable(mouse_filename):
    """Ensure the mouse device node is readable by the current user."""
    #first check if file is not readable
    if not os.access(mouse_filename, os.R_OK):
        #if not readable, change permissions
        #this line might potentially ask for a password
        subprocess.call('sudo chmod a+r ' + mouse_filename, shell=True)
def get_mouse_process_id():
    """Return the xinput id of the (assumed last-listed) mouse, or None."""
    xinput_id_output = subprocess.check_output("xinput list | grep -Eo 'ouse.*id\=[0-9]{1,2}' | grep -Eo '[0-9]{1,2}'", shell=True)
    candidates = xinput_id_output.split("\n")
    # Walk the ids from the bottom up and take the first non-empty entry.
    for candidate in reversed(candidates):
        if candidate:
            return int(candidate)
    return None
|
from collections import Counter
from NeuralGraph.processing import data_parser, pd_to_pickle
from NeuralGraph.pickle_out import pickle_out
def main():
    """Print label frequencies for the first 1001 lines of the dataset.

    Each line is tab-separated; the label is taken from the last field.
    """
    with open('dataset/pre_data_all.txt','r') as f:
        lines = f.readlines()
    labels = [line.strip().split('\t')[-1] for line in lines[:1001]]
    print(Counter(labels))
def mm():
    """Parse the full dataset with the project's data_parser and print label
    frequencies for the first 1001 records."""
    # Hard-coded server paths -- adjust when running elsewhere.
    data_path = '/home/ubuntu/wangzhongxu/gcnn2/NGFP/dataset'
    # NOTE(review): save_dir is currently unused in this function.
    save_dir = '/home/ubuntu/wangzhongxu/gcnn2/NGFP/dataset/pickle'
    PD_FILE = 'pre_data_all.txt' # pre_data_all.txt
    pd_all_lst = data_parser(data_path, pd_filename=PD_FILE)
    print(len(pd_all_lst))
    print(pd_all_lst[0])
    # Keep only the last field of each record -- presumably the label.
    pd_all_lst = [i[-1] for i in pd_all_lst]
    print(Counter(pd_all_lst[:1001]))
# Disabled helper kept for reference:
# def po():
#     train_set, valid_set = pickle_out(start=1, amount=1, random_state=None)
if __name__=='__main__':
    # Both entry points read from the dataset tree; mm() additionally
    # requires the NeuralGraph project package.
    main()
    mm()
import os
from sys import platform

# Project-wide path configuration.  All paths derive from this file's
# grandparent directory (the repository root).
project_name = 'kernel-method-MVA'
project_dir = os.path.dirname(os.path.dirname(__file__))

# Output/input folder layout, each kept with a trailing separator.
results_dir = '{}/results/'.format(project_dir)
data_dir = '{}/data/'.format(project_dir)
fit_dir = '{}/fit/'.format(project_dir)

# File extension used for datasets and fitted results.
ext = '.csv'
|
"""
剑指offer第13题,机器人的运动范围。
地上有一个m行n列的方格,从坐标 [0,0] 到坐标 [m-1,n-1] 。一个机器人从坐标 [0, 0] 的格子开始移动,它每次可以向左、右、上、下移动一格(不能移动到
方格外),也不能进入行坐标和列坐标的数位之和大于k的格子。例如,当k为18时,机器人能够进入方格 [35, 37] ,因为3+5+3+7=18。但它不能进入方格 [35, 38],
因为3+5+3+8=19。请问该机器人能够到达多少个格子?
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ji-qi-ren-de-yun-dong-fan-wei-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# Depth-first search over the grid (similar in spirit to the N-queens search).
def sums(num):
    """Return the sum of the decimal digits of a non-negative integer."""
    s = 0
    while num != 0:
        s += num % 10
        num = num // 10
    return s
# Kept for backward compatibility only: earlier revisions tracked visited
# cells in this shared module-level list, which made repeated calls return
# wrong results and gave O(len) membership tests.
visitPoint = []
def movingCount(x, y, k, m, n, _visited=None):
    """Count the cells of an m x n grid reachable from (x, y).

    A cell is enterable when the digit sums of its coordinates total at most
    k.  The robot only needs right/down moves, which reach every reachable
    cell for this problem.

    Fixes over the original:
    * visited cells live in a per-call set (``_visited``, threaded through
      the recursion) instead of a shared module-level list, so repeated
      calls no longer interfere with each other;
    * set membership is O(1) instead of O(len(list));
    * bounds are checked before computing digit sums.
    """
    if _visited is None:
        _visited = set()
    if x < 0 or x >= m or y < 0 or y >= n:
        return 0
    if sums(x) + sums(y) > k or (x, y) in _visited:
        return 0
    _visited.add((x, y))
    return (1
            + movingCount(x, y + 1, k, m, n, _visited)
            + movingCount(x + 1, y, k, m, n, _visited))
if __name__ == '__main__':
    # 15 x 38 grid with digit-sum threshold 9; prints the reachable count.
    res = movingCount(0,0,9,15,38)
    print(res)
def identity(x):
    """Identity function: return the argument unchanged."""
    return x
class Empty(object):
    """Concat-list node holding no elements."""

    def __len__(self):
        return 0

    def to_list(self):
        """Return the (empty) plain-list representation."""
        return []
class Singleton(object):
    """Concat-list node holding exactly one value."""

    def __init__(self, value):
        self.value = value

    def __len__(self):
        return 1

    def to_list(self):
        """Return the one-element plain-list representation."""
        return [self.value]
class Concat(object):
    """Concat-list node joining two child concat-lists."""

    def __init__(self, left, right=None):
        self.left = left
        # TODO: defaulting `right` to an Empty() instance is still untested;
        # as in the original, a None right breaks __len__/to_list.
        self.right = right

    def __len__(self):
        return len(self.left) + len(self.right)

    def to_list(self):
        """Flatten both children into one plain list, left before right."""
        return self.left.to_list() + self.right.to_list()
class SingleThreadedMultiplexor(object):
    """Sequential map-reduce over a concat-list (Empty/Singleton/Concat).

    ``mapf`` may be None (identity); ``initial`` seeds the reduction and is
    folded in exactly once, at the leftmost leaf.
    """

    def map_reduce(self, clist, mapf, reducef, initial=None):
        """Apply ``mapf`` to each element and fold results with ``reducef``.

        Raises TypeError for unknown node types or an empty sequence with
        no initial value.
        """
        if isinstance(clist, Empty):
            return self._map_reduce_empty(clist, initial)
        if isinstance(clist, Singleton):
            return self._map_reduce_singleton(clist, mapf, reducef, initial)
        if isinstance(clist, Concat):
            return self._map_reduce_concat(clist, mapf, reducef, initial)
        raise TypeError("Unexpected clist type: %s" % clist.__class__.__name__)

    def _map_reduce_empty(self, clist, initial):
        # An empty list reduces to the seed; with no seed this mirrors the
        # built-in reduce() error for an empty sequence.
        if initial is not None:
            return initial
        raise TypeError("map_reduce() of empty sequence with no initial value")

    def _map_reduce_singleton(self, clist, mapf, reducef, initial):
        if mapf is None:
            mapf = identity
        result = mapf(clist.value)
        # Fixed: test `is not None` (as _map_reduce_empty does) so falsy
        # seeds such as 0 or "" are still folded in.
        if initial is not None:
            result = reducef(initial, result)
        return result

    def _map_reduce_concat(self, clist, mapf, reducef, initial):
        def sub_map_reduce(target, x_initial=None):
            return self.map_reduce(target, mapf, reducef, initial=x_initial)
        # The seed is threaded into the left (first) branch only, so it is
        # applied exactly once.
        if clist.left and clist.right:
            result = reducef(sub_map_reduce(clist.left, x_initial=initial),
                             sub_map_reduce(clist.right))
        elif clist.left:
            result = sub_map_reduce(clist.left, x_initial=initial)
        elif clist.right:
            result = sub_map_reduce(clist.right, x_initial=initial)
        else:
            # Fixed: both children empty used to fall through and raise
            # UnboundLocalError; treat it like an empty sequence instead.
            result = self._map_reduce_empty(clist, initial)
        return result
|
#!/usr/bin/env python3
"""
The second-level axes subclass used for all ProPlot figures.
Implements plotting method overrides.
"""
import functools
import inspect
import itertools
import re
import sys
from numbers import Integral
import matplotlib.axes as maxes
import matplotlib.cm as mcm
import matplotlib.collections as mcollections
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
import numpy as np
import numpy.ma as ma
from .. import colors as pcolors
from .. import constructor
from .. import ticker as pticker
from ..config import rc
from ..internals import ic # noqa: F401
from ..internals import (
_guide_kw_to_arg,
_guide_kw_to_obj,
_keyword_to_positional,
_not_none,
_pop_kwargs,
_pop_params,
_pop_props,
_process_props,
_snippet_manager,
_state_context,
docstring,
warnings,
)
from ..utils import edges, edges2d, to_xyz
from . import base
try:
from cartopy.crs import PlateCarree
except ModuleNotFoundError:
PlateCarree = object
__all__ = ['PlotAxes']
# Constants
EDGEWIDTH = 0.25 # native linewidth used for grid box edges in matplotlib
# Plotting methods that the basemap backend treats as geographic
# (called with latlon=True by default).
BASEMAP_FUNCS = ( # default latlon=True
    'barbs', 'contour', 'contourf', 'hexbin',
    'imshow', 'pcolor', 'pcolormesh', 'plot',
    'quiver', 'scatter', 'streamplot', 'step',
)
# Plotting methods that the cartopy backend treats as geographic
# (called with transform=PlateCarree() by default).
CARTOPY_FUNCS = ( # default transform=PlateCarree()
    'barbs', 'contour', 'contourf',
    'fill', 'fill_between', 'fill_betweenx', # NOTE: not sure if these work
    'imshow', 'pcolor', 'pcolormesh', 'plot',
    'quiver', 'scatter', 'streamplot', 'step',
    'tricontour', 'tricontourf', 'tripcolor', # NOTE: not sure why these work
)
# Data argument docstrings
_args_1d_docstring = """
*args : {y} or {x}, {y}
The data passed as positional or keyword arguments. Interpreted as follows:
* If only `{y}` coordinates are passed, try to infer the `{x}` coordinates
from the `~pandas.Series` or `~pandas.DataFrame` indices or the
`~xarray.DataArray` coordinates. Otherwise, the `{x}` coordinates
are ``np.arange(0, {y}.shape[0])``.
* If the `{y}` coordinates are a 2D array, plot each column of data in succession
(except where each column of data represents a statistical distribution, as with
``boxplot``, ``violinplot``, or when using ``means=True`` or ``medians=True``).
* If any arguments are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`.
A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
_args_1d_multi_docstring = """
*args : {y}2 or {x}, {y}2, or {x}, {y}1, {y}2
The data passed as positional or keyword arguments. Interpreted as follows:
* If only `{y}` coordinates are passed, try to infer the `{x}` coordinates from
the `~pandas.Series` or `~pandas.DataFrame` indices or the `~xarray.DataArray`
coordinates. Otherwise, the `{x}` coordinates are ``np.arange(0, {y}2.shape[0])``.
* If only `{x}` and `{y}2` coordinates are passed, set the `{y}1` coordinates
to zero. This draws elements originating from the zero line.
* If both `{y}1` and `{y}2` are provided, draw elements between these points. If
either are 2D, draw elements by iterating over each column.
* If any arguments are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`.
A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
_args_2d_docstring = """
*args : {z} or x, y, {z}
The data passed as positional or keyword arguments. Interpreted as follows:
* If only {zvar} coordinates are passed, try to infer the `x` and `y` coordinates
from the `~pandas.DataFrame` indices and columns or the `~xarray.DataArray`
coordinates. Otherwise, the `y` coordinates are ``np.arange(0, y.shape[0])``
and the `x` coordinates are ``np.arange(0, y.shape[1])``.
* For ``pcolor`` and ``pcolormesh``, calculate coordinate *edges* using
`~proplot.utils.edges` or `~proplot.utils.edges2d` if *centers* were provided.
For all other methods, calculate coordinate *centers* if *edges* were provided.
* If the `x` or `y` coordinates are `pint.Quantity`, auto-add the pint unit registry
to matplotlib's unit registry using `~pint.UnitRegistry.setup_matplotlib`. If the
{zvar} coordinates are `pint.Quantity`, pass the magnitude to the plotting
command. A `pint.Quantity` embedded in an `xarray.DataArray` is also supported.
"""
# Register the data-argument docstrings, formatted per orientation/axis,
# for substitution into the plotting-method docstrings.
_snippet_manager['plot.args_1d_y'] = _args_1d_docstring.format(x='x', y='y')
_snippet_manager['plot.args_1d_x'] = _args_1d_docstring.format(x='y', y='x')
_snippet_manager['plot.args_1d_multiy'] = _args_1d_multi_docstring.format(x='x', y='y')
_snippet_manager['plot.args_1d_multix'] = _args_1d_multi_docstring.format(x='y', y='x')
_snippet_manager['plot.args_2d'] = _args_2d_docstring.format(z='z', zvar='`z`')
_snippet_manager['plot.args_2d_flow'] = _args_2d_docstring.format(z='u, v', zvar='`u` and `v`') # noqa: E501
# Shared docstrings
_args_1d_shared_docstring = """
data : dict-like, optional
A dict-like dataset container (e.g., `~pandas.DataFrame` or
`~xarray.DataArray`). If passed, positional arguments can optionally
be string `data` keys and the arrays used for plotting are retrieved
with ``data[key]``. This is a `native matplotlib feature
<https://matplotlib.org/stable/gallery/misc/keyword_plotting.html>`__.
autoformat : bool, optional
Whether the `x` axis labels, `y` axis labels, axis formatters, axes titles,
legend titles, and colorbar labels are automatically configured when a
`~pandas.Series`, `~pandas.DataFrame`, `~xarray.DataArray`, or `~pint.Quantity`
is passed to the plotting command. Default is :rc:`autoformat`.
"""
_args_2d_shared_docstring = """
%(plot.args_1d_shared)s
order : {{'C', 'F'}}, optional
If ``'C'`` (C-style row-major order), `z` coordinates should be shaped
``(y, x)``. If ``'F'`` (Fortran-style column-major order) `z` coordinates
should be shaped ``(x, y)``. Default is ``'C'``.
globe : bool, optional
For `proplot.axes.GeoAxes` only. Whether to enforce global coverage.
Default is ``False``. When set to ``True`` this does the following:
#. Interpolates input data to the North and South poles by setting the data
values at the poles to the mean from latitudes nearest each pole.
#. Makes meridional coverage "circular", i.e. the last longitude coordinate
equals the first longitude coordinate plus 360\N{DEGREE SIGN}.
#. When basemap is the backend, cycles 1D longitude vectors to fit within
the map edges. For example, if the central longitude is 90\N{DEGREE SIGN},
the data is shifted so that it spans -90\N{DEGREE SIGN} to 270\N{DEGREE SIGN}.
"""
# Register the shared 1D/2D keyword docs (data=/autoformat=/order=/globe=).
_snippet_manager['plot.args_1d_shared'] = _args_1d_shared_docstring
_snippet_manager['plot.args_2d_shared'] = _args_2d_shared_docstring
# Auto colorbar and legend docstring
_guide_docstring = """
colorbar : bool, int, or str, optional
If not ``None``, this is a location specifying where to draw an
*inset* or *panel* colorbar from the resulting object(s). If ``True``,
the default :rc:`colorbar.loc` is used. Valid locations are described
in `~proplot.axes.Axes.colorbar`.
colorbar_kw : dict-like, optional
Extra keyword args for the call to `~proplot.axes.Axes.colorbar`.
legend : bool, int, or str, optional
If not ``None``, this is a location specifying where to draw an *inset*
or *panel* legend from the resulting object(s). If ``True``, the default
:rc:`legend.loc` is used. Valid locations are described in
`~proplot.axes.Axes.legend`.
legend_kw : dict-like, optional
Extra keyword args for the call to `~proplot.axes.Axes.legend`.
"""
# Register the colorbar/legend guide keywords shared by plotting methods.
_snippet_manager['plot.guide'] = _guide_docstring
# Misc shared 1D plotting docstrings
_inbounds_docstring = """
inbounds : bool, optional
Whether to restrict the default `y` (`x`) axis limits to account for only
in-bounds data when the `x` (`y`) axis limits have been locked. Default
is :rc:`axes.inbounds`. See also :rcraw:`cmap.inbounds`.
"""
_error_means_docstring = """
mean, means : bool, optional
Whether to plot the means of each column for 2D `{y}` coordinates. Means
are calculated with `numpy.nanmean`. If no other arguments are specified,
this also sets ``barstd=True`` (and ``boxstd=True`` for violin plots).
median, medians : bool, optional
Whether to plot the medians of each column for 2D `{y}` coordinates. Medians
are calculated with `numpy.nanmedian`. If no other arguments arguments are
specified, this also sets ``barstd=True`` (and ``boxstd=True`` for violin plots).
"""
_error_bars_docstring = """
barstd, barstds : bool, float, or 2-tuple of float, optional
*Valid only if `mean` or `median` is ``True``*. Standard deviation multiples for
*thin error bars* with optional whiskers (i.e., caps). If scalar, then +/- that
multiple is used. If ``True``, the default standard deviation range of +/-3 is used.
barpctile, barpctiles : bool, float, or 2-tuple of float, optional
*Valid only if `mean` or `median` is ``True``*. As with `barstd`, but instead
using *percentiles* for the error bars. If scalar, that percentile range is
used (e.g., ``90`` shows the 5th to 95th percentiles). If ``True``, the default
percentile range of 0 to 100 is used.
bardata : 2D array or 1D array, optional
*Valid only if `mean` and `median` are ``False``*. If shape is 2 x N, these
are the lower and upper bounds for the thin error bars. If shape is N, these
are the absolute, symmetric deviations from the central points.
boxstd, boxstds, boxpctile, boxpctiles, boxdata : optional
As with `barstd`, `barpctile`, and `bardata`, but for *thicker error bars*
representing a smaller interval than the thin error bars. If `boxstd` is
``True``, the default standard deviation range of +/-1 is used. If `boxpctiles`
is ``True``, the default percentile range of 25 to 75 is used (i.e., the
interquartile range). When "boxes" and "bars" are combined, this has the
effect of drawing miniature box-and-whisker plots.
capsize : float, optional
The cap size for thin error bars in points. Default is :rc:`errorbar.capsize`.
barz, barzorder, boxz, boxzorder : float, optional
The "zorder" for the thin and thick error bars. Default is ``2.5``.
barc, barcolor, boxc, boxcolor : color-spec, optional
Colors for the thin and thick error bars. Default is
:rc:`boxplot.whiskerprops.color`.
barlw, barlinewidth, boxlw, boxlinewidth : float, optional
Line widths for the thin and thick error bars, in points. The defaults
:rc:`boxplot.whiskerprops.linewidth` (bars) and four times that value (boxes).
boxm, boxmarker : bool or marker-spec, optional
Whether to draw a small marker in the middle of the box denoting the mean or
median position. Ignored if `boxes` is ``False``. Default is ``'o'``.
boxms, boxmarkersize : size-spec, optional
The marker size for the `boxmarker` marker in points ** 2. Default size
is equal to ``(2 * boxlinewidth) ** 2``.
boxmc, boxmarkercolor, boxmec, boxmarkeredgecolor : color-spec, optional
Color, face color, and edge color for the `boxmarker` marker. Default color
and edge color are ``'w'``.
"""
_error_shading_docstring = """
shadestd, shadestds, shadepctile, shadepctiles, shadedata : optional
As with `barstd`, `barpctile`, and `bardata`, but using *shading* to indicate
the error range. If `shadestds` is ``True``, the default standard deviation
range of +/-2 is used. If `shadepctiles` is ``True``, the default
percentile range of 10 to 90 is used.
fadestd, fadestds, fadepctile, fadepctiles, fadedata : optional
As with `shadestd`, `shadepctile`, and `shadedata`, but for an additional,
more faded, *secondary* shaded region. If `fadestds` is ``True``, the default
standard deviation range of +/-3 is used. If `fadepctiles` is ``True``,
the default percentile range of 0 to 100 is used.
shadec, shadecolor, fadec, fadecolor : color-spec, optional
Colors for the different shaded regions. Default is to inherit the parent color.
shadez, shadezorder, fadez, fadezorder : float, optional
The "zorder" for the different shaded regions. Default is ``1.5``.
shadea, shadealpha, fadea, fadealpha : float, optional
The opacity for the different shaded regions. Defaults are ``0.4`` and ``0.2``.
shadelw, shadelinewidth, fadelw, fadelinewidth : float, optional
The edge line width for the shading patches. Default is :rc:`patch.linewidth`.
shdeec, shadeedgecolor, fadeec, fadeedgecolor : float, optional
The edge color for the shading patches. Default is ``'none'``.
shadelabel, fadelabel : bool or str, optional
Labels for the shaded regions to be used as separate legend entries. To toggle
labels "on" and apply a *default* label, use e.g. ``shadelabel=True``. To apply
a *custom* label, use e.g. ``shadelabel='label'``. Otherwise, the shading is
drawn underneath the line and/or marker in the legend entry.
"""
# Register the in-bounds and error-bar/shading keyword docs; the *_means
# variants are formatted for each data axis.
# NOTE(review): "shdeec" inside _error_shading_docstring above looks like a
# typo for "shadeec", but it is runtime docstring text and is left untouched
# here -- confirm which spelling the keyword parser actually accepts.
_snippet_manager['plot.inbounds'] = _inbounds_docstring
_snippet_manager['plot.error_means_y'] = _error_means_docstring.format(y='y')
_snippet_manager['plot.error_means_x'] = _error_means_docstring.format(y='x')
_snippet_manager['plot.error_bars'] = _error_bars_docstring
_snippet_manager['plot.error_shading'] = _error_shading_docstring
# Color docstrings
_cycle_docstring = """
cycle : cycle-spec, optional
The cycle specifer, passed to the `~proplot.constructor.Cycle` constructor.
If the returned cycler is unchanged from the current cycler, the axes
cycler will not be reset to its first position. To disable property cycling
and just use black for the default color, use ``cycle=False``, ``cycle='none'``,
or ``cycle=()`` (analogous to disabling ticks with e.g. ``xformatter='none'``).
To restore the default property cycler, use ``cycle=True``.
cycle_kw : dict-like, optional
Passed to `~proplot.constructor.Cycle`.
"""
_cmap_norm_docstring = """
cmap : colormap-spec, optional
The colormap specifer, passed to the `~proplot.constructor.Colormap`
constructor function.
cmap_kw : dict-like, optional
Passed to `~proplot.constructor.Colormap`.
norm : norm-spec, optional
The continuous colormap normalizer, passed to the `~proplot.constructor.Norm`
constructor function. If `discrete` is ``True`` this is also used to normalize
values passed to `~proplot.colors.DiscreteNorm` before colors is selected.
norm_kw : dict-like, optional
Passed to `~proplot.constructor.Norm`.
discrete : bool, optional
If ``False``, then `~proplot.colors.DiscreteNorm` is not applied to the
colormap. Instead, for non-contour plots, the number of levels will be
roughly controlled by :rcraw:`cmap.lut`. This has a similar effect to
using `levels=large_number` but it may improve rendering speed. Default
is ``False`` for `~proplot.axes.Axes.imshow`, `~proplot.axes.Axes.matshow`,
`~proplot.axes.Axes.spy`, `~proplot.axes.Axes.hexbin`, `~proplot.axes.Axes.hist2d`,
and `~proplot.axes.Axes.heatmap` plots, but ``True`` otherwise.
sequential : bool, optional
Use :rc:`cmap.sequential` as the default colormap.
diverging : bool, optional
Use :rc:`cmap.diverging` as the default colormap and use
`~proplot.colors.DivergingNorm` as the default continuous normalizer.
This will also ensure auto-generated levels include a value at zero.
cyclic : bool, optional
Use :rc:`cmap.cyclic` as the default colormap and modify the default
arguments passed to `~proplot.colors.DiscreteNorm` so that colors
on either end are distinct.
sequential, diverging, cyclic, qualitative : bool, optional
Boolean arguments used if `cmap` is not passed. Set these to ``True``
to use the default :rcraw:`cmap.sequential`, :rcraw:`cmap.diverging`,
:rcraw:`cmap.cyclic`, and :rcraw:`cmap.qualitative` colormaps. The
latter three options also change level- and norm-generation behavior.
extend : {{'neither', 'min', 'max', 'both'}}, optional
Whether to assign unique colors to out-of-bounds data and draw
colorbar "extensions" when a colorbar is drawn.
"""
_snippet_manager['plot.cycle'] = _cycle_docstring
_snippet_manager['plot.cmap_norm'] = _cmap_norm_docstring
# Levels docstrings
# NOTE: In some functions we only need some components
_vlim_levels_docstring = """
vmin, vmax : float, optional
Used to determine level locations if `levels` or `values` is an integer.
Actual levels may not fall exactly on `vmin` and `vmax`, but the minimum
level will be no smaller than `vmin` and the maximum level will be
no larger than `vmax`. If `vmin` or `vmax` are not provided, the
minimum and maximum data values are used.
"""
_manual_levels_docstring = """
N
Shorthand for `levels`.
levels : int or sequence of float, optional
The number of level edges or a sequence of level edges. If the former,
`locator` is used to generate this many level edges at "nice" intervals.
If the latter, the levels should be monotonically increasing or
decreasing (note that decreasing levels will only work with ``pcolor``
plots, not ``contour`` plots). Default is :rc:`cmap.levels`.
values : int or sequence of float, optional
The number of level centers or a sequence of level centers. If the former,
`locator` is used to generate this many level centers at "nice" intervals.
If the latter, levels are inferred using `~proplot.utils.edges`.
This will override any `levels` input.
"""
_auto_levels_docstring = """
robust : bool, float, or 2-tuple, optional
If ``True`` and `vmin` or `vmax` were not provided, they are
determined from the 2nd and 98th data percentiles rather than the
minimum and maximum. If float, this percentile range is used (for example,
``90`` corresponds to the 5th to 95th percentiles). If 2-tuple of float,
these specific percentiles should be used. This feature is useful
when your data has large outliers. Default is :rc:`cmap.robust`.
inbounds : bool, optional
If ``True`` and `vmin` or `vmax` were not provided, when axis limits
have been explicitly restricted with `~matplotlib.axes.Axes.set_xlim`
or `~matplotlib.axes.Axes.set_ylim`, out-of-bounds data is ignored.
Default is :rc:`cmap.inbounds`. See also :rcraw:`axes.inbounds`.
locator : locator-spec, optional
The locator used to determine level locations if `levels` or `values`
is an integer. Passed to the `~proplot.constructor.Locator` constructor.
Default is `~matplotlib.ticker.MaxNLocator` with ``levels`` integer levels.
locator_kw : dict-like, optional
Passed to `~proplot.constructor.Locator`.
symmetric : bool, optional
If ``True``, automatically generated levels are symmetric about zero.
Default is always ``False``.
positive : bool, optional
If ``True``, automatically generated levels are positive with a minimum at zero.
Default is always ``False``.
negative : bool, optional
If ``True``, automatically generated levels are negative with a maximum at zero.
Default is always ``False``.
nozero : bool, optional
If ``True``, ``0`` is removed from the level list. This is mainly useful for
single-color `~matplotlib.axes.Axes.contour` plots.
"""
_snippet_manager['plot.levels_vlim'] = _vlim_levels_docstring
_snippet_manager['plot.levels_manual'] = _manual_levels_docstring
_snippet_manager['plot.levels_auto'] = _auto_levels_docstring
# Labels docstrings
_labels_1d_docstring = """
label, value : float or str, optional
The single legend label or colorbar coordinate to be used for this plotted
element. This is generally used with 1D input coordinates.
labels, values : sequence of float or sequence of str, optional
The legend labels or colorbar coordinates used for each plotted element.
Can be numeric or string, and must match the number of plotted elements.
This is generally used with 2D input coordinates.
"""
_labels_2d_docstring = """
labels : bool, optional
Whether to apply labels to contours and grid boxes. The text will be
white when the luminance of the underlying filled contour or grid box
is less than 50 and black otherwise.
labels_kw : dict-like, optional
Ignored if `labels` is ``False``. Extra keyword args for the labels.
For contour plots, this is passed to `~matplotlib.axes.Axes.clabel`.
Otherwise, this is passed to `~matplotlib.axes.Axes.text`.
fmt : format-spec, optional
Passed to the `~proplot.constructor.Norm` constructor, used to format
number labels. You can also use the `precision` keyword arg.
precision : int, optional
Maximum number of decimal places for the number labels. Number labels
are generated with the `~proplot.ticker.SimpleFormatter` formatter,
which permits limiting the precision.
label : str, optional
The legend label to be used for this object. In the case of
contours, this is paired with the central artist in the artist
list returned by `matplotlib.contour.ContourSet.legend_elements`.
"""
_snippet_manager['plot.labels_1d'] = _labels_1d_docstring
_snippet_manager['plot.labels_2d'] = _labels_2d_docstring
# Plot docstring
_plot_docstring = """
Plot standard lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.error_shading)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.plot`.
See also
--------
PlotAxes.plot
PlotAxes.plotx
matplotlib.axes.Axes.plot
"""
_snippet_manager['plot.plot'] = _plot_docstring.format(y='y')
_snippet_manager['plot.plotx'] = _plot_docstring.format(y='x')
# Step docstring
# NOTE: Internally matplotlib implements step with thin wrapper of plot
_step_docstring = """
Plot step lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.step`.
See also
--------
PlotAxes.step
PlotAxes.stepx
matplotlib.axes.Axes.step
"""
_snippet_manager['plot.step'] = _step_docstring.format(y='y')
_snippet_manager['plot.stepx'] = _step_docstring.format(y='x')
# Stem docstring
_stem_docstring = """
Plot stem lines.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(plot.guide)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.stem`.
"""
# NOTE: The vertical variant documents y-data like plot/step/scatter; previously
# both entries used y='x', which mislabeled the 'plot.stem' snippet.
_snippet_manager['plot.stem'] = _stem_docstring.format(y='y')
_snippet_manager['plot.stemx'] = _stem_docstring.format(y='x')
# Lines docstrings
_lines_docstring = """
Plot {orientation} lines.
Parameters
----------
%(plot.args_1d_multi{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` coordinates. If this
is ``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to color lines greater than zero with `poscolor` and lines less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive lines. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
c, color, colors : color-spec or sequence, optional
The line color(s).
ls, linestyle, linestyles : str or sequence, optional
The line style(s).
lw, linewidth, linewidths : float or sequence, optional
The line width(s).
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.{prefix}lines`.
See also
--------
PlotAxes.vlines
PlotAxes.hlines
matplotlib.axes.Axes.vlines
matplotlib.axes.Axes.hlines
"""
_snippet_manager['plot.vlines'] = _lines_docstring.format(
y='y', prefix='v', orientation='vertical'
)
_snippet_manager['plot.hlines'] = _lines_docstring.format(
y='x', prefix='h', orientation='horizontal'
)
# Scatter docstring
_parametric_docstring = """
Plot a parametric line.
Parameters
----------
%(plot.args_1d_y)s
c, color, colors, values : array-like, optional
The parametric coordinate. These can be passed as a third
positional argument or as a keyword argument. They can also
be string labels instead of numbers and the resulting
colorbar ticks will be labeled accordingly.
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_vlim)s
%(plot.guide)s
interp : int, optional
Interpolate to this many additional points between the parametric
coordinates. Default is ``0``. This can be increased to make the color
gradations between a small number of coordinates appear "smooth".
scalex, scaley : bool, optional
Whether the view limits are adapted to the data limits. The values are
passed on to `~matplotlib.axes.Axes.autoscale_view`.
%(plot.inbounds)s
**kwargs
Valid `~matplotlib.collections.LineCollection` properties.
Returns
-------
`~matplotlib.collections.LineCollection`
The parametric line. See `this matplotlib example \
<https://matplotlib.org/stable/gallery/lines_bars_and_markers/multicolored_line>`__.
See also
--------
PlotAxes.plot
PlotAxes.plotx
matplotlib.collections.LineCollection
"""
_snippet_manager['plot.parametric'] = _parametric_docstring
# Scatter function docstring
_scatter_docstring = """
Plot markers with flexible keyword arguments.
Parameters
----------
%(plot.args_1d_{y})s
s, size, ms, markersize : float or sequence of float, optional
The marker size(s). If this is an array matching the shape of `x` and `y`,
the units are scaled by `smin` and `smax`.
c, color, colors, mc, markercolor, markercolors \
: array-like or color-spec, optional
The marker fill color(s). If this is an array matching the shape of `x` and `y`,
the colors are generated using `cmap`, `norm`, `vmin`, and `vmax`.
smin, smax : float, optional
The minimum and maximum marker size in units ``points**2`` used to scale
`s`. If not provided, the marker sizes are equivalent to the values in `s`.
%(plot.levels_vlim)s
%(plot.args_1d_shared)s
Other parameters
----------------
lw, linewidth, linewidths, markeredgewidth, markeredgewidths \
: float or sequence, optional
The marker edge width.
edgecolors, markeredgecolor, markeredgecolors \
: color-spec or sequence, optional
The marker edge color.
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_auto)s
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.error_shading)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.scatter`.
See also
--------
PlotAxes.scatter
PlotAxes.scatterx
matplotlib.axes.Axes.scatter
"""
_snippet_manager['plot.scatter'] = _scatter_docstring.format(y='y')
_snippet_manager['plot.scatterx'] = _scatter_docstring.format(y='x')
# Bar function docstring
_bar_docstring = """
Plot individual, grouped, or stacked bars.
Parameters
----------
%(plot.args_1d_{y})s
width : float or array-like, optional
The width(s) of the bars relative to the {x} coordinate step size.
Can be passed as a third positional argument.
{bottom} : float or array-like, optional
The coordinate(s) of the {bottom} edge of the bars. Default is
``0``. Can be passed as a fourth positional argument.
absolute_width : bool, optional
Whether to make the `width` units *absolute*. If ``True``, this
restores the default matplotlib behavior. Default is ``False``.
%(plot.args_1d_shared)s
Other parameters
----------------
stack, stacked : bool, optional
Whether to stack columns of the input array or plot the bars
side-by-side in groups.
negpos : bool, optional
Whether to shade bars greater than zero with `poscolor` and bars less
than zero with `negcolor`.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive bars. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
lw, linewidth, linewidths : float, optional
The edge width for the bar patches.
ec, edgecolor, edgecolors : color-spec, optional
The edge color for the bar patches.
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.error_means_{y})s
%(plot.error_bars)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.bar{suffix}`.
See also
--------
PlotAxes.bar
PlotAxes.barh
matplotlib.axes.Axes.bar
matplotlib.axes.Axes.barh
"""
_snippet_manager['plot.bar'] = _bar_docstring.format(
x='x', y='y', bottom='bottom', suffix=''
)
_snippet_manager['plot.barh'] = _bar_docstring.format(
x='y', y='x', bottom='left', suffix='h'
)
# Area plot docstring
_fill_docstring = """
Plot individual, grouped, or overlaid shading patches.
Parameters
----------
%(plot.args_1d_multi{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
stack, stacked : bool, optional
Whether to "stack" successive columns of the `{y}1` array. If this is
``True`` and `{y}2` was provided, it will be ignored.
negpos : bool, optional
Whether to shade where ``{y}1 >= {y}2`` with `poscolor` and where ``{y}1 < {y}2``
with `negcolor`. For example, to shade positive values red and negative values
blue, simply use ``ax.fill_between{suffix}({x}, {y}, negpos=True)``.
negcolor, poscolor : color-spec, optional
Colors to use for the negative and positive shaded regions. Ignored if `negpos`
is ``False``. Defaults are :rc:`negcolor` and :rc:`poscolor`.
where : ndarray, optional
Boolean ndarray mask for points you want to shade. See `this example \
<https://matplotlib.org/stable/gallery/pyplots/whats_new_98_4_fill_between.html>`__.
lw, linewidth, linewidths : float, optional
The edge width for the area patches.
ec, edgecolor, edgecolors : color-spec, optional
The edge color for the area patches.
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
%(plot.inbounds)s
**kwargs
Passed to `~matplotlib.axes.Axes.fill_between{suffix}`.
See also
--------
PlotAxes.area
PlotAxes.areax
PlotAxes.fill_between
PlotAxes.fill_betweenx
matplotlib.axes.Axes.fill_between
matplotlib.axes.Axes.fill_betweenx
"""
_snippet_manager['plot.fill_between'] = _fill_docstring.format(
x='x', y='y', suffix=''
)
_snippet_manager['plot.fill_betweenx'] = _fill_docstring.format(
x='y', y='x', suffix='x'
)
# Histogram docstrings
_weight_docstring = """
weights : array-like, optional
The weights associated with each point. If string this
can be retrieved from `data` (see below).
"""
_snippet_manager['plot.weights'] = _weight_docstring
_hist_docstring = """
Plot {orientation} histograms.
Parameters
----------
%(plot.args_1d_{y})s
bins : int or sequence of float, optional
The bin count or sequence of bins.
%(plot.weights)s
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.guide)s
**kwargs
Passed to `~matplotlib.axes.Axes.hist`.
See also
--------
PlotAxes.hist
PlotAxes.histh
matplotlib.axes.Axes.hist
"""
# NOTE: Vertical histograms bin data distributed along the x axis and horizontal
# histograms bin data along the y axis; previously both entries used y='x'.
_snippet_manager['plot.hist'] = _hist_docstring.format(
    y='x', orientation='vertical'
)
_snippet_manager['plot.histh'] = _hist_docstring.format(
    y='y', orientation='horizontal'
)
# Box plot docstrings
_boxplot_docstring = """
Plot {orientation} boxes and whiskers with a nice default style.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
mean, means : bool, optional
If ``True``, this passes ``showmeans=True`` and ``meanline=True`` to
`~matplotlib.axes.Axes.boxplot`.
fill : bool, optional
Whether to fill the box with a color.
fc, facecolor, fillcolor : color-spec or sequence, optional
The fill color for the boxes. Default is the next color cycler color. If
a sequence, it should be the same length as the number of objects.
a, alpha, fa, facealpha, fillalpha : float, optional
The opacity of the boxes. Default is ``1.0``. If a sequence, should
be the same length as the number of objects.
lw, linewidth, linewidths : float, optional
The linewidth of all objects. Default is :rc:`patch.linewidth`.
c, color, colors, ec, edgecolor, edgecolors : color-spec or sequence, optional
The color of all objects. Default is ``'black'``. If a sequence, should
be the same length as the number of objects.
meanls, medianls, meanlinestyle, medianlinestyle, meanlinestyles, medianlinestyles \
: line style-spec, optional
The line style for the mean and median lines drawn horizontally
across the box.
boxc, capc, whiskerc, flierc, meanc, medianc, \
boxcolor, capcolor, whiskercolor, fliercolor, meancolor, mediancolor, \
boxcolors, capcolors, whiskercolors, fliercolors, meancolors, mediancolors \
: color-spec or sequence, optional
The color of various boxplot components. If a sequence, should be the
same length as the number of objects. These are shorthands so you don't
have to pass e.g. a ``boxprops`` dictionary.
boxlw, caplw, whiskerlw, flierlw, meanlw, medianlw, boxlinewidth, caplinewidth, \
meanlinewidth, medianlinewidth, whiskerlinewidth, flierlinewidth, boxlinewidths, \
caplinewidths, meanlinewidths, medianlinewidths, whiskerlinewidths, flierlinewidths \
: float, optional
The line width of various boxplot components. These are shorthands so
you don't have to pass e.g. a ``boxprops`` dictionary.
m, marker : marker-spec, optional
Marker style for the 'fliers', i.e. outliers.
ms, markersize : float, optional
Marker size for the 'fliers', i.e. outliers.
%(plot.cycle)s
%(plot.labels_1d)s
**kwargs
Passed to `matplotlib.axes.Axes.boxplot`.
See also
--------
PlotAxes.boxes
PlotAxes.boxesh
PlotAxes.boxplot
PlotAxes.boxploth
matplotlib.axes.Axes.boxplot
"""
_snippet_manager['plot.boxplot'] = _boxplot_docstring.format(
y='y', orientation='vertical'
)
_snippet_manager['plot.boxploth'] = _boxplot_docstring.format(
y='x', orientation='horizontal'
)
# Violin plot docstrings
_violinplot_docstring = """
Plot {orientation} violins with a nice default style matching
`this matplotlib example \
<https://matplotlib.org/stable/gallery/statistics/customized_violin.html>`__.
Parameters
----------
%(plot.args_1d_{y})s
%(plot.args_1d_shared)s
Other parameters
----------------
fc, facecolor, facecolors, fillcolor, fillcolors : color-spec or sequence, optional
The violin plot fill color. Default is the next color cycler color. If
a sequence, should be the same length as the number of objects.
c, color, colors, ec, edgecolor, edgecolors : color-spec or sequence, optional
The edge color for the violin patches. Default is ``'black'``. If a
sequence, should be the same length as the number of objects.
a, alpha, fa, facealpha, fillalpha : float or sequence, optional
The opacity of the violins. Default is ``1.0``. If a sequence,
should be the same length as the number of objects.
lw, linewidth, linewidths : float, optional
The linewidth of the line objects. Default is :rc:`patch.linewidth`.
%(plot.cycle)s
%(plot.labels_1d)s
%(plot.error_bars)s
**kwargs
Passed to `matplotlib.axes.Axes.violinplot`.
Note
----
It is no longer possible to show minima and maxima with whiskers --
while this is useful for `~matplotlib.axes.Axes.boxplot`\\ s it is
redundant for `~matplotlib.axes.Axes.violinplot`\\ s.
See also
--------
PlotAxes.violins
PlotAxes.violinsh
PlotAxes.violinplot
PlotAxes.violinploth
matplotlib.axes.Axes.violinplot
"""
_snippet_manager['plot.violinplot'] = _violinplot_docstring.format(
y='y', orientation='vertical'
)
_snippet_manager['plot.violinploth'] = _violinplot_docstring.format(
y='x', orientation='horizontal'
)
# Contour docstrings
_contour_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d)s
%(plot.args_2d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_vlim)s
%(plot.levels_auto)s
%(plot.labels_2d)s
%(plot.guide)s
{add}lw, linewidth, linewidths : optional
The width of the contour lines.
For `contourf` plots, lines are added between the filled contours.
ls, linestyle, linestyles : optional
The style of the contour lines.
For `contourf` plots, lines are added between the filled contours.
ec, edgecolor, edgecolors : optional
The color for the contour lines.
For `contourf` plots, lines are added between the filled contours.
c, color, colors : optional
The color(s) for the contour lines or filled contours. If not passed,
the color is determined by `cmap` and the `z` data.
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
PlotAxes.contour
PlotAxes.contourf
PlotAxes.tricontour
PlotAxes.tricontourf
matplotlib.axes.Axes.{command}
"""
_edgefix_docstring = """
edgefix : bool or float, optional
Whether to fix an issue where `white lines appear between filled contours
<https://stackoverflow.com/q/8263769/4970632>`__ in saved vector graphics.
This can slow down figure rendering. Default is :rc:`cmap.edgefix`.
If ``True``, a default linewidth is used. If float, this linewidth is used.
"""
_snippet_manager['plot.contour'] = _contour_docstring.format(
descrip='contour lines', command='contour', add=''
)
_snippet_manager['plot.contourf'] = _contour_docstring.format(
descrip='filled contours', command='contourf', add=_edgefix_docstring
)
_snippet_manager['plot.tricontour'] = _contour_docstring.format(
descrip='contour lines on a triangular grid', command='tricontour', add=''
)
_snippet_manager['plot.tricontourf'] = _contour_docstring.format(
descrip='filled contours on a triangular grid', command='tricontourf', add=_edgefix_docstring # noqa: E501
)
# Pcolor docstring
_pcolor_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d)s
%(plot.args_2d_shared)s
{add}
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_vlim)s
%(plot.levels_auto)s
%(plot.labels_2d)s
%(plot.guide)s
lw, linewidth, linewidths : optional
The width of lines between grid boxes.
ls, linestyle, linestyles : optional
The style of lines between grid boxes.
ec, edgecolor, edgecolors : optional
The color for lines between grid boxes.
c, color, colors : optional
The color(s) for the grid boxes. If not passed,
the color is determined by `cmap` and the `z` data.
edgefix : bool, optional
Whether to fix an issue where `white lines appear between grid boxes
<https://stackoverflow.com/q/8263769/4970632>`__ in saved vector graphics.
This can slow down figure rendering. Default is :rc:`cmap.edgefix`.
If ``True``, a default linewidth is used. If float, this linewidth is used.
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
PlotAxes.pcolor
PlotAxes.pcolormesh
PlotAxes.pcolorfast
PlotAxes.heatmap
PlotAxes.tripcolor
matplotlib.axes.Axes.{command}
"""
_heatmap_descrip = """
grid boxes with formatting suitable for heatmaps. Ensures square grid
boxes, adds major ticks to the center of each grid box, disables minor ticks
and gridlines, and sets :rcraw:`cmap.discrete` to ``False`` by default.
"""
_heatmap_aspect = """
aspect : {'equal', 'auto'} or float, optional
Modify the axes aspect ratio. The aspect ratio is of particular
relevance for heatmaps since it may lead to non-square grid boxes.
This parameter is a shortcut for calling `~matplotlib.axes.set_aspect`.
Default is :rc:`image.aspect`. The options are as follows:
* Number: The data aspect ratio.
* ``'equal'``: A data aspect ratio of 1.
* ``'auto'``: Allows the data aspect ratio to change depending on
the layout. In general this results in non-square grid boxes.
"""
_snippet_manager['plot.pcolor'] = _pcolor_docstring.format(
descrip='irregular grid boxes', command='pcolor', add=''
)
_snippet_manager['plot.pcolormesh'] = _pcolor_docstring.format(
descrip='regular grid boxes', command='pcolormesh', add=''
)
_snippet_manager['plot.pcolorfast'] = _pcolor_docstring.format(
descrip='grid boxes quickly', command='pcolorfast', add=''
)
_snippet_manager['plot.tripcolor'] = _pcolor_docstring.format(
descrip='triangular grid boxes', command='tripcolor', add=''
)
_snippet_manager['plot.heatmap'] = _pcolor_docstring.format(
descrip=_heatmap_descrip.strip(), command='pcolormesh', add=_heatmap_aspect
)
# Flow function docstring
_flow_docstring = """
Plot {descrip}.
Parameters
----------
%(plot.args_2d_flow)s
c, color, colors : array-like or color-spec, optional
The colors of the {descrip} passed as either a keyword argument
or a fifth positional argument. This can be a single color or
a color array to be scaled by `cmap` and `norm`.
%(plot.args_2d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_vlim)s
%(plot.levels_auto)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`
See also
--------
PlotAxes.barbs
PlotAxes.quiver
PlotAxes.stream
PlotAxes.streamplot
matplotlib.axes.Axes.{command}
"""
_snippet_manager['plot.barbs'] = _flow_docstring.format(
descrip='wind barbs', command='barbs'
)
_snippet_manager['plot.quiver'] = _flow_docstring.format(
descrip='quiver arrows', command='quiver'
)
_snippet_manager['plot.stream'] = _flow_docstring.format(
descrip='streamlines', command='streamplot'
)
# Image docstring
_show_docstring = """
Plot {descrip}.
Parameters
----------
z : array-like
The data passed as a positional argument or keyword argument.
%(plot.args_1d_shared)s
Other parameters
----------------
%(plot.cmap_norm)s
%(plot.levels_manual)s
%(plot.levels_vlim)s
%(plot.levels_auto)s
%(plot.guide)s
**kwargs
Passed to `matplotlib.axes.Axes.{command}`.
See also
--------
proplot.axes.PlotAxes
matplotlib.axes.Axes.{command}
"""
# Register the image-style snippets. NOTE: typo fix -- was 'sparcity'.
_snippet_manager['plot.imshow'] = _show_docstring.format(
    descrip='an image', command='imshow'
)
_snippet_manager['plot.matshow'] = _show_docstring.format(
    descrip='a matrix', command='matshow'
)
_snippet_manager['plot.spy'] = _show_docstring.format(
    descrip='a sparsity pattern', command='spy'
)
def _load_objects():
"""
Load array-like objects.
"""
# NOTE: We just want to detect if *input arrays* belong to these types -- and if
# this is the case, it means the module has already been imported! So, we only
# try loading these classes within autoformat calls. This saves >500ms of import
# time. We use ndarray as the default value for unimported types and in loops we
# are careful to check membership to np.ndarray before anything else.
global ndarray, DataArray, DataFrame, Series, Index, Quantity
ndarray = np.ndarray
DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)
DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)
Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)
Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)
Quantity = getattr(sys.modules.get('pint', None), 'Quantity', ndarray)
_load_objects()
# Standardization utilities
def _is_array(data):
    """
    Return whether the input is a numpy array or a pint quantity.
    """
    # NOTE: This is used in _iter_columns to identify 2D matrices that
    # should be iterated over and omit e.g. scalar marker size or marker color.
    _load_objects()
    if isinstance(data, ndarray):
        return True
    return ndarray is not Quantity and isinstance(data, Quantity)
def _is_numeric(data):
    """
    Return whether the input is a non-empty numeric array (i.e. not
    datetimes or strings).
    """
    if not len(data):
        return 0  # preserve falsy integer result for empty input
    return np.issubdtype(_to_numpy_array(data).dtype, np.number)
def _is_categorical(data):
    """
    Return whether the input is a non-empty array of strings.
    """
    if not len(data):
        return 0  # preserve falsy integer result for empty input
    return isinstance(_to_numpy_array(data).item(0), str)
def _require_centers(x, y, z):
    """
    Enforce that the coordinates are grid centers. When the shapes indicate
    grid edges, convert them to centers by averaging adjacent values.
    """
    xlen, ylen = x.shape[-1], y.shape[0]
    if z.ndim == 2 and z.shape[1] == xlen - 1 and z.shape[0] == ylen - 1:
        # Coordinates are edges -- convert to centers
        if all(c.ndim == 1 and c.size > 1 and _is_numeric(c) for c in (x, y)):
            x = 0.5 * (x[1:] + x[:-1])
            y = 0.5 * (y[1:] + y[:-1])
        else:
            coords = []
            for c in (x, y):
                if c.ndim == 2 and c.shape[0] > 1 and c.shape[1] > 1 and _is_numeric(c):
                    c = 0.25 * (c[:-1, :-1] + c[:-1, 1:] + c[1:, :-1] + c[1:, 1:])
                coords.append(c)
            x, y = coords
    elif z.shape[-1] != xlen or z.shape[0] != ylen:
        # Shapes match neither centers nor edges -- raise helpful error
        raise ValueError(
            f'Input shapes x {x.shape} and y {y.shape} '
            f'must match z centers {z.shape} '
            f'or z borders {tuple(i+1 for i in z.shape)}.'
        )
    return x, y
def _require_edges(x, y, z):
    """
    Enforce that the coordinates are grid edges. When the shapes indicate
    grid centers, convert them to edges with the `edges` utilities.
    """
    xlen, ylen = x.shape[-1], y.shape[0]
    if z.ndim == 2 and z.shape[1] == xlen and z.shape[0] == ylen:
        # Coordinates are centers -- convert to edges
        if all(c.ndim == 1 and c.size > 1 and _is_numeric(c) for c in (x, y)):
            x, y = edges(x), edges(y)
        else:
            coords = []
            for c in (x, y):
                if c.ndim == 2 and c.shape[0] > 1 and c.shape[1] > 1 and _is_numeric(c):
                    c = edges2d(c)
                coords.append(c)
            x, y = coords
    elif z.shape[-1] != xlen - 1 or z.shape[0] != ylen - 1:
        # Shapes match neither centers nor edges -- raise helpful error
        raise ValueError(
            f'Input shapes x {x.shape} and y {y.shape} must match '
            f'array centers {z.shape} or '
            f'array borders {tuple(i + 1 for i in z.shape)}.'
        )
    return x, y
def _safe_mask(mask, *args):
    """
    Safely apply the boolean mask to the input arrays, accounting for existing
    masked or invalid values. Positions where `mask` is ``False`` are set
    to `np.nan`. Pint quantities have their units preserved.

    Parameters
    ----------
    mask : array-like of bool
        ``False`` marks values to be masked out.
    *args : array-like
        The arrays to be masked.

    Returns
    -------
    The masked array, or a list of masked arrays if several were passed.
    """
    _load_objects()
    invalid = ~mask  # True if invalid
    args_masked = []
    for arg in args:
        units = 1
        if ndarray is not Quantity and isinstance(arg, Quantity):
            arg, units = arg.magnitude, arg.units
        arg = ma.masked_invalid(arg, copy=False)
        # NOTE: np.float was deprecated and removed in numpy >= 1.24. It was
        # simply an alias for the builtin float, so use that instead.
        arg = arg.astype(float).filled(np.nan)
        if arg.size > 1 and arg.shape != invalid.shape:
            raise ValueError(f'Mask shape {mask.shape} incompatible with array shape {arg.shape}.')  # noqa: E501
        # NOTE: A dead 'elif invalid.size == 1' branch was removed here -- it was
        # unreachable because the preceding test already covers that case.
        if arg.size == 1 or invalid.size == 1:  # NOTE: happens with _restrict_inbounds
            pass
        elif arg.size > 1:
            arg[invalid] = np.nan
        args_masked.append(arg * units)
    return args_masked[0] if len(args_masked) == 1 else args_masked
def _safe_range(data, lo=0, hi=100, automin=True, automax=True):
    """
    Safely return the minimum and maximum (default) or percentile range,
    accounting for masked values. Use the min and max functions when possible
    for speed. Return ``None`` if we fail to get a valid range.
    """
    _load_objects()
    units = 1
    if ndarray is not Quantity and isinstance(data, Quantity):
        data, units = data.magnitude, data.units
    data = ma.masked_invalid(data, copy=False).compressed()  # drop invalid values
    min_ = max_ = None
    if automin and data.size:
        value = float(np.min(data) if lo <= 0 else np.percentile(data, lo))
        if np.isfinite(value):
            min_ = value * units
    if automax and data.size:
        value = float(np.max(data) if hi >= 100 else np.percentile(data, hi))
        if np.isfinite(value):
            max_ = value * units
    return min_, max_
def _to_duck_array(data, strip_units=False):
    """
    Convert arbitrary input to a duck array, preserving array containers
    with metadata. Optionally strip pint units from the result.
    """
    _load_objects()
    if data is None:
        raise ValueError('Invalid data None.')
    containers = (ndarray, DataArray, DataFrame, Series, Index, Quantity)
    if not isinstance(data, containers) or not np.iterable(data):
        # WARNING: this strips e.g. scalar DataArray metadata
        data = _to_numpy_array(data)
    if strip_units:  # used for z coordinates that cannot have units
        pint_active = Quantity is not ndarray
        if isinstance(data, (ndarray, Quantity)):
            if pint_active and isinstance(data, Quantity):
                data = data.magnitude
        elif isinstance(data, DataArray):
            if pint_active and isinstance(data.data, Quantity):
                data = data.copy(deep=False)
                data.data = data.data.magnitude
    return data
def _to_numpy_array(data, strip_units=False):
    """
    Convert arbitrary input to numpy array. Preserve masked arrays and unit arrays.

    Parameters
    ----------
    data : array-like
        The input data. Must not be ``None``.
    strip_units : bool, optional
        Whether to drop pint units from the result.
    """
    _load_objects()
    if data is None:
        raise ValueError('Invalid data None.')
    # Unwrap container types down to their underlying array data
    if not isinstance(data, ndarray):
        if isinstance(data, DataArray):
            data = data.data  # may be a pint quantity that gets unit-stripped later
        elif isinstance(data, (DataFrame, Series, Index)):
            data = data.values
    # Pint quantities: operate on the magnitude and optionally restore units
    if Quantity is not ndarray and isinstance(data, Quantity):
        magnitude = np.atleast_1d(data.magnitude)
        return magnitude if strip_units else magnitude * data.units
    # Everything else: atleast_1d natively preserves masked arrays
    return np.atleast_1d(data)
# Metadata utilities
def _get_data(data, *args):
"""
Try to convert positional `key` arguments to `data[key]`. If argument is string
it could be a valid positional argument like `fmt` so do not raise error.
"""
if data is None:
return
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, str):
try:
array = data[arg]
except KeyError:
pass
else:
args[i] = array
return args
def _get_coords(*args, which='x', **kwargs):
    """
    Return the index arrays associated with string coordinates and
    keyword arguments updated with index locators and formatters.

    Parameters
    ----------
    *args : array-like
        The coordinate arrays. Categorical (string) arrays are replaced
        with integer index arrays.
    which : {'x', 'y'}, optional
        The axis whose locator and formatter defaults are updated.
    **kwargs
        Keyword arguments updated in-place with locator/formatter defaults.
    """
    # NOTE: Why FixedLocator and not IndexLocator? The latter requires plotting
    # lines or else an error is raised... very strange.
    # NOTE: Why IndexFormatter and not FixedFormatter? The former ensures labels
    # correspond to indices while the latter can mysteriously truncate labels.
    res = []
    for arg in args:
        arg = _to_duck_array(arg)
        if not _is_categorical(arg):
            res.append(arg)
            continue
        if arg.ndim > 1:
            raise ValueError('Non-1D string coordinate input is unsupported.')
        idx = np.arange(len(arg))
        kwargs.setdefault(which + 'locator', mticker.FixedLocator(idx))
        kwargs.setdefault(which + 'formatter', pticker.IndexFormatter(_to_numpy_array(arg)))  # noqa: E501
        kwargs.setdefault(which + 'minorlocator', mticker.NullLocator())
        res.append(idx)
    return (*res, kwargs)
def _get_labels(data, axis=0, always=True):
    """
    Return the array-like "labels" along axis `axis`. If `always` is ``False``
    we return ``None`` for simple ndarray input.

    Parameters
    ----------
    data : ndarray, Quantity, DataArray, DataFrame, Series, or Index
        The input array or container.
    axis : {0, 1, 2}, optional
        The axis whose coordinate labels are requested.
    always : bool, optional
        Whether to return placeholder indices when no metadata is available.

    Returns
    -------
    labels : array-like or None
        Index arrays, xarray coordinates, or pandas index/columns.
    """
    # NOTE: Previously inferred 'axis 1' metadata of 1D variable using the
    # data values metadata but that is incorrect. The paradigm for 1D plots
    # is we have row coordinates representing x, data values representing y,
    # and column coordinates representing individual series.
    labels = None
    _load_objects()
    if axis not in (0, 1, 2):
        raise ValueError(f'Invalid axis {axis}.')
    if isinstance(data, (ndarray, Quantity)):
        if not always:
            pass
        elif axis < data.ndim:
            labels = np.arange(data.shape[axis])
        else:  # requesting 'axis 1' on a 1D array
            labels = np.array([0])
    # Xarray object
    # NOTE: Even if coords not present .coords[dim] auto-generates indices
    elif isinstance(data, DataArray):
        if axis < data.ndim:
            labels = data.coords[data.dims[axis]]
        elif not always:
            pass
        else:
            labels = np.array([0])
    # Pandas object
    elif isinstance(data, (DataFrame, Series, Index)):
        if axis == 0 and isinstance(data, (DataFrame, Series)):
            labels = data.index
        elif axis == 1 and isinstance(data, (DataFrame,)):
            labels = data.columns
        elif not always:
            pass
        else:  # beyond dimensionality
            labels = np.array([0])
    # Everything else
    # NOTE: Ensure data is at least 1D in _to_duck_array so this covers everything
    else:
        raise ValueError(f'Unrecognized array type {type(data)}.')
    return labels
def _get_title(data, include_units=True):
    """
    Return the "title" of an array-like object with metadata. Include units in
    the title if `include_units` is ``True``.

    Parameters
    ----------
    data : array-like
        The array or container whose metadata is inspected.
    include_units : bool, optional
        Whether to append (or substitute) a unit string.

    Returns
    -------
    title : str or None
        The stripped title string, or ``None`` if no metadata was found.
    """
    title = units = None
    _load_objects()
    if isinstance(data, ndarray):
        pass  # plain arrays carry no metadata
    # Xarray object with possible long_name, standard_name, and units attributes.
    # Output depends on if units is True. Prefer long_name (come last in loop).
    elif isinstance(data, DataArray):
        title = getattr(data, 'name', None)
        for key in ('standard_name', 'long_name'):
            title = data.attrs.get(key, title)
        if include_units:
            units = _get_units(data)
    # Pandas object. DataFrame has no native name attribute but user can add one
    # See: https://github.com/pandas-dev/pandas/issues/447
    elif isinstance(data, (DataFrame, Series, Index)):
        title = getattr(data, 'name', None) or None  # normalize empty name to None
    # Pint Quantity
    elif isinstance(data, Quantity):
        if include_units:
            units = _get_units(data)
    # Add units or return units alone
    if title and units:
        title = f'{title} ({units})'
    else:
        title = title or units
    if title is not None:
        title = str(title).strip()
    return title
def _get_units(data):
    """
    Get the unit string from the `xarray.DataArray` attributes or the
    `pint.Quantity`. Format the latter with :rcraw:`unitformat`.

    Parameters
    ----------
    data : DataArray, Quantity, or array-like
        The data whose units are requested.

    Returns
    -------
    units : str or None
        The units string, or ``None`` if none were found or formatting failed.
    """
    _load_objects()
    # Get units from the attributes
    # NOTE: When the DataArray has no 'units' attribute we fall through to the
    # quantity check below with the unwrapped underlying data.
    if ndarray is not DataArray and isinstance(data, DataArray):
        units = data.attrs.get('units', None)
        data = data.data
        if units is not None:
            return units
    # Get units from the quantity
    if ndarray is not Quantity and isinstance(data, Quantity):
        fmt = rc.unitformat
        try:
            units = format(data.units, fmt)
        except (TypeError, ValueError):
            warnings._warn_proplot(
                f'Failed to format pint quantity with format string {fmt!r}.'
            )
        else:
            if 'L' in fmt:  # auto-apply LaTeX math indicator
                units = '$' + units + '$'
            return units
# Geographic utilties
def _geo_basemap_1d(x, *ys, xmin=-180, xmax=180):
    """
    Fix basemap geographic 1D data arrays.

    Enforces monotonic longitudes, clips latitudes to the poles, and masks
    out-of-bounds points for each dependent array in turn.
    """
    lon = _geo_monotonic(x)
    lats = _geo_clip(ys)
    xout = lon
    fixed = []
    for lat in lats:
        # Each dependent array is fixed against the original coordinates
        xout, lat = _geo_inbounds(lon, lat, xmin=xmin, xmax=xmax)
        fixed.append(lat)
    return (xout, *fixed)
def _geo_basemap_2d(x, y, *zs, globe=False, xmin=-180, xmax=180):
    """
    Fix basemap geographic 2D data arrays.

    Parameters
    ----------
    x, y : array-like
        Longitude and latitude coordinates.
    *zs : array-like
        The data arrays defined over the coordinates.
    globe : bool, optional
        Whether to apply global coverage fixes (pole caps and seam points).
    xmin, xmax : float, optional
        The map longitude bounds.
    """
    x = _geo_monotonic(x)
    y = _geo_clip(y)  # in case e.g. edges() added points beyond poles
    x_orig, y_orig, zs_orig, zs = x, y, zs, []
    for z_orig in zs_orig:
        # Ensure data is within map bounds
        x, y, z = x_orig, y_orig, z_orig
        x, z = _geo_inbounds(x, z, xmin=xmin, xmax=xmax)
        if not globe or x.ndim > 1 or y.ndim > 1:
            zs.append(z)
            continue
        # Fix gaps in coverage
        y, z = _geo_poles(y, z)
        x, z = _geo_seams(x, z, xmin=xmin, modulo=False)
        zs.append(z)
    return (x, y, *zs)
def _geo_cartopy_1d(x, *ys):
    """
    Fix cartopy geographic 1D data arrays.
    """
    # So far there is not much to do here beyond enforcing monotonic
    # longitudes and clipping latitudes to the poles.
    lon = _geo_monotonic(x)
    lats = _geo_clip(ys)
    return (lon, *lats)
def _geo_cartopy_2d(x, y, *zs, globe=False):
    """
    Fix cartopy geographic 2D data arrays.

    Parameters
    ----------
    x, y : array-like
        Longitude and latitude coordinates.
    *zs : array-like
        The data arrays defined over the coordinates.
    globe : bool, optional
        Whether to apply global coverage fixes (pole caps and seam points).
    """
    x = _geo_monotonic(x)
    y = _geo_clip(y)  # in case e.g. edges() added points beyond poles
    x_orig, y_orig, zs_orig = x, y, zs
    zs = []
    for z_orig in zs_orig:
        # Bail for 2D coordinates
        x, y, z = x_orig, y_orig, z_orig
        if z is None or not globe or x.ndim > 1 or y.ndim > 1:
            zs.append(z)
            continue
        # Fix gaps in coverage
        y, z = _geo_poles(y, z)
        x, z = _geo_seams(x, z, modulo=True)
        zs.append(z)
    return (x, y, *zs)
def _geo_inbounds(x, y, xmin=-180, xmax=180):
"""
Fix conflicts with map coordinates by rolling the data to fall between the
minimum and maximum longitudes and masking out-of-bounds data points.
"""
# Roll in same direction if some points on right-edge extend
# more than 360 above min longitude; *they* should be on left side
if x.ndim != 1:
return x, y
lonroll = np.where(x > xmin + 360)[0] # tuple of ids
if lonroll.size: # non-empty
roll = x.size - lonroll.min()
x = np.roll(x, roll)
y = np.roll(y, roll, axis=-1)
x[:roll] -= 360 # make monotonic
# Set NaN where data not in range xmin, xmax. Must be done for regional smaller
# projections or get weird side-effects from valid data outside boundaries
y = ma.masked_invalid(y, copy=False)
y = y.astype(np.float).filled(np.nan)
if x.size - 1 == y.shape[-1]: # test western/eastern grid cell edges
mask = (x[1:] < xmin) | (x[:-1] > xmax)
y[..., mask] = np.nan
elif x.size == y.shape[-1]: # test the centers and pad by one for safety
where, = np.where((x < xmin) | (x > xmax))
y[..., where[1:-1]] = np.nan
return x, y
def _geo_clip(*ys):
"""
Ensure latitudes span only minus 90 to plus 90 degrees.
"""
ys = tuple(np.clip(y, -90, 90) for y in ys)
return ys[0] if len(ys) == 1 else ys
def _geo_monotonic(x):
"""
Ensure longitudes are monotonic without rolling or filtering coordinates.
"""
# Add 360 until data is greater than base value
# TODO: Is this necessary for cartopy data? Maybe only with _geo_seams?
if x.ndim != 1 or all(x < x[0]): # skip 2D arrays and monotonic backwards data
return x
xmin = x[0]
mask = np.array([True])
while np.sum(mask):
mask = x < xmin
x[mask] += 360
return x
def _geo_poles(y, z):
"""
Fix gaps in coverage over the poles by adding data points at the poles
using averages of the highest latitude data.
"""
# Get means
with np.errstate(all='ignore'):
p1 = np.mean(z[0, :]) # do not ignore NaN if present
p2 = np.mean(z[-1, :])
if hasattr(p1, 'item'):
p1 = np.asscalar(p1) # happens with DataArrays
if hasattr(p2, 'item'):
p2 = np.asscalar(p2)
# Concatenate
ps = (-90, 90) if (y[0] < y[-1]) else (90, -90)
z1 = np.repeat(p1, z.shape[1])[None, :]
z2 = np.repeat(p2, z.shape[1])[None, :]
y = ma.concatenate((ps[:1], y, ps[1:]))
z = ma.concatenate((z1, z, z2), axis=0)
return y, z
def _geo_seams(x, z, xmin=-180, modulo=False):
    """
    Fix gaps in coverage over longitude seams by adding points to either
    end or both ends of the array.

    Parameters
    ----------
    x : array-like
        1D longitude coordinates (cell centers or cell edges).
    z : array-like
        2D data with longitude along the columns.
    xmin : float, optional
        The left-hand map longitude bound.
    modulo : bool, optional
        Whether circular coverage modulo 360 suffices (cartopy) rather than
        exact alignment with the map seams (basemap).

    Returns
    -------
    x, z : array-like
        The coordinates and data with seam points appended as needed.
    """
    # Fix seams by ensuring circular coverage (cartopy can plot over map edges)
    if modulo:
        # Simply test the coverage % 360
        if x[0] % 360 != (x[-1] + 360) % 360:
            x = ma.concatenate((x, [x[0] + 360]))
            z = ma.concatenate((z, z[:, :1]), axis=1)
    else:
        # Ensure exact match between data and seams
        # Edges (e.g. pcolor) fit perfectly against seams. Size is unchanged.
        if x[0] == xmin and x.size - 1 == z.shape[1]:
            pass
        # Edges (e.g. pcolor) do not fit perfectly. Size augmented by 1.
        elif x.size - 1 == z.shape[1]:
            x = ma.append(xmin, x)
            x[-1] = xmin + 360
            z = ma.concatenate((z[:, -1:], z), axis=1)
        # Centers (e.g. contour) interpolated to edge. Size augmented by 2.
        elif x.size == z.shape[1]:
            xi = np.array([x[-1], x[0] + 360])
            if xi[0] == xi[1]:  # impossible to interpolate
                pass
            else:
                # Linearly interpolate the wrapped-around column pair to the seam
                zq = ma.concatenate((z[:, -1:], z[:, :1]), axis=1)
                xq = xmin + 360
                zq = (zq[:, :1] * (xi[1] - xq) + zq[:, 1:] * (xq - xi[0])) / (xi[1] - xi[0])  # noqa: E501
                x = ma.concatenate(([xmin], x, [xmin + 360]))
                z = ma.concatenate((zq, z, zq), axis=1)
        else:
            raise ValueError('Unexpected shapes of coordinates or data arrays.')
    return x, z
# Misc utilties
def _get_vert(vert=None, orientation=None, **kwargs):
"""
Get the orientation specified as either `vert` or `orientation`. This is
used internally by various helper functions.
"""
if vert is not None:
return kwargs, vert
elif orientation is not None:
return kwargs, orientation != 'horizontal' # should already be validated
else:
return kwargs, True # fallback
def _parse_vert(
    vert=None, orientation=None, default_vert=None, default_orientation=None,
    **kwargs
):
    """
    Interpret both 'vert' and 'orientation' and add to outgoing keyword args
    if a default is provided.
    """
    # NOTE: Users should only pass these to hist, boxplot, or violinplot. To change
    # the plot, scatter, area, or bar orientation users should use the differently
    # named functions. Internally, however, they use these keyword args.
    # Translate each specification into the other's vocabulary up front
    vert_from_orientation = None if orientation is None else orientation == 'vertical'
    orientation_from_vert = (
        None if vert is None else 'vertical' if vert else 'horizontal'
    )
    if default_vert is not None:
        kwargs['vert'] = _not_none(
            vert=vert,
            orientation=vert_from_orientation,
            default=default_vert,
        )
    if default_orientation is not None:
        kwargs['orientation'] = _not_none(
            orientation=orientation,
            vert=orientation_from_vert,
            default=default_orientation,
        )
    if kwargs.get('orientation', None) not in (None, 'horizontal', 'vertical'):
        raise ValueError("Orientation must be either 'horizontal' or 'vertical'.")
    return kwargs
def _distribution_reduce(
    y, *, mean=None, means=None, median=None, medians=None, **kwargs
):
    """
    Return distribution columns reduced into means and medians. Tack on a
    distribution keyword argument for processing down the line.

    Parameters
    ----------
    y : array-like
        The input data whose columns may be reduced.
    mean, means, median, medians : bool, optional
        Whether to reduce columns to their means or medians (aliases).

    Returns
    -------
    y : array-like
        The (possibly reduced) coordinates.
    kwargs : dict
        Keyword arguments, updated with the 'distribution' entry when a
        reduction was performed.
    """
    # TODO: Permit 3D array with error dimension coming first
    data = y
    means = _not_none(mean=mean, means=means)
    medians = _not_none(median=median, medians=medians)
    if means and medians:
        warnings._warn_proplot('Cannot have both means=True and medians=True. Using former.')  # noqa: E501
        medians = None
    if means or medians:
        if data.ndim != 2:
            raise ValueError(f'Expected 2D array for means=True. Got {data.ndim}D.')
        # NOTE: The deprecated np.float alias was removed in numpy 1.24 -- use
        # the builtin float so the filled array can hold NaN.
        data = ma.masked_invalid(data, copy=False)
        data = data.astype(float).filled(np.nan)
        if not data.size:
            raise ValueError('The input data contains all masked or NaN values.')
        elif means:
            y = np.nanmean(data, axis=0)
        elif medians:
            y = np.nanmedian(data, axis=0)
        kwargs['distribution'] = data  # save argument passed to _error_bars
    return (y, kwargs)
def _distribution_range(
    y, distribution, *, errdata=None, absolute=False, label=False,
    stds=None, pctiles=None, stds_default=None, pctiles_default=None,
):
    """
    Return a plottable characteristic range for the distribution keyword
    argument relative to the input coordinate means or medians.

    Parameters
    ----------
    y : array-like
        The central coordinates (e.g. column means or medians).
    distribution : array-like or None
        The 2D distribution whose columns were reduced into `y`.
    errdata : array-like, optional
        Explicit error bounds of shape (2, n), or symmetric deviations (n,).
    absolute : bool, optional
        Whether to return absolute bounds rather than deviations from `y`.
    label : bool or str, optional
        The legend label, or ``True`` to use an automatic default.
    stds, pctiles : bool, scalar, or length-2 sequence, optional
        Standard-deviation multiples or percentile range for automatic bounds.
    stds_default, pctiles_default : optional
        Defaults substituted when `stds` or `pctiles` is ``True``.

    Returns
    -------
    err : array-like
        The (2, n) error bounds or signed deviations.
    label : str or None
        The resolved legend label.
    """
    # Parse stds arguments
    # NOTE: Have to guard against "truth value of an array is ambiguous" errors
    if stds is True:
        stds = stds_default
    elif stds is False or stds is None:
        stds = None
    else:
        stds = np.atleast_1d(stds)
        if stds.size == 1:
            stds = sorted((-stds.item(), stds.item()))  # symmetric about zero
        elif stds.size != 2:
            raise ValueError('Expected scalar or length-2 stdev specification.')
    # Parse pctiles arguments
    if pctiles is True:
        pctiles = pctiles_default
    elif pctiles is False or pctiles is None:
        pctiles = None
    else:
        pctiles = np.atleast_1d(pctiles)
        if pctiles.size == 1:
            delta = (100 - pctiles.item()) / 2.0
            pctiles = sorted((delta, 100 - delta))  # centered percentile range
        elif pctiles.size != 2:
            raise ValueError('Expected scalar or length-2 pctiles specification.')
    # Incompatible settings
    if distribution is None and any(_ is not None for _ in (stds, pctiles)):
        raise ValueError(
            'To automatically compute standard deviations or percentiles on '
            'columns of data you must pass means=True or medians=True.'
        )
    if stds is not None and pctiles is not None:
        warnings._warn_proplot(
            'Got both a standard deviation range and a percentile range for '
            'auto error indicators. Using the standard deviation range.'
        )
        pctiles = None
    if distribution is not None and errdata is not None:
        stds = pctiles = None
        warnings._warn_proplot(
            'You explicitly provided the error bounds but also requested '
            'automatically calculating means or medians on data columns. '
            'It may make more sense to use the "stds" or "pctiles" keyword args '
            'and have *proplot* calculate the error bounds.'
        )
    # Compute error data in format that can be passed to maxes.Axes.errorbar()
    # NOTE: Include option to pass symmetric deviation from central points
    if errdata is not None:
        # Manual error data
        if y.ndim != 1:
            raise ValueError('errdata with 2D y coordinates is not yet supported.')
        label_default = 'uncertainty'
        err = _to_numpy_array(errdata)
        if (
            err.ndim not in (1, 2)
            or err.shape[-1] != y.size
            or err.ndim == 2 and err.shape[0] != 2
        ):
            raise ValueError(f'errdata has shape {err.shape}. Expected (2, {y.shape[-1]}).')  # noqa: E501
        if err.ndim == 1:
            abserr = err
            err = np.empty((2, err.size))
            err[0, :] = y - abserr  # translated back to absolute deviations below
            err[1, :] = y + abserr
    elif stds is not None:
        # Standard deviations
        # NOTE: Invalid values were handled by _distribution_reduce
        label_default = fr'{abs(stds[1])}$\sigma$ range'
        stds = _to_numpy_array(stds)[:, None]
        err = y + stds * np.nanstd(distribution, axis=0)
    elif pctiles is not None:
        # Percentiles
        # NOTE: Invalid values were handled by _distribution_reduce
        label_default = f'{pctiles[1] - pctiles[0]}% range'
        err = np.nanpercentile(distribution, pctiles, axis=0)
    else:
        raise ValueError('You must provide error bounds.')
    # Return data with legend entry
    if not absolute:  # for errorbar() ingestion
        err = err - y
        err[0, :] *= -1  # absolute deviations from central points
    if label is True:
        label = label_default
    elif not label:
        label = None
    return err, label
# Input preprocessor
def _preprocess_data(*keys, keywords=None, allow_extra=True):
    """
    Redirect internal plotting calls to native matplotlib methods. Also perform
    convert keyword args to positional and pass arguments through 'data' dictionary.

    Parameters
    ----------
    *keys : str
        The positional argument names, in order.
    keywords : str or sequence of str, optional
        Extra keyword arguments whose values should be looked up in 'data'.
    allow_extra : bool, optional
        Whether to permit more positional arguments than `keys`.

    Returns
    -------
    decorator : callable
        The method decorator.
    """
    # Keyword arguments processed through 'data'
    # Positional arguments are always processed through data
    keywords = keywords or ()
    if isinstance(keywords, str):
        keywords = (keywords,)

    def decorator(func):
        name = func.__name__

        @functools.wraps(func)
        def _redirect_or_standardize(self, *args, **kwargs):
            if getattr(self, '_internal_call', None):
                # Redirect internal matplotlib call to native function
                func_native = getattr(super(PlotAxes, self), name)
                return func_native(*args, **kwargs)
            else:
                # Impose default coordinate system
                if self.name == 'proplot_basemap' and name in BASEMAP_FUNCS:
                    latlon = kwargs.pop('latlon', None)
                    kwargs['latlon'] = _not_none(latlon, True)
                if self.name == 'proplot_cartopy' and name in CARTOPY_FUNCS:
                    transform = kwargs.pop('transform', None)
                    kwargs['transform'] = _not_none(transform, PlateCarree())
                # Process data args
                # NOTE: Raises error if there are more args than keys
                args, kwargs = _keyword_to_positional(
                    keys, *args, allow_extra=allow_extra, **kwargs
                )
                data = kwargs.pop('data', None)
                if data is not None:
                    args = _get_data(data, *args)
                    for key in set(keywords) & set(kwargs):
                        kwargs[key] = _get_data(data, kwargs[key])
                # Auto-setup matplotlib with the input unit registry
                _load_objects()
                for arg in args:
                    if ndarray is not DataArray and isinstance(arg, DataArray):
                        arg = arg.data
                    if ndarray is not Quantity and isinstance(arg, Quantity):
                        ureg = getattr(arg, '_REGISTRY', None)
                        if hasattr(ureg, 'setup_matplotlib'):
                            ureg.setup_matplotlib(True)
                # Call main function
                return func(self, *args, **kwargs)  # call unbound method

        return _redirect_or_standardize

    return decorator
class PlotAxes(base.Axes):
"""
The second lowest-level `~matplotlib.axes.Axes` subclass used by proplot.
Implements all plotting overrides.
"""
    def __init__(self, *args, **kwargs):
        """
        Parameters
        ----------
        *args, **kwargs
            Passed to `~proplot.axes.Axes`.

        See also
        --------
        matplotlib.axes.Axes
        proplot.axes.Axes
        proplot.axes.CartesianAxes
        proplot.axes.PolarAxes
        proplot.axes.GeoAxes
        """
        # No extra state is needed at this level -- this subclass only
        # implements plotting overrides on top of the base axes.
        super().__init__(*args, **kwargs)
    def _plot_safe(self, name, *args, **kwargs):
        """
        Call the plotting method and use context object to redirect internal
        calls to native methods. Finally add attributes to outgoing methods.

        Parameters
        ----------
        name : str
            The name of the native plotting method to invoke.
        *args, **kwargs
            Passed to the plotting method.
        """
        # NOTE: Previously allowed internal matplotlib plotting function calls to run
        # through proplot overrides then avoided awkward conflicts in piecemeal fashion.
        # Now prevent internal calls from running through overrides using preprocessor
        kwargs.pop('distribution', None)  # remove stat distributions
        with _state_context(self, _internal_call=True):
            if getattr(self, 'name', None) == 'proplot_basemap':
                # Basemap plotting methods live on the projection instance
                obj = getattr(self.projection, name)(*args, ax=self, **kwargs)
            else:
                obj = getattr(super(), name)(*args, **kwargs)
        return obj
    def _plot_edges(self, method, *args, **kwargs):
        """
        Call the contour method to add "edges" to filled contours.
        """
        # NOTE: This is also used to provide an object that can be used by 'clabel'
        # for auto-labels. Filled contours seem to create weird artifacts.
        # NOTE: Make the default 'line width' identical to one used for pcolor plots
        # rather than rc['contour.linewidth']. See mpl pcolor() source code
        if not any(key in kwargs for key in ('linewidths', 'linestyles', 'edgecolors')):
            kwargs['linewidths'] = 0  # for clabel
        # The setdefault only takes effect when the user passed linestyles or
        # edgecolors without linewidths (the zero-width branch was then skipped)
        kwargs.setdefault('linewidths', EDGEWIDTH)
        kwargs.pop('cmap', None)  # edges are drawn in a single color
        kwargs['colors'] = kwargs.pop('edgecolors', 'k')
        return self._plot_safe(method, *args, **kwargs)
    def _plot_negpos(
        self, name, x, *ys, negcolor=None, poscolor=None, colorkey='facecolor',
        use_where=False, use_zero=False, **kwargs
    ):
        """
        Call the plot method separately for "negative" and "positive" data.

        Parameters
        ----------
        name : str
            The plotting method name.
        x, *ys : array-like
            The coordinates.
        negcolor, poscolor : color-spec, optional
            Colors used for the negative and positive components.
        colorkey : str, optional
            The keyword used to pass the component color to the plot method.
        use_where : bool, optional
            Whether to mask components with the fill_between 'where' keyword.
        use_zero : bool, optional
            Whether to filter bar heights against zero.

        Returns
        -------
        negobj, posobj
            The artists for the negative and positive components.
        """
        if use_where:
            kwargs.setdefault('interpolate', True)  # see fill_between docs
        # User-specified colors or masks conflict with the split rendering
        for key in ('color', 'colors', 'facecolor', 'facecolors', 'where'):
            value = kwargs.pop(key, None)
            if value is not None:
                warnings._warn_proplot(
                    f'{name}() argument {key}={value!r} is incompatible with negpos=True. Ignoring.'  # noqa: E501
                )
        # Negative component
        yneg = list(ys)  # copy
        if use_zero:  # filter bar heights
            yneg[0] = _safe_mask(ys[0] < 0, ys[0])
        elif use_where:  # apply fill_between mask
            kwargs['where'] = ys[1] < ys[0]
        else:
            yneg = _safe_mask(ys[1] < ys[0], *ys)
        kwargs[colorkey] = _not_none(negcolor, rc['negcolor'])
        negobj = self._plot_safe(name, x, *yneg, **kwargs)
        # Positive component
        ypos = list(ys)  # copy
        if use_zero:  # filter bar heights
            ypos[0] = _safe_mask(ys[0] >= 0, ys[0])
        elif use_where:  # apply fill_between mask
            kwargs['where'] = ys[1] >= ys[0]
        else:
            ypos = _safe_mask(ys[1] >= ys[0], *ys)
        kwargs[colorkey] = _not_none(poscolor, rc['poscolor'])
        posobj = self._plot_safe(name, x, *ypos, **kwargs)
        return (negobj, posobj)
    def _add_queued_guide(
        self, objs, colorbar=None, colorbar_kw=None, legend=None, legend_kw=None,
    ):
        """
        Queue the input artist(s) for an automatic legend or colorbar once
        the axes is drawn or auto layout is called.

        Parameters
        ----------
        objs
            The artist or artists to queue.
        colorbar, legend : optional
            The colorbar and legend locations (falsy to skip and only stash
            the keyword arguments on the artists).
        colorbar_kw, legend_kw : dict, optional
            Keyword arguments passed to colorbar() and legend().
        """
        # WARNING: This should generally be last in the pipeline before calling
        # the plot function or looping over data columns. The colormap parser
        # and standardize functions both modify colorbar_kw and legend_kw.
        if colorbar:
            colorbar_kw = colorbar_kw or {}
            self.colorbar(objs, loc=colorbar, queue=True, **colorbar_kw)
        else:
            _guide_kw_to_obj(objs, 'colorbar', colorbar_kw)  # save for later
        if legend:
            legend_kw = legend_kw or {}
            self.legend(objs, loc=legend, queue=True, **legend_kw)
        else:
            _guide_kw_to_obj(objs, 'legend', legend_kw)  # save for later
    def _add_sticky_edges(self, objs, axis, *args):
        """
        Add sticky edges to the input artists using the minimum and maximum of the
        input coordinates. This is used to copy `bar` behavior to `area` and `lines`.

        Parameters
        ----------
        objs
            The artists (tuples of artists are flattened).
        axis : {'x', 'y'}
            The axis along which the edges are added.
        *args : array-like
            The coordinate arrays whose ranges become sticky edges.
        """
        # Flatten tuple groups (e.g. containers of several artists) into a list
        iter_ = list(obj for _ in objs for obj in (_ if isinstance(_, tuple) else (_,)))
        for sides in args:
            sides = np.atleast_1d(sides)
            if not sides.size:
                continue
            min_, max_ = _safe_range(sides)
            if min_ is None or max_ is None:
                continue
            for obj in iter_:
                # Convert to axis units (handles datetime and pint input)
                convert = getattr(self, 'convert_' + axis + 'units')
                edges = getattr(obj.sticky_edges, axis)
                edges.extend(convert((min_, max_)))
    def _auto_format_1d(
        self, x, *ys, zerox=False, autox=True, autoy=True, autoformat=None,
        autoreverse=True, autolabels=True, autovalues=False, autoguide=True,
        label=None, labels=None, value=None, values=None, **kwargs
    ):
        """
        Try to retrieve default coordinates from array-like objects and apply default
        formatting. Also update the keyword arguments.

        Parameters
        ----------
        x : array-like or None
            The x coordinates (inferred from `ys` metadata when ``None``).
        *ys : array-like
            The y coordinate array(s).
        zerox : bool, optional
            Whether there are no x coordinates at all (e.g. hist).
        autox, autoy : bool, optional
            Whether to infer x coordinates and apply x/y axis formatting.
        autoformat : bool, optional
            Whether to apply labels from metadata (default rc['autoformat']).
        autoreverse, autolabels, autovalues, autoguide : bool, optional
            Whether to auto-reverse descending x coordinates and infer legend
            labels, colorbar values, and guide titles.
        label, labels, value, values : optional
            User-specified labels (aliases of one another).

        Returns
        -------
        The x coordinates, y arrays, and updated keyword arguments.
        """
        # Parse input
        y = max(ys, key=lambda y: y.size)  # try to find a non-scalar y for metadata
        autox = autox and not zerox  # so far just relevant for hist()
        autoformat = _not_none(autoformat, rc['autoformat'])
        kwargs, vert = _get_vert(**kwargs)
        labels = _not_none(
            label=label,
            labels=labels,
            value=value,
            values=values,
            legend_kw_labels=kwargs.get('legend_kw', {}).pop('labels', None),
            colorbar_kw_values=kwargs.get('colorbar_kw', {}).pop('values', None),
        )
        # Retrieve the x coords
        # NOTE: Where columns represent distributions, like for box and violinplot or
        # where we use 'means' or 'medians', columns coords (axis 1) are 'x' coords.
        # Otherwise, columns represent e.g. lines and row coords (axis 0) are 'x'
        # coords. Exception is passing "ragged arrays" to boxplot and violinplot.
        dists = any(kwargs.get(s) for s in ('mean', 'means', 'median', 'medians'))
        raggd = any(getattr(y, 'dtype', None) == 'object' for y in ys)
        xaxis = 0 if raggd else 1 if dists or not autoy else 0
        if autox and x is None:
            x = _get_labels(y, axis=xaxis)  # use the first one
        # Retrieve the labels. We only want default legend labels if this is an
        # object with 'title' metadata and/or the coords are string.
        # WARNING: Confusing terminology differences here -- for box and violin plots
        # labels refer to indices along x axis.
        if autolabels and labels is None:
            laxis = 0 if not autox and not autoy else xaxis if not autoy else xaxis + 1
            if laxis >= y.ndim:
                labels = _get_title(y)
            else:
                labels = _get_labels(y, axis=laxis, always=False)
            notitle = not _get_title(labels)
            if labels is None:
                pass
            elif notitle and not any(isinstance(_, str) for _ in labels):
                labels = None
        # Apply the labels or values
        if labels is not None:
            if autovalues:
                kwargs['values'] = _to_numpy_array(labels)
            elif autolabels:
                kwargs['labels'] = _to_numpy_array(labels)
        # Apply title for legend or colorbar that uses the labels or values
        if autoguide and autoformat:
            title = _get_title(labels)
            if title:  # safely update legend_kw and colorbar_kw
                _guide_kw_to_arg('legend', kwargs, title=title)
                _guide_kw_to_arg('colorbar', kwargs, label=title)
        # Apply the basic x and y settings
        autox = autox and self.name == 'proplot_cartesian'
        autoy = autoy and self.name == 'proplot_cartesian'
        sx, sy = 'xy' if vert else 'yx'
        kw_format = {}
        if autox and autoformat:  # 'x' axis
            title = _get_title(x)
            if title:
                axis = getattr(self, sx + 'axis')
                if axis.isDefault_label:
                    kw_format[sx + 'label'] = title
        if autoy and autoformat:  # 'y' axis
            sy = sx if zerox else sy  # hist() 'y' values are along 'x' axis
            title = _get_title(y)
            if title:
                axis = getattr(self, sy + 'axis')
                if axis.isDefault_label:
                    kw_format[sy + 'label'] = title
        # Convert string-type coordinates
        # NOTE: This should even allow qualitative string input to hist()
        if autox:
            x, kw_format = _get_coords(x, which=sx, **kw_format)
        if autoy:
            *ys, kw_format = _get_coords(*ys, which=sy, **kw_format)
        if autox and autoreverse and x.ndim == 1 and x.size > 1 and x[1] < x[0]:
            kw_format[sx + 'reverse'] = True
        # Apply formatting
        if kw_format:
            self.format(**kw_format)
        # Finally strip metadata
        # WARNING: Most methods that accept 2D arrays use columns of data, but when
        # pandas DataFrame specifically is passed to hist, boxplot, or violinplot, rows
        # of data assumed! Converting to ndarray necessary.
        ys = tuple(map(_to_numpy_array, ys))
        if x is not None:  # pie() and hist()
            x = _to_numpy_array(x)
        return (x, *ys, kwargs)
    def _standardize_1d(self, x, *ys, **kwargs):
        """
        Interpret positional arguments for all "1D" plotting commands.

        Returns
        -------
        The x coordinates, y arrays, and updated keyword arguments.
        """
        # Standardize values
        zerox = not ys
        if zerox or all(y is None for y in ys):  # pad with remaining Nones
            x, *ys = None, x, *ys[1:]
        if len(ys) == 2:  # 'lines' or 'fill_between'
            if ys[1] is None:
                ys = (np.array([0.0]), ys[0])  # user input 1 or 2 positional args
            elif ys[0] is None:
                ys = (np.array([0.0]), ys[1])  # user input keyword 'y2' but no y1
        if any(y is None for y in ys):
            raise ValueError('Missing required data array argument.')
        ys = tuple(map(_to_duck_array, ys))
        if x is not None:
            x = _to_duck_array(x)
        x, *ys, kwargs = self._auto_format_1d(x, *ys, zerox=zerox, **kwargs)
        # Geographic corrections
        if self.name == 'proplot_cartopy' and isinstance(kwargs.get('transform'), PlateCarree):  # noqa: E501
            x, *ys = _geo_cartopy_1d(x, *ys)
        elif self.name == 'proplot_basemap' and kwargs.get('latlon', None):
            xmin, xmax = self.projection.lonmin, self.projection.lonmax
            x, *ys = _geo_basemap_1d(x, *ys, xmin=xmin, xmax=xmax)
        return (x, *ys, kwargs)
    def _auto_format_2d(self, x, y, *zs, autoformat=None, autoguide=True, **kwargs):
        """
        Try to retrieve default coordinates from array-like objects and apply default
        formatting. Also apply optional transpose and update the keyword arguments.

        Parameters
        ----------
        x, y : array-like or None
            The coordinates (inferred from `zs` metadata when both are None).
        *zs : array-like
            The data arrays.
        autoformat : bool, optional
            Whether to apply labels from metadata (default rc['autoformat']).
        autoguide : bool, optional
            Whether to infer legend and colorbar titles from metadata.
        """
        # Retrieve coordinates
        autoformat = _not_none(autoformat, rc['autoformat'])
        if x is None and y is None:
            z = zs[0]
            if z.ndim == 1:
                x = _get_labels(z, axis=0)
                y = np.zeros(z.shape)  # default barb() and quiver() behavior in mpl
            else:
                x = _get_labels(z, axis=1)
                y = _get_labels(z, axis=0)
        # Apply labels and XY axis settings
        if self.name == 'proplot_cartesian':
            # Apply labels
            # NOTE: Do not overwrite existing labels!
            kw_format = {}
            if autoformat:
                for s, d in zip('xy', (x, y)):
                    title = _get_title(d)
                    if title:
                        axis = getattr(self, s + 'axis')
                        if axis.isDefault_label:
                            kw_format[s + 'label'] = title
            # Handle string-type coordinates
            x, kw_format = _get_coords(x, which='x', **kw_format)
            y, kw_format = _get_coords(y, which='y', **kw_format)
            # Reverse the axis for descending coordinate arrays
            for s, d in zip('xy', (x, y)):
                if d.size > 1 and d.ndim == 1 and _to_numpy_array(d)[1] < _to_numpy_array(d)[0]:  # noqa: E501
                    kw_format[s + 'reverse'] = True
            # Apply formatting
            if kw_format:
                self.format(**kw_format)
        # Apply title for legend or colorbar
        if autoguide and autoformat:
            title = _get_title(zs[0])
            if title:  # safely update legend_kw and colorbar_kw
                _guide_kw_to_arg('legend', kwargs, title=title)
                _guide_kw_to_arg('colorbar', kwargs, label=title)
        # Finally strip metadata
        x = _to_numpy_array(x)
        y = _to_numpy_array(y)
        zs = tuple(map(_to_numpy_array, zs))
        return (x, y, *zs, kwargs)
    def _standardize_2d(
        self, x, y, *zs, globe=False, edges=False, allow1d=False, order='C',
        **kwargs
    ):
        """
        Interpret positional arguments for all "2D" plotting commands.

        Parameters
        ----------
        x, y : array-like or None
            The coordinates.
        *zs : array-like
            The data arrays.
        globe : bool, optional
            Whether to apply global coverage fixes for geographic axes.
        edges : bool, optional
            Whether the command needs cell edges rather than centers.
        allow1d : bool, optional
            Whether to skip geographic corrections (e.g. for barb data).
        order : {'C', 'F'}, optional
            Whether arrays are in row-major or column-major order.
        """
        # Standardize values
        # NOTE: Functions pass two 'zs' at most right now
        if all(z is None for z in zs):
            x, y, zs = None, None, (x, y)[:len(zs)]
        if any(z is None for z in zs):
            raise ValueError('Missing required data array argument(s).')
        zs = tuple(_to_duck_array(z, strip_units=True) for z in zs)
        if x is not None:
            x = _to_duck_array(x)
        if y is not None:
            y = _to_duck_array(y)
        if order == 'F':
            # Transpose column-major input into the row-major convention
            zs = tuple(z.T for z in zs)
            if x is not None:
                x = x.T
            if y is not None:
                y = y.T
        x, y, *zs, kwargs = self._auto_format_2d(x, y, *zs, **kwargs)
        if edges:
            # NOTE: These functions quietly pass through 1D inputs, e.g. barb data
            x, y = _require_edges(x, y, zs[0])
        else:
            x, y = _require_centers(x, y, zs[0])
        # Geographic corrections
        if allow1d:
            pass
        elif self.name == 'proplot_cartopy' and isinstance(kwargs.get('transform'), PlateCarree):  # noqa: E501
            x, y, *zs = _geo_cartopy_2d(x, y, *zs, globe=globe)
        elif self.name == 'proplot_basemap' and kwargs.get('latlon', None):
            xmin, xmax = self.projection.lonmin, self.projection.lonmax
            x, y, *zs = _geo_basemap_2d(x, y, *zs, xmin=xmin, xmax=xmax, globe=globe)
            x, y = np.meshgrid(x, y)  # WARNING: required always
        return (x, y, *zs, kwargs)
def _parse_inbounds(self, *, inbounds=None, **kwargs):
"""
Capture the `inbounds` keyword arg and return data limit
extents if it is ``True``. Otherwise return ``None``. When
``_restrict_inbounds`` gets ``None`` it will silently exit.
"""
extents = None
inbounds = _not_none(inbounds, rc['axes.inbounds'])
if inbounds:
extents = list(self.dataLim.extents) # ensure modifiable
return kwargs, extents
    def _mask_inbounds(self, x, y, z, *, to_centers=False):
        """
        Restrict the sample data used for automatic `vmin` and `vmax` selection
        based on the existing x and y axis limits.

        Parameters
        ----------
        x, y : array-like
            The coordinates (1D or 2D).
        z : array-like
            The data to subsample.
        to_centers : bool, optional
            Whether to convert edge coordinates to centers for 2D data first.

        Returns
        -------
        z : array-like
            The subsampled data (or the input on failure or non-cartesian axes).
        """
        # Get masks
        # WARNING: Experimental, seems robust but this is not mission-critical so
        # keep this in a try-except clause for now. However *internally* we should
        # not reach this block unless everything is an array so raise that error.
        xmask = ymask = None
        if self.name != 'proplot_cartesian':
            return z  # TODO: support geographic projections when input is PlateCarree()
        if not all(getattr(a, 'ndim', None) in (1, 2) for a in (x, y, z)):
            raise ValueError('Invalid input coordinates. Must be 1D or 2D arrays.')
        try:
            # Get centers and masks
            if to_centers and z.ndim == 2:
                x, y = _require_centers(x, y, z)
            if not self.get_autoscalex_on():
                xlim = self.get_xlim()
                xmask = (x >= min(xlim)) & (x <= max(xlim))
            if not self.get_autoscaley_on():
                ylim = self.get_ylim()
                ymask = (y >= min(ylim)) & (y <= max(ylim))
            # Get subsample
            # NOTE: Use fancy indexing (np.ix_) only for 2D data with 1D masks;
            # otherwise boolean masks flatten the selection.
            if xmask is not None and ymask is not None:
                z = z[np.ix_(ymask, xmask)] if z.ndim == 2 and xmask.ndim == 1 else z[ymask & xmask]  # noqa: E501
            elif xmask is not None:
                z = z[:, xmask] if z.ndim == 2 and xmask.ndim == 1 else z[xmask]
            elif ymask is not None:
                z = z[ymask, :] if z.ndim == 2 and ymask.ndim == 1 else z[ymask]
            return z
        except Exception as err:
            warnings._warn_proplot(
                'Failed to restrict automatic colormap normalization algorithm '
                f'to in-bounds data only. Error message: {err}'
            )
            return z
def _restrict_inbounds(self, extents, x, y, **kwargs):
    """
    Restrict the `dataLim` to exclude out-of-bounds data when x (y) limits
    are fixed and we are determining default y (x) limits. This modifies
    the mutable input `extents` to support iteration over columns.

    Parameters
    ----------
    extents : list of float or None
        Mutable ``[x0, y0, x1, y1]`` extents (see ``_parse_inbounds``).
        When ``None`` this method silently does nothing.
    x, y : array-like
        The coordinates. Swapped when the keywords request ``vert=False``.

    Returns
    -------
    None
    """
    # WARNING: This feature is still experimental. But seems obvious. Matplotlib
    # updates data limits in ad hoc fashion differently for each plotting command
    # but since proplot standardizes inputs we can easily use them for dataLim.
    # NOTE(review): the remaining 'kwargs' are intentionally discarded here --
    # callers only rely on the dataLim/extents side effects.
    kwargs, vert = _get_vert(**kwargs)
    if extents is None or self.name != 'proplot_cartesian':
        return
    if not x.size or not y.size:
        return
    if not vert:
        x, y = y, x
    trans = self.dataLim
    autox, autoy = self.get_autoscalex_on(), self.get_autoscaley_on()
    try:
        if autoy and not autox and x.shape == y.shape:
            # Reset the y data limits
            xmin, xmax = sorted(self.get_xlim())
            mask = (x >= xmin) & (x <= xmax)
            ymin, ymax = _safe_range(_safe_mask(mask, y))  # in-bounds y limits
            convert = self.convert_yunits  # handle datetime, pint units
            if ymin is not None:
                trans.y0 = extents[1] = min(convert(ymin), extents[1])
            if ymax is not None:
                trans.y1 = extents[3] = max(convert(ymax), extents[3])
            self._request_autoscale_view()
        if autox and not autoy and y.shape == x.shape:
            # Reset the x data limits
            ymin, ymax = sorted(self.get_ylim())
            mask = (y >= ymin) & (y <= ymax)
            xmin, xmax = _safe_range(_safe_mask(mask, x))  # in-bounds x limits
            convert = self.convert_xunits  # handle datetime, pint units
            if xmin is not None:
                trans.x0 = extents[0] = min(convert(xmin), extents[0])
            if xmax is not None:
                trans.x1 = extents[2] = max(convert(xmax), extents[2])
            self._request_autoscale_view()
    except Exception as err:
        # Best-effort feature: warn rather than abort the plotting command
        warnings._warn_proplot(
            'Failed to restrict automatic y (x) axis limit algorithm to '
            f'data within locked x (y) limits only. Error message: {err}'
        )
def _parse_color(self, x, y, c, *, apply_cycle=True, **kwargs):
    """
    Parse either a colormap or a color cycler for commands that accept both
    (scatter, streamplot, quiver, barbs). Scalar colors are converted to HEX
    strings so that downstream calls never emit ambiguous-color warnings.
    Colormaps are made discrete and faded to sub-white luminance by default.
    """
    # NOTE: This function is positioned right above all _parse_cmap and
    # _parse_cycle functions and helper functions.
    single_color = c is None or mcolors.is_color_like(c)
    if single_color:
        if c is not None:
            # Pre-convert to HEX so scatter() never sees an RGB[A] tuple
            c = pcolors.to_hex(c)
        if apply_cycle:  # scatter() defers this so it can use the correct 'N'
            kwargs = self._parse_cycle(**kwargs)
        unused = (self._parse_cmap, self._parse_levels, self._parse_autolev, self._parse_vlim)  # noqa: E501
    else:
        kwargs['line_plot'] = True
        kwargs['default_discrete'] = False
        kwargs = self._parse_cmap(x, y, c, **kwargs)
        unused = (self._parse_cycle,)
    leftover = _pop_params(kwargs, *unused, ignore_internal=True)
    if leftover:
        warnings._warn_proplot(f'Ignoring bad unused keyword arg(s): {leftover}')
    return (c, kwargs)
def _parse_vlim(
    self, *args,
    vmin=None, vmax=None, to_centers=False,
    robust=None, inbounds=None, **kwargs,
):
    """
    Return a suitable vmin and vmax based on the input data.

    Parameters
    ----------
    *args
        The sample data.
    vmin, vmax : float, optional
        The user input minimum and maximum.
    robust : bool, float, or 2-tuple, optional
        Whether to limit the default range to exclude outliers. A single
        number is treated as a central percentile range; a 2-tuple gives
        the explicit (min, max) percentiles.
    inbounds : bool, optional
        Whether to filter to in-bounds data.
    to_centers : bool, optional
        Whether to convert coordinates to 'centers'.

    Returns
    -------
    vmin, vmax : float
        The minimum and maximum.
    kwargs
        Unused arguments.
    """
    # Parse vmin and vmax
    automin = vmin is None
    automax = vmax is None
    if not automin and not automax:
        return vmin, vmax, kwargs
    # Parse input args
    # NOTE: robust=True means the central 96th-percentile range and
    # robust=False the full range (100th percentile).
    inbounds = _not_none(inbounds, rc['cmap.inbounds'])
    robust = _not_none(robust, rc['cmap.robust'], False)
    robust = 96 if robust is True else 100 if robust is False else robust
    robust = np.atleast_1d(robust)
    if robust.size == 1:
        pmin, pmax = 50 + 0.5 * np.array([-robust.item(), robust.item()])
    elif robust.size == 2:
        pmin, pmax = robust.flat  # pull out of array
    else:
        raise ValueError(f'Unexpected robust={robust!r}. Must be bool, float, or 2-tuple.')  # noqa: E501
    # Get sample data
    # NOTE: Critical to use _to_duck_array here because some commands
    # are unstandardized.
    # NOTE: Try to get reasonable *count* levels for hexbin/hist2d, but in general
    # have no way to select nice ones a priori (why we disable discretenorm).
    # NOTE: Currently we only ever use this function with *single* array input
    # but in future could make this public as a way for users (me) to get
    # automatic synced contours for a bunch of arrays in a grid.
    vmins, vmaxs = [], []
    if len(args) > 2:
        x, y, *zs = args
    else:
        x, y, *zs = None, None, *args
    for z in zs:
        if z is None:  # e.g. empty scatter color
            continue
        if z.ndim > 2:  # e.g. imshow data
            continue
        z = _to_numpy_array(z)
        if inbounds and x is not None and y is not None:  # ignore if None coords
            z = self._mask_inbounds(x, y, z, to_centers=to_centers)
        vmin, vmax = _safe_range(z, pmin, pmax, automin=automin, automax=automax)
        if vmin is not None:
            vmins.append(vmin)
        if vmax is not None:
            vmaxs.append(vmax)
    # Fall back to (0, 1) when every array was skipped or fully masked
    return min(vmins, default=0), max(vmaxs, default=1), kwargs
def _parse_autolev(
    self, *args, levels=None,
    extend='neither', norm=None, norm_kw=None, vmin=None, vmax=None,
    locator=None, locator_kw=None, symmetric=None, **kwargs
):
    """
    Return a suitable level list given the input data, normalizer,
    locator, and vmin and vmax.

    Parameters
    ----------
    *args
        The sample data. Passed to `_parse_vlim`.
    levels : int
        The approximate number of levels.
    extend : str, optional
        The extend setting. Controls how aggressively out-of-bounds
        levels are trimmed below.
    norm, norm_kw : optional
        Passed to `~proplot.constructor.Norm`. Used to change the default
        `locator` (e.g., a `~matplotlib.colors.LogNorm` normalizer will use
        a `~matplotlib.ticker.LogLocator` to generate levels).
    vmin, vmax : float, optional
        The approximate minimum and maximum level edges. Passed to the locator.
    locator, locator_kw : optional
        Passed to `~proplot.constructor.Locator`. Overrides the locator
        inferred from the normalizer.
    symmetric : bool, optional
        Whether the resulting levels should be symmetric about zero.

    Returns
    -------
    levels : list of float
        The level edges.
    kwargs
        Unused arguments.
    """
    # Input args
    # NOTE: Some of this is adapted from the hidden contour.ContourSet._autolev
    # NOTE: We use 'symmetric' with MaxNLocator to ensure boundaries include a
    # zero level but may trim many of these below.
    norm_kw = norm_kw or {}
    locator_kw = locator_kw or {}
    levels = _not_none(levels, rc['cmap.levels'])
    vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))
    vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))
    symmetric = _not_none(
        symmetric=symmetric,
        locator_kw_symmetric=locator_kw.pop('symmetric', None),
        default=False,
    )
    # Get default locator from input norm
    # NOTE: This normalizer is only temporary for inferring level locs.
    # NOTE: Previously this was constructed twice with identical arguments;
    # now it is constructed exactly once.
    norm = constructor.Norm(norm or 'linear', **norm_kw)
    if locator is not None:
        locator = constructor.Locator(locator, **locator_kw)
    elif isinstance(norm, mcolors.LogNorm):
        locator = mticker.LogLocator(**locator_kw)
    elif isinstance(norm, mcolors.SymLogNorm):
        # Forward the norm 'base' and 'linthresh' as locator defaults
        for key, default in (('base', 10), ('linthresh', 1)):
            val = _not_none(getattr(norm, key, None), getattr(norm, '_' + key, None), default)  # noqa: E501
            locator_kw.setdefault(key, val)
        locator = mticker.SymmetricalLogLocator(**locator_kw)
    else:
        locator_kw['symmetric'] = symmetric
        locator = mticker.MaxNLocator(levels, min_n_ticks=1, **locator_kw)
    # Get default level locations
    nlevs = levels
    automin = vmin is None
    automax = vmax is None
    vmin, vmax, kwargs = self._parse_vlim(*args, vmin=vmin, vmax=vmax, **kwargs)
    try:
        levels = locator.tick_values(vmin, vmax)
    except RuntimeError:  # too-many-ticks error
        levels = np.linspace(vmin, vmax, levels)  # TODO: _autolev used N+1
    # Possibly trim levels far outside of 'vmin' and 'vmax'
    # NOTE: This part is mostly copied from matplotlib _autolev
    if not symmetric:
        i0, i1 = 0, len(levels)  # defaults
        under, = np.where(levels < vmin)
        if len(under):
            i0 = under[-1]
            if not automin or extend in ('min', 'both'):
                i0 += 1  # permit out-of-bounds data
        over, = np.where(levels > vmax)
        if len(over):
            # NOTE: 'over' is never empty inside this branch so the previous
            # conditional expression here was dead code.
            i1 = over[0] + 1
            if not automax or extend in ('max', 'both'):
                i1 -= 1  # permit out-of-bounds data
        if i1 - i0 < 3:
            i0, i1 = 0, len(levels)  # revert
        levels = levels[i0:i1]
    # Compare the no. of levels we *got* (levels) to what we *wanted* (N)
    # If we wanted more than 2 times the result, then add nn - 1 extra
    # levels in-between the returned levels *in normalized space*.
    # Example: A LogNorm gives too few levels, so we select extra levels
    # here, but use the locator for determining tick locations.
    nn = nlevs // len(levels)
    if nn >= 2:
        olevels = norm(levels)
        nlevels = []
        for i in range(len(levels) - 1):
            l1, l2 = olevels[i], olevels[i + 1]
            nlevels.extend(np.linspace(l1, l2, nn + 1)[:-1])
        nlevels.append(olevels[-1])
        levels = norm.inverse(nlevels)
    return levels, kwargs
def _parse_levels(
    self, *args, N=None, levels=None, values=None, minlength=2,
    positive=False, negative=False, nozero=False, norm=None, norm_kw=None,
    vmin=None, vmax=None, skip_autolev=False, **kwargs,
):
    """
    Return levels resulting from a wide variety of keyword options.

    Parameters
    ----------
    *args
        The sample data. Passed to `_parse_vlim`.
    N
        Shorthand for `levels`.
    levels : int or sequence of float, optional
        The levels list or (approximate) number of levels to create.
    values : int or sequence of float, optional
        The level center list or (approximate) number of level centers to create.
    minlength : int, optional
        The minimum number of levels allowed.
    positive, negative, nozero : bool, optional
        Whether to remove out non-positive, non-negative, and zero-valued
        levels. The latter is useful for single-color contour plots.
    norm, norm_kw : optional
        Passed to `~proplot.constructor.Norm`. Used to possibly infer levels
        or to convert values to levels.
    vmin, vmax
        Passed to ``_parse_autolev``.

    Returns
    -------
    levels : list of float
        The level edges.
    kwargs
        Unused arguments.
    """
    # Rigorously check user input levels and values
    # NOTE: Include special case where color levels are referenced
    # by string label values.
    # NOTE: Normalize 'norm_kw' *before* popping from it. Previously the
    # pop ran first, so the default norm_kw=None raised an AttributeError.
    norm_kw = norm_kw or {}
    levels = _not_none(
        N=N, levels=levels, norm_kw_levels=norm_kw.pop('levels', None),
    )
    if positive and negative:
        negative = False
        warnings._warn_proplot(
            'Incompatible args positive=True and negative=True. Using former.'
        )
    if levels is not None and values is not None:
        warnings._warn_proplot(
            f'Incompatible args levels={levels!r} and values={values!r}. Using former.'  # noqa: E501
        )
    for key, val in (('levels', levels), ('values', values)):
        if val is None:
            continue
        if isinstance(norm, (mcolors.BoundaryNorm, pcolors.SegmentedNorm)):
            warnings._warn_proplot(
                f'Ignoring {key}={val}. Instead using norm={norm!r} boundaries.'
            )
        if not np.iterable(val):
            continue
        if len(val) < minlength:
            raise ValueError(
                f'Invalid {key}={val}. Must be at least length {minlength}.'
            )
        if len(val) >= 2 and np.any(np.sign(np.diff(val)) != np.sign(val[1] - val[0])):  # noqa: E501
            raise ValueError(
                f'Invalid {key}={val}. Must be monotonically increasing or decreasing.'  # noqa: E501
            )
    if isinstance(norm, (mcolors.BoundaryNorm, pcolors.SegmentedNorm)):
        levels, values = norm.boundaries, None
    else:
        levels = _not_none(levels, rc['cmap.levels'])
    # Infer level edges from level centers if possible
    # NOTE: The only way for user to manually impose BoundaryNorm is by
    # passing one -- users cannot create one using Norm constructor key.
    descending = None
    if values is None:
        pass
    elif isinstance(values, Integral):
        levels = values + 1
    elif np.iterable(values) and len(values) == 1:
        levels = [values[0] - 1, values[0] + 1]  # weird but why not
    elif norm is None or norm in ('segments', 'segmented'):
        # Try to generate levels so SegmentedNorm will place 'values' ticks at the
        # center of each segment. edges() gives wrong result unless spacing is even.
        # Solve: (x1 + x2) / 2 = y --> x2 = 2 * y - x1 with arbitrary starting x1.
        values, descending = pcolors._sanitize_levels(values)
        levels = [values[0] - (values[1] - values[0]) / 2]  # arbitrary x1
        for val in values:
            levels.append(2 * val - levels[-1])
        if any(np.diff(levels) < 0):  # backup plan in event of weird ticks
            levels = edges(values)
        if descending:  # then revert back below
            levels = levels[::-1]
    else:
        # Generate levels by finding in-between points in the
        # normalized numeric space, e.g. LogNorm space.
        # NOTE: norm_kw was already normalized to a dict above.
        convert = constructor.Norm(norm, **norm_kw)
        levels = convert.inverse(edges(convert(values)))
    # Process level edges and infer defaults
    # NOTE: Matplotlib colorbar algorithm *cannot* handle descending levels so
    # this function reverses them and adds special attribute to the normalizer.
    # Then colorbar() reads this attr and flips the axis and the colormap direction
    if np.iterable(levels) and len(levels) > 2:
        levels, descending = pcolors._sanitize_levels(levels)
    if not np.iterable(levels) and not skip_autolev:
        levels, kwargs = self._parse_autolev(
            *args, levels=levels, vmin=vmin, vmax=vmax, norm=norm, norm_kw=norm_kw, **kwargs  # noqa: E501
        )
    ticks = values if np.iterable(values) else levels
    if descending is not None:
        kwargs.setdefault('descending', descending)  # for _parse_discrete
    if ticks is not None and np.iterable(ticks):
        _guide_kw_to_arg('colorbar', kwargs, locator=ticks)
    # Filter the resulting level boundaries
    if levels is not None and np.iterable(levels):
        if nozero and 0 in levels:
            levels = levels[levels != 0]
        if positive:
            levels = levels[levels >= 0]
        if negative:
            levels = levels[levels <= 0]
    return levels, kwargs
def _parse_discrete(
    self, levels, norm, cmap, extend='neither', descending=False, **kwargs,
):
    """
    Create a `~proplot.colors.DiscreteNorm` or `~proplot.colors.BoundaryNorm`
    from the input colormap and normalizer.

    Parameters
    ----------
    levels : sequence of float
        The level boundaries.
    norm : `~matplotlib.colors.Normalize`
        The continuous normalizer.
    cmap : `~matplotlib.colors.Colormap`
        The colormap.
    extend : str, optional
        The extend setting.
    descending : bool, optional
        Whether levels are descending.

    Returns
    -------
    norm : `~proplot.colors.DiscreteNorm`
        The discrete normalizer.
    cmap : `~matplotlib.colors.Colormap`
        The possibly-modified colormap.
    kwargs
        Unused arguments.
    """
    # Reverse the colormap if input levels or values were descending
    # See _parse_levels for details
    under = cmap._rgba_under
    over = cmap._rgba_over
    unique = extend  # default behavior
    cyclic = getattr(cmap, '_cyclic', None)
    qualitative = isinstance(cmap, pcolors.DiscreteColormap)  # see _parse_cmap
    if descending:
        cmap = cmap.reversed()
    # Ensure end colors are unique by scaling colors as if extend='both'
    # NOTE: Inside _parse_cmap should have enforced extend='neither'
    if cyclic:
        step = 0.5
        unique = 'both'
    # Ensure color list matches level list
    # NOTE: If user-input levels were integer or omitted then integer levels
    # passed to level will have matched
    elif qualitative:
        # Truncate or wrap color list (see matplotlib.ListedColormap)
        step = 0.5  # try to sample the central color index
        auto_under = under is None and extend in ('min', 'both')
        auto_over = over is None and extend in ('max', 'both')
        ncolors = len(levels) - 1 + auto_under + auto_over
        colors = list(itertools.islice(itertools.cycle(cmap.colors), ncolors))
        # Create new colormap and optionally apply colors to extremes
        if auto_under:
            under, *colors = colors
        if auto_over:
            *colors, over = colors
        cmap = cmap.copy(colors, N=len(colors))
        if under is not None:
            cmap.set_under(under)
        if over is not None:
            cmap.set_over(over)
    # Ensure middle colors sample full range when extreme colors are present
    # by scaling colors as if extend='neither'
    else:
        # Keep unique bins
        step = 1.0
        if over is not None and under is not None:
            unique = 'neither'
        # Turn off unique bin for over-bounds colors
        elif over is not None:
            if extend == 'both':
                unique = 'min'
            elif extend == 'max':
                unique = 'neither'
        # Turn off unique bin for under-bounds colors
        # NOTE: This branch previously mirrored the over-bounds branch
        # verbatim ('min'/'max'), which was a copy-paste bug. With an
        # explicit under color only the *max* bin still needs unique
        # scaling under extend='both', and extend='min' needs none.
        elif under is not None:
            if extend == 'both':
                unique = 'max'
            elif extend == 'min':
                unique = 'neither'
    # Generate DiscreteNorm and update "child" norm with vmin and vmax from
    # levels. This lets the colorbar set tick locations properly!
    if not isinstance(norm, mcolors.BoundaryNorm) and len(levels) > 1:
        norm = pcolors.DiscreteNorm(
            levels, norm=norm, descending=descending, unique=unique, step=step,
        )
    return norm, cmap, kwargs
@warnings._rename_kwargs('0.6', centers='values')
@_snippet_manager
def _parse_cmap(
    self, *args,
    cmap=None, cmap_kw=None, c=None, color=None, colors=None, default_cmap=None,
    norm=None, norm_kw=None, extend='neither', vmin=None, vmax=None,
    sequential=None, diverging=None, qualitative=None, cyclic=None,
    discrete=None, default_discrete=True, skip_autolev=False,
    line_plot=False, contour_plot=False, **kwargs
):
    """
    Parse colormap and normalizer arguments.

    Parameters
    ----------
    *args
        The sample data. Passed to `_parse_levels` or `_parse_vlim`.
    cmap, cmap_kw : optional
        Passed to `~proplot.constructor.Colormap`.
    c, color, colors : optional
        Color(s) used to build a qualitative colormap. Ignored with a
        warning when `cmap` is also passed.
    default_cmap : optional
        Internal fallback colormap used when `cmap` is not given.
    norm, norm_kw : optional
        Passed to `~proplot.constructor.Norm`.
    extend : str, optional
        The out-of-bounds extension setting. Forced to ``'neither'`` for
        cyclic colormaps.
    vmin, vmax : float, optional
        The normalization limits.
    sequential, diverging, qualitative, cyclic : bool, optional
        Which default colormap category to use when `cmap` is not given.
    discrete, default_discrete : bool, optional
        Whether to build discrete levels and a discrete normalizer.
    skip_autolev : bool, optional
        Whether to skip automatic level and limit generation.
    line_plot, contour_plot : bool, optional
        Flags for line-type and contour-type commands that adjust defaults.

    Returns
    -------
    kwargs
        The keyword arguments updated with ``'cmap'`` and ``'norm'`` (plus
        ``'levels'`` and ``'extend'`` for contour plots).
    """
    # Parse keyword args
    # NOTE: Always disable 'autodiverging' when an unknown colormap is passed to
    # avoid awkwardly combining 'DivergingNorm' with sequential colormaps.
    # However let people use diverging=False with diverging cmaps because
    # some use them (wrongly IMO but nbd) for increased color contrast.
    cmap_kw = cmap_kw or {}
    norm_kw = norm_kw or {}
    vmin = _not_none(vmin=vmin, norm_kw_vmin=norm_kw.pop('vmin', None))
    vmax = _not_none(vmax=vmax, norm_kw_vmax=norm_kw.pop('vmax', None))
    colors = _not_none(c=c, color=color, colors=colors)  # in case untranslated
    autodiverging = rc['cmap.autodiverging']
    name = getattr(cmap, 'name', cmap)
    if isinstance(name, str) and name not in pcolors.CMAPS_DIVERGING:
        autodiverging = False  # avoid auto-truncation of sequential colormaps
    # Build qualitative colormap using 'colors'
    # NOTE: Try to match number of level centers with number of colors here
    # WARNING: Previously 'colors' set the edgecolors. To avoid all-black
    # colormap make sure to ignore 'colors' if 'cmap' was also passed.
    # WARNING: Previously tried setting number of levels to len(colors) but
    # this would make single-level contour plots and _parse_autolev is designed
    # to only give approximate level count so failed anyway. Users should pass
    # their own levels to avoid truncation/cycling in these very special cases.
    if cmap is not None and colors is not None:
        warnings._warn_proplot(
            f'You specifed both cmap={cmap!r} and the qualitative-colormap '
            f'colors={colors!r}. Ignoring the latter.'
        )
        colors = None
    if colors is not None:
        if mcolors.is_color_like(colors):
            colors = [colors]  # RGB[A] tuple possibly
        cmap = colors = np.atleast_1d(colors)
        cmap_kw['listmode'] = 'discrete'
    # Create the user-input colormap
    # Also force options in special cases
    if line_plot:
        cmap_kw['default_luminance'] = pcolors.CYCLE_LUMINANCE
    if cmap is not None:
        cmap = constructor.Colormap(cmap, **cmap_kw)  # for testing only
    cyclic = _not_none(cyclic, getattr(cmap, '_cyclic', None))
    if cyclic and extend != 'neither':
        warnings._warn_proplot(f"Cyclic colormaps require extend='neither'. Ignoring extend={extend!r}.")  # noqa: E501
        extend = 'neither'
    qualitative = _not_none(qualitative, isinstance(cmap, pcolors.DiscreteColormap))
    if qualitative and discrete is not None and not discrete:
        warnings._warn_proplot('Qualitative colormaps require discrete=True. Ignoring discrete=False.')  # noqa: E501
        discrete = True
    if contour_plot and discrete is not None and not discrete:
        warnings._warn_proplot('Contoured plots require discrete=True. Ignoring discrete=False.')  # noqa: E501
        discrete = True
    # Any level-related keyword implies discrete levels were requested
    keys = ('levels', 'values', 'locator', 'negative', 'positive', 'symmetric')
    if discrete is None and any(key in kwargs for key in keys):
        discrete = True  # override
    else:
        discrete = _not_none(discrete, rc['cmap.discrete'], default_discrete)
    # Determine the appropriate 'vmin', 'vmax', and/or 'levels'
    # NOTE: Unlike xarray, but like matplotlib, vmin and vmax only approximately
    # determine level range. Levels are selected with Locator.tick_values().
    levels = None  # unused
    if discrete:
        levels, kwargs = self._parse_levels(
            *args, vmin=vmin, vmax=vmax, norm=norm, norm_kw=norm_kw,
            extend=extend, skip_autolev=skip_autolev, **kwargs
        )
    elif not skip_autolev:
        vmin, vmax, kwargs = self._parse_vlim(
            *args, vmin=vmin, vmax=vmax, **kwargs
        )
    # Infer a diverging default when levels or limits straddle zero
    if autodiverging:
        default_diverging = None
        if levels is not None:
            _, counts = np.unique(np.sign(levels), return_counts=True)
            if counts[counts > 1].size > 1:
                default_diverging = True
        elif vmin is not None and vmax is not None:
            if np.sign(vmin) != np.sign(vmax):
                default_diverging = True
        diverging = _not_none(diverging, default_diverging)
    # Create the continuous normalizer. Only use SegmentedNorm if necessary
    # NOTE: We create normalizer here only because auto level generation depends
    # on the normalizer class (e.g. LogNorm). We don't have to worry about vmin
    # and vmax because they get applied to normalizer inside DiscreteNorm.
    if norm is None and levels is not None and len(levels) > 0:
        if len(levels) == 1:  # edge case, use central colormap color
            vmin = _not_none(vmin, levels[0] - 1)
            vmax = _not_none(vmax, levels[0] + 1)
        else:
            vmin, vmax = min(levels), max(levels)
            steps = np.abs(np.diff(levels))
            eps = np.mean(steps) / 1e3
            # Uneven level spacing requires a segmented normalizer
            if np.any(np.abs(np.diff(steps)) >= eps):
                norm = 'segmented'
    if norm in ('segments', 'segmented'):
        if np.iterable(levels):
            norm_kw['levels'] = levels  # apply levels
        else:
            norm = None  # same result but much faster
    if diverging:
        norm = _not_none(norm, 'div')
    else:
        norm = _not_none(norm, 'linear')
    norm = constructor.Norm(norm, vmin=vmin, vmax=vmax, **norm_kw)
    if autodiverging and isinstance(norm, pcolors.DivergingNorm):
        diverging = _not_none(diverging, True)
    # Create the final colormap
    if cmap is None:
        if default_cmap is not None:  # used internally
            cmap = default_cmap
        elif qualitative:
            cmap = rc['cmap.qualitative']
        elif cyclic:
            cmap = rc['cmap.cyclic']
        elif diverging:
            cmap = rc['cmap.diverging']
        elif sequential:
            cmap = rc['cmap.sequential']
        cmap = _not_none(cmap, rc['image.cmap'])
        cmap = constructor.Colormap(cmap, **cmap_kw)
    # Create the discrete normalizer
    # Then finally warn and remove unused args
    if levels is not None:
        kwargs['extend'] = extend
        norm, cmap, kwargs = self._parse_discrete(levels, norm, cmap, **kwargs)
    methods = (self._parse_levels, self._parse_autolev, self._parse_vlim)
    params = _pop_params(kwargs, *methods, ignore_internal=True)
    if 'N' in params:  # use this for lookup table N instead of levels N
        cmap = cmap.copy(N=params.pop('N'))
    if params:
        warnings._warn_proplot(f'Ignoring unused keyword args(s): {params}')
    # Update outgoing args
    # NOTE: With contour(..., discrete=False, levels=levels) users can bypass
    # proplot's level selection and use native matplotlib level selection
    if contour_plot:
        kwargs['levels'] = levels
        kwargs['extend'] = extend
    kwargs.update({'cmap': cmap, 'norm': norm})
    _guide_kw_to_arg('colorbar', kwargs, extend=extend)
    return kwargs
def _iter_pairs(self, *args):
"""
Iterate over ``[x1,] y1, [fmt1,] [x2,] y2, [fmt2,] ...`` input.
"""
# NOTE: This is copied from _process_plot_var_args.__call__ to avoid relying
# on private API. We emulate this input style with successive plot() calls.
args = list(args)
while args: # this permits empty input
x, y, *args = args
if args and isinstance(args[0], str): # format string detected!
fmt, *args = args
elif isinstance(y, str): # omits some of matplotlib's rigor but whatevs
x, y, fmt = None, x, y
else:
fmt = None
yield x, y, fmt
def _iter_columns(self, *args, label=None, labels=None, values=None, **kwargs):
    """
    Iterate over columns of positional arguments and add successive ``'label'``
    keyword arguments using the input label-list ``'labels'``.

    Yields
    ------
    tuple
        ``(i, n, *cols, kw)`` -- the column index, total column count, the
        *i*-th column of each input array, and a copy of the keyword
        dictionary with ``'label'`` set for that column.
    """
    # Handle cycle args and label lists
    # NOTE: Arrays here should have had metadata stripped by _standardize_1d
    # but could still be pint quantities that get processed by axis converter.
    n = max(1 if not _is_array(a) or a.ndim < 2 else a.shape[-1] for a in args)
    labels = _not_none(label=label, values=values, labels=labels)
    if not np.iterable(labels) or isinstance(labels, str):
        labels = n * [labels]  # broadcast scalar (possibly None) labels
    if len(labels) != n:
        raise ValueError(f'Array has {n} columns but got {len(labels)} labels.')
    # NOTE(review): at this point 'labels' is always a list (never None),
    # so the 'else' branch below appears unreachable -- confirm before removal.
    if labels is not None:
        labels = [str(_not_none(label, '')) for label in _to_numpy_array(labels)]
    else:
        labels = n * [None]
    # Yield successive columns
    for i in range(n):
        kw = kwargs.copy()
        kw['label'] = labels[i] or None  # empty string becomes None
        a = tuple(a if not _is_array(a) or a.ndim < 2 else a[..., i] for a in args)
        yield (i, n, *a, kw)
def _parse_cycle(
    self, ncycle=None, *,
    cycle=None, cycle_kw=None, cycle_manually=None, return_cycle=False, **kwargs
):
    """
    Parse property cycle-related arguments.

    Parameters
    ----------
    ncycle : int, optional
        The number of cycle colors to request. Ignored when ``1`` (used
        by column-by-column plotting commands).
    cycle : optional
        The cycle specification. ``False`` or ``'none'`` disables the
        cycler; ``True`` re-activates :rc:`axes.prop_cycle`.
    cycle_kw : dict, optional
        Passed to `~proplot.constructor.Cycle`.
    cycle_manually : dict, optional
        Mapping of cycler property names to output keyword names, used
        when the native matplotlib command does not apply the property
        cycle itself (e.g. scatter).
    return_cycle : bool, optional
        Whether to return the cycle instead of activating it.

    Returns
    -------
    cycle
        The property cycle (only when ``return_cycle=True``).
    kwargs
        The updated keyword arguments.
    """
    # Create the property cycler and update it if necessary
    # NOTE: Matplotlib Cycler() objects have built-in __eq__ operator
    # so really easy to check if the cycler has changed!
    if cycle is not None or cycle_kw:
        cycle_kw = cycle_kw or {}
        if ncycle != 1:  # ignore for column-by-column plotting commands
            cycle_kw.setdefault('N', ncycle)  # if None then filled in Colormap()
        if isinstance(cycle, str) and cycle.lower() == 'none':
            cycle = False
        if not cycle:
            args = ()
        elif cycle is True:  # consistency with 'False' ('reactivate' the cycler)
            args = (rc['axes.prop_cycle'],)
        else:
            args = (cycle,)
        cycle = constructor.Cycle(*args, **cycle_kw)
        with warnings.catch_warnings():  # hide 'elementwise-comparison failed'
            warnings.simplefilter('ignore', FutureWarning)
            if return_cycle:
                pass
            elif cycle != self._active_cycle:
                self.set_prop_cycle(cycle)
    # Manually extract and apply settings to outgoing keyword arguments
    # if native matplotlib function does not include desired properties
    cycle_manually = cycle_manually or {}
    parser = self._get_lines  # the _process_plot_var_args instance
    props = {}  # which keys to apply from property cycler
    for prop, key in cycle_manually.items():
        value = kwargs.get(key, None)
        if value is None and prop in parser._prop_keys:
            props[prop] = key
    if props:
        # Advance the cycler once and copy the requested properties
        dict_ = next(parser.prop_cycler)
        for prop, key in props.items():
            value = dict_[prop]
            if key == 'c':  # special case: scatter() color must be converted to hex
                value = pcolors.to_hex(value)
            kwargs[key] = value
    if return_cycle:
        return cycle, kwargs  # needed for stem() to apply in a context()
    else:
        return kwargs
def _error_bars(
    self, x, y, *_, distribution=None,
    default_bars=True, default_boxes=False,
    barstd=None, barstds=None, barpctile=None, barpctiles=None, bardata=None,
    boxstd=None, boxstds=None, boxpctile=None, boxpctiles=None, boxdata=None,
    capsize=None, **kwargs,
):
    """
    Add up to 2 error indicators: thick "boxes" and thin "bars".

    Parameters
    ----------
    x, y : array-like
        The coordinates. Error ranges are computed along `y` (swapped
        when the keywords request ``vert=False``).
    distribution : array-like, optional
        The distribution used to infer default error ranges.
    default_bars, default_boxes : bool, optional
        Whether to draw bars and boxes when a distribution is passed but
        no explicit error indicators were requested.
    barstd(s), barpctile(s), bardata : optional
        Standard deviation multiples, percentile range, or explicit error
        data for the thin error bars.
    boxstd(s), boxpctile(s), boxdata : optional
        As above, but for the thick error "boxes".
    capsize : float, optional
        The error bar cap size.

    Returns
    -------
    tuple
        ``(*eobjs, kwargs)`` -- the errorbar artists followed by the
        remaining keyword arguments with ``'distribution'`` re-attached.
    """
    # Parse input args
    # NOTE: Want to keep _error_bars() and _error_shading() separate. But also
    # want default behavior where some default error indicator is shown if user
    # requests means/medians only. Result is the below kludge.
    kwargs, vert = _get_vert(**kwargs)
    barstds = _not_none(barstd=barstd, barstds=barstds)
    boxstds = _not_none(boxstd=boxstd, boxstds=boxstds)
    barpctiles = _not_none(barpctile=barpctile, barpctiles=barpctiles)
    boxpctiles = _not_none(boxpctile=boxpctile, boxpctiles=boxpctiles)
    bars = any(_ is not None for _ in (bardata, barstds, barpctiles))
    boxes = any(_ is not None for _ in (boxdata, boxstds, boxpctiles))
    # Detect shading-related keywords so we don't add default bars on top
    shade = any(
        prefix + suffix in key for key in kwargs
        for prefix in ('shade', 'fade') for suffix in ('std', 'pctile', 'data')
    )
    if distribution is not None and not bars and not boxes and not shade:
        barstds = bars = default_bars
        boxstds = boxes = default_boxes
    # Error bar properties
    edgecolor = kwargs.get('edgecolor', rc['boxplot.whiskerprops.color'])
    barprops = _pop_props(kwargs, 'line', ignore='marker', prefix='bar')
    barprops['capsize'] = _not_none(capsize, rc['errorbar.capsize'])
    barprops['linestyle'] = 'none'
    barprops.setdefault('color', edgecolor)
    barprops.setdefault('zorder', 2.5)
    barprops.setdefault('linewidth', rc['boxplot.whiskerprops.linewidth'])
    # Error box properties
    # NOTE: Includes 'markerfacecolor' and 'markeredgecolor' props
    boxprops = _pop_props(kwargs, 'line', prefix='box')
    boxprops['capsize'] = 0
    boxprops['linestyle'] = 'none'
    boxprops.setdefault('color', barprops['color'])
    boxprops.setdefault('zorder', barprops['zorder'])
    boxprops.setdefault('linewidth', 4 * barprops['linewidth'])
    # Box marker properties
    boxmarker = {key: boxprops.pop(key) for key in tuple(boxprops) if 'marker' in key}  # noqa: E501
    boxmarker['c'] = _not_none(boxmarker.pop('markerfacecolor', None), 'white')
    boxmarker['s'] = _not_none(boxmarker.pop('markersize', None), boxprops['linewidth'] ** 0.5)  # noqa: E501
    boxmarker['zorder'] = boxprops['zorder']
    boxmarker['edgecolor'] = boxmarker.pop('markeredgecolor', None)
    boxmarker['linewidth'] = boxmarker.pop('markerlinewidth', None)
    if boxmarker.get('marker') is True:
        boxmarker['marker'] = 'o'
    elif default_boxes:  # enable by default
        boxmarker.setdefault('marker', 'o')
    # Draw thin or thick error bars from distributions or explicit errdata
    sy = 'y' if vert else 'x'  # yerr
    ex, ey = (x, y) if vert else (y, x)
    eobjs = []
    if bars:  # now impossible to make thin bar width different from cap width!
        edata, _ = _distribution_range(
            y, distribution,
            stds=barstds, pctiles=barpctiles, errdata=bardata,
            stds_default=(-3, 3), pctiles_default=(0, 100),
        )
        obj = self.errorbar(ex, ey, **barprops, **{sy + 'err': edata})
        eobjs.append(obj)
    if boxes:  # must go after so scatter point can go on top
        edata, _ = _distribution_range(
            y, distribution,
            stds=boxstds, pctiles=boxpctiles, errdata=boxdata,
            stds_default=(-1, 1), pctiles_default=(25, 75),
        )
        obj = self.errorbar(ex, ey, **boxprops, **{sy + 'err': edata})
        if boxmarker.get('marker', None):
            self.scatter(ex, ey, **boxmarker)  # central 'mean/median' marker
        eobjs.append(obj)
    kwargs['distribution'] = distribution
    return (*eobjs, kwargs)
def _error_shading(
    self, x, y, *_, distribution=None, color_key='color',
    shadestd=None, shadestds=None, shadepctile=None, shadepctiles=None, shadedata=None,  # noqa: E501
    fadestd=None, fadestds=None, fadepctile=None, fadepctiles=None, fadedata=None,
    shadelabel=False, fadelabel=False, **kwargs
):
    """
    Add up to 2 error indicators: more opaque "shading" and less opaque "fading".

    Parameters
    ----------
    x, y : array-like
        The coordinates. Error ranges are computed along `y` (the fill is
        horizontal when the keywords request ``vert=False``).
    distribution : array-like, optional
        The distribution used to infer default error ranges.
    color_key : str, optional
        The keyword used to read/write the shared color of the plot and
        its shading.
    shadestd(s), shadepctile(s), shadedata : optional
        Standard deviation multiples, percentile range, or explicit error
        data for the dark "shading".
    fadestd(s), fadepctile(s), fadedata : optional
        As above, but for the lighter "fading".
    shadelabel, fadelabel : optional
        Legend labels for the shading artists.

    Returns
    -------
    tuple
        ``(*eobjs, kwargs)`` -- the fill artists followed by the remaining
        keyword arguments with ``'distribution'`` re-attached.
    """
    kwargs, vert = _get_vert(**kwargs)
    shadestds = _not_none(shadestd=shadestd, shadestds=shadestds)
    fadestds = _not_none(fadestd=fadestd, fadestds=fadestds)
    shadepctiles = _not_none(shadepctile=shadepctile, shadepctiles=shadepctiles)
    fadepctiles = _not_none(fadepctile=fadepctile, fadepctiles=fadepctiles)
    shade = any(_ is not None for _ in (shadedata, shadestds, shadepctiles))
    fade = any(_ is not None for _ in (fadedata, fadestds, fadepctiles))
    # Shading properties
    shadeprops = _pop_props(kwargs, 'patch', prefix='shade')
    shadeprops.setdefault('alpha', 0.4)
    shadeprops.setdefault('zorder', 1.5)
    shadeprops.setdefault('linewidth', rc['patch.linewidth'])
    shadeprops.setdefault('edgecolor', 'none')
    # Fading properties
    fadeprops = _pop_props(kwargs, 'patch', prefix='fade')
    fadeprops.setdefault('zorder', shadeprops['zorder'])
    fadeprops.setdefault('alpha', 0.5 * shadeprops['alpha'])
    fadeprops.setdefault('linewidth', shadeprops['linewidth'])
    fadeprops.setdefault('edgecolor', 'none')
    # Get default color then apply to outgoing keyword args so
    # that plotting function will not advance to next cycler color.
    # TODO: More robust treatment of 'color' vs. 'facecolor'
    if (
        shade and shadeprops.get('facecolor', None) is None
        or fade and fadeprops.get('facecolor', None) is None
    ):
        color = kwargs.get(color_key, None)
        if color is None:  # add to outgoing
            color = kwargs[color_key] = self._get_lines.get_next_color()
        shadeprops.setdefault('facecolor', color)
        fadeprops.setdefault('facecolor', color)
    # Draw dark and light shading from distributions or explicit errdata
    # NOTE: Fading is drawn first (wider default range, lower alpha) so the
    # darker shading sits on top of it.
    eobjs = []
    fill = self.fill_between if vert else self.fill_betweenx
    if fade:
        edata, label = _distribution_range(
            y, distribution,
            stds=fadestds, pctiles=fadepctiles, errdata=fadedata,
            stds_default=(-3, 3), pctiles_default=(0, 100),
            label=fadelabel, absolute=True,
        )
        eobj = fill(x, *edata, label=label, **fadeprops)
        eobjs.append(eobj)
    if shade:
        edata, label = _distribution_range(
            y, distribution,
            stds=shadestds, pctiles=shadepctiles, errdata=shadedata,
            stds_default=(-2, 2), pctiles_default=(10, 90),
            label=shadelabel, absolute=True,
        )
        eobj = fill(x, *edata, label=label, **shadeprops)
        eobjs.append(eobj)
    kwargs['distribution'] = distribution
    return (*eobjs, kwargs)
def _label_contours(
    self, obj, cobj, fmt, *, c=None, color=None, colors=None,
    size=None, fontsize=None, inline_spacing=None, **kwargs
):
    """
    Add labels to contours with support for shade-dependent filled contour labels.
    Text color is inferred from the filled contour object and labels are always
    drawn on the unfilled contour object (otherwise errors crop up).

    Parameters
    ----------
    obj : matplotlib.contour.ContourSet
        The (possibly filled) contour set used to infer label colors.
    cobj : matplotlib.contour.ContourSet or None
        The unfilled contour set on which labels are drawn. Defaults to ``obj``.
    fmt : callable
        The label formatter passed to ``clabel``.
    c, color, colors : color-spec, optional
        The label color(s). When omitted and the contours are filled, a
        per-level white/black color is chosen from the level luminance.
    size, fontsize : float, optional
        The label font size. Defaults to ``rc['font.smallsize']``.
    inline_spacing : float, optional
        Space around labels where contour lines are removed. Default is ``2.5``.
    **kwargs
        Keys valid for ``clabel`` are forwarded to it; all remaining keys are
        applied to the resulting text objects.

    Returns
    -------
    labs : list of matplotlib.text.Text or None
        The labels, or ``None`` if no contours were labeled.
    """
    # Parse input args
    colors = _not_none(c=c, color=color, colors=colors)
    fontsize = _not_none(size=size, fontsize=fontsize, default=rc['font.smallsize'])
    inline_spacing = _not_none(inline_spacing, 2.5)
    # Separate text properties from clabel() keywords
    text_kw = {}
    clabel_keys = ('levels', 'inline', 'manual', 'rightside_up', 'use_clabeltext')
    for key in tuple(kwargs):  # allow dict to change size
        if key not in clabel_keys:
            text_kw[key] = kwargs.pop(key)
    # Draw hidden additional contour for filled contour labels
    # NOTE: Previously 'colors' was unconditionally overwritten here with
    # kwargs.pop('colors', None). Since 'colors' is a named keyword parameter
    # it can never appear in **kwargs, so that line always discarded the
    # user-specified c/color/colors value parsed above. It has been removed.
    cobj = _not_none(cobj, obj)
    if obj.filled and colors is None:
        # White labels on dark levels, black labels on light levels
        colors = []
        for level in obj.levels:
            _, _, lum = to_xyz(obj.cmap(obj.norm(level)))
            colors.append('w' if lum < 50 else 'k')
    # Draw labels
    labs = cobj.clabel(
        fmt=fmt, colors=colors, fontsize=fontsize, inline_spacing=inline_spacing, **kwargs  # noqa: E501
    )
    if labs is not None:  # returns None if no contours
        for lab in labs:
            lab.update(text_kw)
    return labs
def _label_gridboxes(
    self, obj, fmt, *, c=None, color=None, colors=None, size=None, fontsize=None, **kwargs  # noqa: E501
):
    """
    Add labels to pcolor boxes with support for shade-dependent text colors.
    Values are inferred from the unnormalized grid box color.

    Parameters
    ----------
    obj : matplotlib.collections.Collection
        The collection returned by a pcolor-type command.
    fmt : callable
        Formatter converting each box value to its label string.
    c, color, colors : color-spec, optional
        A fixed label color. When omitted a per-box white/black color is
        chosen from the box luminance.
    size, fontsize : float, optional
        The label font size. Defaults to ``rc['font.smallsize']``.
    **kwargs
        Passed to `~matplotlib.axes.Axes.text` for every label.

    Returns
    -------
    labs : list of matplotlib.text.Text
    """
    # Parse input args
    # NOTE: This function also hides grid boxes filled with NaNs to avoid ugly
    # issue where edge colors surround NaNs. Should maybe move this somewhere else.
    obj.update_scalarmappable()  # update 'edgecolors' list
    color = _not_none(c=c, color=color, colors=colors)
    fontsize = _not_none(size=size, fontsize=fontsize, default=rc['font.smallsize'])
    kwargs.setdefault('ha', 'center')
    kwargs.setdefault('va', 'center')
    # Apply colors and hide edge colors for empty grids
    labs = []
    array = obj.get_array()
    paths = obj.get_paths()
    edgecolors = _to_numpy_array(obj.get_edgecolors())
    if len(edgecolors) == 1:
        # Broadcast a single RGBA row so individual rows can be zeroed below
        edgecolors = np.repeat(edgecolors, len(array), axis=0)
    for i, (path, value) in enumerate(zip(paths, array)):
        # Round to the number corresponding to the *color* rather than
        # the exact data value. Similar to contour label numbering.
        if value is ma.masked or not np.isfinite(value):
            edgecolors[i, :] = 0  # hide the edges of this empty box
            continue
        if isinstance(obj.norm, pcolors.DiscreteNorm):
            value = obj.norm._norm.inverse(obj.norm(value))
        icolor = color
        if color is None:
            # White text on dark boxes, black text on light boxes
            _, _, lum = to_xyz(obj.cmap(obj.norm(value)), 'hcl')
            icolor = 'w' if lum < 50 else 'k'
        # Label at the center of the box's bounding box
        bbox = path.get_extents()
        x = (bbox.xmin + bbox.xmax) / 2
        y = (bbox.ymin + bbox.ymax) / 2
        lab = self.text(x, y, fmt(value), color=icolor, size=fontsize, **kwargs)
        labs.append(lab)
    obj.set_edgecolors(edgecolors)
    return labs
def _auto_labels(
    self, obj, cobj=None, labels=False, labels_kw=None,
    fmt=None, formatter=None, formatter_kw=None, precision=None,
):
    """
    Add number labels to contour sets or grid-box collections. The default
    formatter is `~proplot.ticker.SimpleFormatter` with a default maximum
    precision of ``3`` decimal places.
    """
    # TODO: Add quiverkey to this!
    if not labels:
        return
    labels_kw = labels_kw or {}
    formatter_kw = formatter_kw or {}
    # Resolve the formatter spec. Values embedded inside labels_kw take
    # precedence over the explicit keyword arguments.
    formatter = _not_none(
        fmt_labels_kw=labels_kw.pop('fmt', None),
        formatter_labels_kw=labels_kw.pop('formatter', None),
        fmt=fmt,
        formatter=formatter,
        default='simple',
    )
    # Resolve the precision, preferring the value embedded in formatter_kw
    precision = _not_none(
        formatter_kw_precision=formatter_kw.pop('precision', None),
        precision=precision,
        default=3,  # should be lower than the default intended for tick labels
    )
    formatter = constructor.Formatter(formatter, precision=precision, **formatter_kw)  # noqa: E501
    # Dispatch on the artist type
    if isinstance(obj, mcontour.ContourSet):
        self._label_contours(obj, cobj, formatter, **labels_kw)
    elif isinstance(obj, mcollections.Collection):
        self._label_gridboxes(obj, formatter, **labels_kw)
    else:
        raise RuntimeError(f'Not possible to add labels to object {obj!r}.')
def _fix_edges(self, obj, edgefix=None, **kwargs):
    """
    Fix white lines between filled contours and mesh and fix issues with
    colormaps that are transparent. Also takes collection-translated keyword
    args and if it detects any were passed then we skip this step.

    Parameters
    ----------
    obj : matplotlib.cm.ScalarMappable
        The filled contour set or collection to fix.
    edgefix : bool or float, optional
        ``True`` uses the default edge width, ``False`` disables the fix,
        and a number is used directly as the edge width. Defaults to
        ``rc['cmap.edgefix']``.
    **kwargs
        Inspected only: the fix is skipped if explicit 'linewidths',
        'linestyles', or 'edgecolors' were passed.
    """
    # See: https://github.com/jklymak/contourfIssues
    # See: https://stackoverflow.com/q/15003353/4970632
    # NOTE: Use default edge width used for pcolor grid box edges. This is thick
    # enough to hide lines but thin enough to not add 'dots' to corners of boxes.
    edgefix = _not_none(edgefix, rc['cmap.edgefix'], True)
    linewidth = EDGEWIDTH if edgefix is True else 0 if edgefix is False else edgefix
    if not linewidth or not isinstance(obj, mcm.ScalarMappable):
        return
    if any(key in kwargs for key in ('linewidths', 'linestyles', 'edgecolors')):
        return
    # Remove edges when cmap has transparency
    # NOTE: mpl colormaps build their lookup table lazily, so force it here
    cmap = obj.cmap
    if not cmap._isinit:
        cmap._init()
    if all(cmap._lut[:-1, 3] == 1):  # fully opaque: draw edges in face color
        edgecolor = 'face'
    else:  # cmap has transparency: edges would show through, so hide them
        edgecolor = 'none'
    # Apply fixes
    # NOTE: This also covers TriContourSet returned by tricontour
    if isinstance(obj, mcollections.Collection):
        obj.set_linewidth(linewidth)
        obj.set_edgecolor(edgecolor)
    if isinstance(obj, mcontour.ContourSet):
        if not obj.filled:
            return
        for contour in obj.collections:
            contour.set_linestyle('-')
            contour.set_linewidth(linewidth)
            contour.set_edgecolor(edgecolor)
def _apply_plot(self, *pairs, vert=True, **kwargs):
    """
    Plot standard lines.

    Parameters
    ----------
    *pairs
        Positional ``x``, ``y`` (and optional format string) groups,
        unpacked by `_iter_pairs`.
    vert : bool, optional
        Whether the dependent variable goes on the y axis. When ``False``
        the coordinates are swapped just before drawing.
    **kwargs
        Line properties plus error-bar, error-shading, cycle, and guide
        settings consumed by the helper methods below.

    Returns
    -------
    objs : list
        The handles (bundled with error-bar/shading artists when present).
    """
    # Plot the lines
    objs = []
    kws = kwargs.copy()
    _process_props(kws, 'line')
    kws, extents = self._parse_inbounds(**kws)
    for xs, ys, fmt in self._iter_pairs(*pairs):
        xs, ys, kw = self._standardize_1d(xs, ys, vert=vert, **kws)
        ys, kw = _distribution_reduce(ys, **kw)
        guide_kw = _pop_params(kw, self._add_queued_guide)  # after standardize
        for _, n, x, y, kw in self._iter_columns(xs, ys, **kw):
            # Error indications consume their keywords before cycle parsing
            *eb, kw = self._error_bars(x, y, vert=vert, **kw)
            *es, kw = self._error_shading(x, y, vert=vert, **kw)
            kw = self._parse_cycle(n, **kw)
            if not vert:
                x, y = y, x
            a = [x, y]
            if fmt is not None:  # x1, y1, fmt1, x2, y2, fm2... style input
                a.append(fmt)
            obj, = self._plot_safe('plot', *a, **kw)
            self._restrict_inbounds(extents, x, y)
            objs.append((*eb, *es, obj) if eb or es else obj)
    # Add sticky edges so the independent axis fits the data tightly
    axis = 'x' if vert else 'y'
    for obj in objs:
        if not isinstance(obj, mlines.Line2D):
            continue  # TODO: still looks good with error caps???
        data = getattr(obj, 'get_' + axis + 'data')()
        if not data.size:
            continue
        convert = getattr(self, 'convert_' + axis + 'units')
        edges = getattr(obj.sticky_edges, axis)
        min_, max_ = _safe_range(data)
        if min_ is not None:
            edges.append(convert(min_))
        if max_ is not None:
            edges.append(convert(max_))
    self._add_queued_guide(objs, **guide_kw)
    return objs  # always return list to match matplotlib behavior
@_snippet_manager
def line(self, *args, **kwargs):
    """
    %(plot.plot)s
    """
    # Thin alias for plot()
    return self.plot(*args, **kwargs)
@_snippet_manager
def linex(self, *args, **kwargs):
    """
    %(plot.plotx)s
    """
    # Thin alias for plotx()
    return self.plotx(*args, **kwargs)
@_preprocess_data('x', 'y', allow_extra=True)
@docstring._concatenate_original
@_snippet_manager
def plot(self, *args, **kwargs):
    """
    %(plot.plot)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_plot(*args, **kw)
@_preprocess_data('y', 'x', allow_extra=True)
@_snippet_manager
def plotx(self, *args, **kwargs):
    """
    %(plot.plotx)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_plot(*args, **kw)
def _apply_step(self, *pairs, vert=True, where='pre', **kwargs):
    """
    Plot the steps.

    Parameters
    ----------
    *pairs
        Positional ``x``, ``y`` (and optional format string) groups.
    vert : bool, optional
        Whether the dependent variable goes on the y axis.
    where : {'pre', 'post', 'mid'}, optional
        The step placement, translated into a ``drawstyle`` default.
    **kwargs
        Line properties, cycle settings, and guide settings.

    Returns
    -------
    objs : list
        The step-plot handles.

    Raises
    ------
    ValueError
        If `where` is not one of the three supported options.
    """
    # Plot the steps
    # NOTE: Internally matplotlib plot() calls step() so we could use that
    # approach... but instead repeat _apply_plot internals here so we can
    # disable error indications that make no sense for 'step' plots.
    kws = kwargs.copy()
    if where not in ('pre', 'post', 'mid'):
        raise ValueError(f"Invalid where={where!r}. Options are 'pre', 'post', 'mid'.")  # noqa: E501
    _process_props(kws, 'line')
    kws.setdefault('drawstyle', 'steps-' + where)
    kws, extents = self._parse_inbounds(**kws)
    objs = []
    for xs, ys, fmt in self._iter_pairs(*pairs):
        xs, ys, kw = self._standardize_1d(xs, ys, vert=vert, **kws)
        guide_kw = _pop_params(kw, self._add_queued_guide)  # after standardize
        if fmt is not None:
            kw['fmt'] = fmt
        for _, n, x, y, *a, kw in self._iter_columns(xs, ys, **kw):
            kw = self._parse_cycle(n, **kw)
            if not vert:
                x, y = y, x
            obj, = self._plot_safe('step', x, y, *a, **kw)
            self._restrict_inbounds(extents, x, y)
            objs.append(obj)
    self._add_queued_guide(objs, **guide_kw)
    return objs  # always return list to match matplotlib behavior
@_preprocess_data('x', 'y', allow_extra=True)
@docstring._concatenate_original
@_snippet_manager
def step(self, *args, **kwargs):
    """
    %(plot.step)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_step(*args, **kw)
@_preprocess_data('y', 'x', allow_extra=True)
@_snippet_manager
def stepx(self, *args, **kwargs):
    """
    %(plot.stepx)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_step(*args, **kw)
def _apply_stem(
    self, x, y, *,
    linefmt=None, markerfmt=None, basefmt=None, orientation=None, **kwargs
):
    """
    Plot stem lines and markers.

    Parameters
    ----------
    x, y : array-like
        The stem coordinates.
    linefmt, markerfmt, basefmt : str, optional
        Format strings for the stems, markers, and baseline. Only 'CN'
        cycle-color shorthands are detected when choosing defaults.
    orientation : {'vertical', 'horizontal'}, optional
        The stem direction. Defaults to 'vertical'.
    **kwargs
        Passed to `~matplotlib.axes.Axes.stem` along with cycle and guide
        settings.

    Returns
    -------
    obj
        The stem container.
    """
    # Parse input
    kw = kwargs.copy()
    kw, extents = self._parse_inbounds(**kw)
    x, y, kw = self._standardize_1d(x, y, **kw)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    # Set default colors
    # NOTE: 'fmt' strings can only be 2 to 3 characters and include color
    # shorthands like 'r' or cycle colors like 'C0'. Cannot use full color names.
    # NOTE: Matplotlib defaults try to make a 'reddish' color the base and 'bluish'
    # color the stems. To make this more robust we temporarily replace the cycler.
    # Bizarrely stem() only reads from the global cycler() so have to update it.
    fmts = (linefmt, basefmt, markerfmt)
    orientation = _not_none(orientation, 'vertical')
    if not any(isinstance(fmt, str) and re.match(r'\AC[0-9]', fmt) for fmt in fmts):
        # No explicit cycle colors: install a two-color negative/positive cycle
        cycle = constructor.Cycle((rc['negcolor'], rc['poscolor']), name='_no_name')
        kw.setdefault('cycle', cycle)
    kw['basefmt'] = _not_none(basefmt, 'C1-')  # red base
    kw['linefmt'] = linefmt = _not_none(linefmt, 'C0-')  # blue stems
    kw['markerfmt'] = _not_none(markerfmt, linefmt[:-1] + 'o')  # blue marker
    sig = inspect.signature(maxes.Axes.stem)
    if 'use_line_collection' in sig.parameters:
        # Only newer matplotlib versions accept this keyword
        kw.setdefault('use_line_collection', True)
    # Call function then restore property cycle
    # WARNING: Horizontal stem plots are only supported in recent versions of
    # matplotlib. Let matplotlib raise an error if need be.
    ctx = {}
    cycle, kw = self._parse_cycle(return_cycle=True, **kw)  # allow re-application
    if cycle is not None:
        ctx['axes.prop_cycle'] = cycle
    if orientation == 'horizontal':  # may raise error
        kw['orientation'] = orientation
    with rc.context(ctx):
        obj = self._plot_safe('stem', x, y, **kw)
    self._restrict_inbounds(extents, x, y, orientation=orientation)
    self._add_queued_guide(obj, **guide_kw)
    return obj
@_preprocess_data('x', 'y')
@docstring._concatenate_original
@_snippet_manager
def stem(self, *args, **kwargs):
    """
    %(plot.stem)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='vertical', **kwargs)
    return self._apply_stem(*args, **kw)
@_preprocess_data('x', 'y')
@_snippet_manager
def stemx(self, *args, **kwargs):
    """
    %(plot.stemx)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='horizontal', **kwargs)
    return self._apply_stem(*args, **kw)
@_preprocess_data('x', 'y', ('c', 'color', 'colors', 'values'))
@_snippet_manager
def parametric(self, x, y, c, *, interp=0, scalex=True, scaley=True, **kwargs):
    """
    %(plot.parametric)s
    """
    # Standardize arguments
    # NOTE: Values are inferred in _auto_format() the same way legend labels are
    # inferred. Will not always return an array like inferred coordinates do.
    kw = kwargs.copy()
    _process_props(kw, 'collection')
    kw, extents = self._parse_inbounds(**kw)
    x, y, kw = self._standardize_1d(x, y, values=c, autovalues=True, autoreverse=False, **kw)  # noqa: E501
    c = kw.pop('values', None)  # permits inferring values e.g. a simple ordinate
    c = np.arange(y.size) if c is None else _to_numpy_array(c)
    c, colorbar_kw = _get_coords(c, which='')
    _guide_kw_to_arg('colorbar', kw, **colorbar_kw)
    _guide_kw_to_arg('colorbar', kw, locator=c)
    # Interpolate values to allow for smooth gradations between values or just
    # to color switchover halfway between points (interp True, False respectively)
    if interp > 0:
        x_orig, y_orig, v_orig = x, y, c
        x, y, c = [], [], []
        for j in range(x_orig.shape[0] - 1):
            # Drop the last interpolated point of interior segments so
            # consecutive segments do not duplicate it
            idx = slice(None)
            if j + 1 < x_orig.shape[0] - 1:
                idx = slice(None, -1)
            x.extend(np.linspace(x_orig[j], x_orig[j + 1], interp + 2)[idx].flat)
            y.extend(np.linspace(y_orig[j], y_orig[j + 1], interp + 2)[idx].flat)
            c.extend(np.linspace(v_orig[j], v_orig[j + 1], interp + 2)[idx].flat)  # noqa: E501
        x, y, c = np.array(x), np.array(y), np.array(c)
    # Get coordinates and values for points to the 'left' and 'right' of joints
    # Each segment is (midpoint-to-previous, point, midpoint-to-next)
    coords = []
    for i in range(y.shape[0]):
        icoords = np.empty((3, 2))
        for j, arr in enumerate((x, y)):
            icoords[0, j] = arr[0] if i == 0 else 0.5 * (arr[i - 1] + arr[i])
            icoords[1, j] = arr[i]
            icoords[2, j] = arr[-1] if i + 1 == y.shape[0] else 0.5 * (arr[i + 1] + arr[i])  # noqa: E501
        coords.append(icoords)
    coords = np.array(coords)
    # Get the colormap accounting for 'discrete' mode
    discrete = kw.get('discrete', None)
    if discrete is not None and not discrete:
        a = (x, y, c)  # pick levels from vmin and vmax, possibly limiting range
    else:
        a, kw['values'] = (), c
    kw = self._parse_cmap(*a, line_plot=True, **kw)
    cmap, norm = kw.pop('cmap'), kw.pop('norm')
    # Add collection with some custom attributes
    # NOTE: Modern API uses self._request_autoscale_view but this is
    # backwards compatible to earliest matplotlib versions.
    guide_kw = _pop_params(kw, self._add_queued_guide)
    obj = mcollections.LineCollection(
        coords, cmap=cmap, norm=norm,
        linestyles='-', capstyle='butt', joinstyle='miter',
    )
    obj.set_array(c)  # the ScalarMappable method
    obj.update({key: value for key, value in kw.items() if key not in ('color',)})
    self.add_collection(obj)
    self.autoscale_view(scalex=scalex, scaley=scaley)
    self._add_queued_guide(obj, **guide_kw)
    return obj
def _apply_lines(
    self, xs, ys1, ys2, colors, *,
    vert=True, stack=None, stacked=None, negpos=False, **kwargs
):
    """
    Plot vertical or horizontal lines at each point.

    Parameters
    ----------
    xs : array-like
        The line positions.
    ys1, ys2 : array-like
        The line bounds.
    colors : color-spec or sequence, optional
        The line color(s), forwarded as the 'colors' keyword.
    vert : bool, optional
        Whether to draw vlines (``True``) or hlines (``False``).
    stack, stacked : bool, optional
        Whether successive columns are stacked on top of each other.
    negpos : bool, optional
        Whether to color negative and positive segments differently.
    **kwargs
        Collection properties plus cycle and guide settings.

    Returns
    -------
    The handle, or a list of handles when there are multiple columns.
    """
    # Parse input arguments
    kw = kwargs.copy()
    name = 'vlines' if vert else 'hlines'
    if colors is not None:
        kw['colors'] = colors
    _process_props(kw, 'collection')
    kw, extents = self._parse_inbounds(**kw)
    stack = _not_none(stack=stack, stacked=stacked)
    xs, ys1, ys2, kw = self._standardize_1d(xs, ys1, ys2, vert=vert, **kw)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    # Support "negative" and "positive" lines
    # TODO: Ensure 'linewidths' etc. are applied! For some reason
    # previously thought they had to be manually applied.
    y0 = 0  # running baseline offset for stacking
    objs, sides = [], []
    for _, n, x, y1, y2, kw in self._iter_columns(xs, ys1, ys2, **kw):
        kw = self._parse_cycle(n, **kw)
        if stack:
            y1 = y1 + y0  # avoid in-place modification
            y2 = y2 + y0
            y0 = y0 + y2 - y1  # irrelevant that we added y0 to both
        if negpos:
            obj = self._plot_negpos(name, x, y1, y2, colorkey='colors', **kw)
        else:
            obj = self._plot_safe(name, x, y1, y2, **kw)
        for y in (y1, y2):
            self._restrict_inbounds(extents, x, y, vert=vert)
            if y.size == 1:  # add sticky edges if bounds are scalar
                sides.append(y)
        objs.append(obj)
    # Draw guide and add sticky edges
    self._add_sticky_edges(objs, 'y' if vert else 'x', *sides)
    self._add_queued_guide(objs, **guide_kw)
    return objs[0] if len(objs) == 1 else objs
# WARNING: breaking change from native 'ymin' and 'ymax'
@_preprocess_data('x', 'y1', 'y2', ('c', 'color', 'colors'))
@_snippet_manager
def vlines(self, *args, **kwargs):
    """
    %(plot.vlines)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_lines(*args, **kw)
# WARNING: breaking change from native 'xmin' and 'xmax'
@_preprocess_data('y', 'x1', 'x2', ('c', 'color', 'colors'))
@_snippet_manager
def hlines(self, *args, **kwargs):
    """
    %(plot.hlines)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_lines(*args, **kw)
def _parse_markersize(self, s, *, smin=None, smax=None, **kwargs):
    """
    Scale the marker sizes with optional keyword args.

    Parameters
    ----------
    s : float, array-like, or None
        The marker size(s). Scalars and ``None`` are returned unchanged.
    smin, smax : float, optional
        The output size range. Default to the data minimum and maximum.
    **kwargs
        Returned unchanged alongside the scaled sizes.

    Returns
    -------
    s, kwargs
        The (possibly rescaled) sizes and the remaining keyword args.
    """
    if np.atleast_1d(s).size == 1:  # None or scalar
        return s, kwargs
    smin_true, smax_true = _safe_range(s)
    smin_true = _not_none(smin_true, 0)
    smax_true = _not_none(smax_true, rc['lines.markersize'])
    smin = _not_none(smin, smin_true)
    smax = _not_none(smax, smax_true)
    if smax_true == smin_true:
        # Degenerate data range (all sizes identical): the linear rescale
        # below would divide by zero, so map every marker to the midpoint
        # of the requested output range instead.
        return np.full(np.shape(s), 0.5 * (smin + smax)), kwargs
    # Linearly rescale sizes from the data range into [smin, smax]
    s = smin + (smax - smin) * (s - smin_true) / (smax_true - smin_true)
    return s, kwargs
def _apply_scatter(self, xs, ys, ss, cc, *, vert=True, **kwargs):
    """
    Apply scatter or scatterx markers.

    Parameters
    ----------
    xs, ys : array-like
        The marker coordinates.
    ss : float or array-like, optional
        The marker size(s), rescaled by `_parse_markersize`.
    cc : color-spec or array-like, optional
        The marker color(s), parsed by `_parse_color`.
    vert : bool, optional
        Whether the dependent variable goes on the y axis.
    **kwargs
        Marker properties plus error-bar, error-shading, cycle, and
        guide settings.

    Returns
    -------
    The handle, or a list of handles when there are multiple columns.
    """
    # Apply from property cycle. Keys are cycle keys and values are scatter keys
    # NOTE: Matplotlib uses the property cycler in _get_patches_for_fill for
    # scatter() plots. It only ever inherits color from that. We instead use
    # _get_lines to help overarching goal of unifying plot() and scatter().
    cycle_manually = {
        'color': 'c',
        'markersize': 's',
        'linewidth': 'linewidths',
        'linestyle': 'linestyles',
        'markeredgewidth': 'linewidths',
        'markeredgecolor': 'edgecolors',
        'alpha': 'alpha',
        'marker': 'marker',
    }
    # Iterate over the columns
    kw = kwargs.copy()
    _process_props(kw, 'line')
    kw, extents = self._parse_inbounds(**kw)
    xs, ys, kw = self._standardize_1d(xs, ys, vert=vert, autoreverse=False, **kw)
    ss, kw = self._parse_markersize(ss, **kw)  # parse 's'
    cc, kw = self._parse_color(xs, ys, cc, apply_cycle=False, **kw)  # parse 'c'
    ys, kw = _distribution_reduce(ys, **kw)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    objs = []
    for _, n, x, y, s, c, kw in self._iter_columns(xs, ys, ss, cc, **kw):
        kw['s'], kw['c'] = s, c  # make _parse_cycle() detect these
        *eb, kw = self._error_bars(x, y, vert=vert, **kw)
        *es, kw = self._error_shading(x, y, vert=vert, color_key='c', **kw)
        kw = self._parse_cycle(n, cycle_manually=cycle_manually, **kw)
        if not vert:
            x, y = y, x
        obj = self._plot_safe('scatter', x, y, **kw)
        self._restrict_inbounds(extents, x, y)
        objs.append((*eb, *es, obj) if eb or es else obj)
    self._add_queued_guide(objs, **guide_kw)
    return objs[0] if len(objs) == 1 else objs
@_preprocess_data(
    'x', 'y', ('s', 'ms', 'markersize'), ('c', 'color', 'colors'),
    keywords=('lw', 'linewidth', 'linewidths', 'ec', 'edgecolor', 'edgecolors', 'fc', 'facecolor', 'facecolors')  # noqa: E501
)
@docstring._concatenate_original
@_snippet_manager
def scatter(self, *args, **kwargs):
    """
    %(plot.scatter)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_scatter(*args, **kw)
@_preprocess_data(
    'y', 'x', ('s', 'ms', 'markersize'), ('c', 'color', 'colors'),
    keywords=('lw', 'linewidth', 'linewidths', 'ec', 'edgecolor', 'edgecolors', 'fc', 'facecolor', 'facecolors')  # noqa: E501
)
@_snippet_manager
def scatterx(self, *args, **kwargs):
    """
    %(plot.scatterx)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_scatter(*args, **kw)
def _apply_fill(
    self, xs, ys1, ys2, where, *,
    vert=True, negpos=None, stack=None, stacked=None, **kwargs
):
    """
    Apply area shading.

    Parameters
    ----------
    xs : array-like
        The independent coordinates.
    ys1, ys2 : array-like
        The shading bounds.
    where : array-like, optional
        The boolean mask passed through to the fill command.
    vert : bool, optional
        Whether to use fill_between (``True``) or fill_betweenx (``False``).
    negpos : bool, optional
        Whether to shade negative and positive regions differently.
    stack, stacked : bool, optional
        Whether successive columns are stacked on top of each other.
    **kwargs
        Patch properties plus cycle and guide settings.

    Returns
    -------
    The handle, or a list of handles when there are multiple columns.
    """
    # Parse input arguments
    kw = kwargs.copy()
    _process_props(kw, 'patch')
    kw, extents = self._parse_inbounds(**kw)
    name = 'fill_between' if vert else 'fill_betweenx'
    stack = _not_none(stack=stack, stacked=stacked)
    xs, ys1, ys2, kw = self._standardize_1d(xs, ys1, ys2, vert=vert, **kw)
    # Draw patches with default edge width zero
    y0 = 0  # running baseline offset for stacking
    objs, xsides, ysides = [], [], []
    guide_kw = _pop_params(kw, self._add_queued_guide)
    for _, n, x, y1, y2, w, kw in self._iter_columns(xs, ys1, ys2, where, **kw):
        kw = self._parse_cycle(n, **kw)
        if stack:
            y1 = y1 + y0  # avoid in-place modification
            y2 = y2 + y0
            y0 = y0 + y2 - y1  # irrelevant that we added y0 to both
        if negpos:
            # NOTE: pass 'where' so plot_negpos can ignore it and issue a warning
            obj = self._plot_negpos(name, x, y1, y2, where=w, use_where=True, **kw)
        else:
            obj = self._plot_safe(name, x, y1, y2, where=w, **kw)
        xsides.append(x)
        for y in (y1, y2):
            self._restrict_inbounds(extents, x, y, vert=vert)
            if y.size == 1:  # add sticky edges if bounds are scalar
                ysides.append(y)
        objs.append(obj)
    # Draw guide and add sticky edges
    self._add_queued_guide(objs, **guide_kw)
    for axis, sides in zip('xy' if vert else 'yx', (xsides, ysides)):
        self._add_sticky_edges(objs, axis, *sides)
    return objs[0] if len(objs) == 1 else objs
@_snippet_manager
def area(self, *args, **kwargs):
    """
    %(plot.fill_between)s
    """
    # Thin alias for fill_between()
    return self.fill_between(*args, **kwargs)
@_snippet_manager
def areax(self, *args, **kwargs):
    """
    %(plot.fill_betweenx)s
    """
    # Thin alias for fill_betweenx()
    return self.fill_betweenx(*args, **kwargs)
@_preprocess_data('x', 'y1', 'y2', 'where')
@docstring._concatenate_original
@_snippet_manager
def fill_between(self, *args, **kwargs):
    """
    %(plot.fill_between)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_fill(*args, **kw)
@_preprocess_data('y', 'x1', 'x2', 'where')
@docstring._concatenate_original
@_snippet_manager
def fill_betweenx(self, *args, **kwargs):
    """
    %(plot.fill_betweenx)s
    """
    # NOTE: The 'horizontal' orientation will be inferred by downstream
    # wrappers using the function name.
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_fill(*args, **kw)
@staticmethod
def _convert_bar_width(x, width=1):
    """
    Convert bar plot widths from relative to coordinate spacing. Relative
    widths are much more convenient for users.
    """
    # WARNING: This will fail for non-numeric non-datetime64 singleton
    # datatypes but this is good enough for vast majority of cases.
    coords = _to_numpy_array(x)
    if len(coords) >= 2:
        # Per-bar spacing, with the final step repeated for the last bar
        steps = coords[1:] - coords[:-1]
        steps = np.concatenate((steps, steps[-1:]))
    elif coords.dtype == np.datetime64:
        steps = np.timedelta64(1, 'D')
    else:
        steps = np.array(0.5)
    if np.issubdtype(coords.dtype, np.datetime64):
        # Avoid integer timedelta truncation
        steps = steps.astype('timedelta64[ns]')
    return width * steps
def _apply_bar(
    self, xs, hs, ws, bs, *, absolute_width=False,
    stack=None, stacked=None, negpos=False, orientation='vertical', **kwargs
):
    """
    Apply bar or barh command. Support default "minima" at zero.

    Parameters
    ----------
    xs : array-like
        The bar positions.
    hs : array-like
        The bar heights (lengths).
    ws : array-like, optional
        The bar widths, in relative units unless ``absolute_width``.
    bs : array-like, optional
        The bar bottoms (baselines).
    absolute_width : bool, optional
        Whether widths are already in data coordinates rather than
        relative to the coordinate step.
    stack, stacked : bool, optional
        Whether successive columns are stacked instead of grouped
        side-by-side.
    negpos : bool, optional
        Whether to color negative and positive bars differently.
    orientation : {'vertical', 'horizontal'}, optional
        The bar direction.
    **kwargs
        Patch properties plus error-bar, cycle, and guide settings.

    Returns
    -------
    The handle, or a list of handles when there are multiple columns.
    """
    # Parse args
    kw = kwargs.copy()
    kw, extents = self._parse_inbounds(**kw)
    name = 'barh' if orientation == 'horizontal' else 'bar'
    stack = _not_none(stack=stack, stacked=stacked)
    xs, hs, kw = self._standardize_1d(xs, hs, orientation=orientation, **kw)
    # Call func after converting bar width
    b0 = 0  # running baseline offset for stacking
    objs = []
    _process_props(kw, 'patch')
    kw.setdefault('edgecolor', 'black')
    hs, kw = _distribution_reduce(hs, **kw)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    for i, n, x, h, w, b, kw in self._iter_columns(xs, hs, ws, bs, **kw):
        kw = self._parse_cycle(n, **kw)
        # Adjust x or y coordinates for grouped and stacked bars
        w = _not_none(w, np.array([0.8]))  # same as mpl but in *relative* units
        b = _not_none(b, np.array([0.0]))  # same as mpl
        if not absolute_width:
            w = self._convert_bar_width(x, w)
        if stack:
            b = b + b0
            b0 = b0 + h
        else:  # instead "group" the bars (this is no-op if we have 1 column)
            w = w / n  # rescaled
            o = 0.5 * (n - 1)  # center coordinate
            x = x + w * (i - o)  # += may cause integer/float casting issue
        # Draw simple bars
        *eb, kw = self._error_bars(x, b + h, orientation=orientation, **kw)
        if negpos:
            obj = self._plot_negpos(name, x, h, w, b, use_zero=True, **kw)
        else:
            obj = self._plot_safe(name, x, h, w, b, **kw)
        for y in (b, b + h):
            self._restrict_inbounds(extents, x, y, orientation=orientation)
        objs.append((*eb, obj) if eb else obj)
    self._add_queued_guide(objs, **guide_kw)
    return objs[0] if len(objs) == 1 else objs
@_preprocess_data('x', 'height', 'width', 'bottom')
@docstring._concatenate_original
@_snippet_manager
def bar(self, *args, **kwargs):
    """
    %(plot.bar)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='vertical', **kwargs)
    return self._apply_bar(*args, **kw)
# WARNING: Swap 'height' and 'width' here so that they are always relative
# to the 'tall' axis. This lets people always pass 'width' as keyword
@_preprocess_data('y', 'height', 'width', 'left')
@docstring._concatenate_original
@_snippet_manager
def barh(self, *args, **kwargs):
    """
    %(plot.barh)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='horizontal', **kwargs)
    return self._apply_bar(*args, **kw)
def _apply_boxplot(
    self, x, y, *,
    mean=None, means=None, vert=True,
    fill=None, marker=None, markersize=None,
    **kwargs
):
    """
    Apply the box plot.

    Parameters
    ----------
    x : array-like
        The box positions.
    y : array-like
        The distribution data.
    mean, means : bool, optional
        Whether to draw mean lines in addition to medians.
    vert : bool, optional
        Whether boxes are vertical.
    fill : bool, optional
        Whether to fill boxes with the face color.
    marker, markersize : optional
        The outlier ("flier") marker style and size.
    **kwargs
        Global patch properties plus per-artist properties with
        'box', 'whisker', 'cap', 'flier', 'median', 'mean' prefixes.

    Returns
    -------
    obj : dict
        The boxplot artist dictionary returned by matplotlib.
    """
    # Global and fill properties
    kw = kwargs.copy()
    _process_props(kw, 'patch')
    linewidth = kw.pop('linewidth', rc['patch.linewidth'])
    edgecolor = kw.pop('edgecolor', 'black')
    fillcolor = kw.pop('facecolor', None)
    fillalpha = kw.pop('alpha', None)
    fill = fill is True or fillcolor is not None or fillalpha is not None
    if fill and fillcolor is None:  # TODO: support e.g. 'facecolor' cycle?
        parser = self._get_patches_for_fill
        fillcolor = parser.get_next_color()
    fillalpha = _not_none(fillalpha, 1)
    # Artist-specific properties
    # NOTE: Output dict keys are plural but we use singular for keyword args
    # NOTE: rstrip('es') strips trailing characters from the set {'e', 's'},
    # which happens to yield the singular for every key here ('boxes' ->
    # 'box', 'whiskers' -> 'whisker', 'caps' -> 'cap', etc.).
    props = {}
    for key in ('boxes', 'whiskers', 'caps', 'fliers', 'medians', 'means'):
        prefix = key.rstrip('es')  # singular form
        props[key] = iprops = _pop_props(kw, 'line', prefix=prefix)
        iprops.setdefault('color', edgecolor)
        iprops.setdefault('linewidth', linewidth)
        iprops.setdefault('markeredgecolor', edgecolor)
    means = _not_none(mean=mean, means=means, showmeans=kw.get('showmeans'))
    if means:
        kw['showmeans'] = kw['meanline'] = True
    # Call function
    x, y, kw = self._standardize_1d(x, y, autoy=False, autoguide=False, vert=vert, **kw)  # noqa: E501
    kw.setdefault('positions', x)
    obj = self._plot_safe('boxplot', y, vert=vert, **kw)
    # Modify artist settings
    for key, aprops in props.items():
        if key not in obj:  # possible if not rendered
            continue
        artists = obj[key]
        if not isinstance(fillalpha, list):
            fillalpha = [fillalpha] * len(artists)
        if not isinstance(fillcolor, list):
            fillcolor = [fillcolor] * len(artists)
        for i, artist in enumerate(artists):
            # Update lines used for boxplot components
            # TODO: Test this thoroughly!
            # NOTE: caps and whiskers come in pairs per box, hence i // 2
            iprops = {
                key: (
                    value[i // 2 if key in ('caps', 'whiskers') else i]
                    if isinstance(value, (list, ndarray))
                    else value
                ) for key, value in aprops.items()
            }
            artist.update(iprops)
            # "Filled" boxplot by adding patch beneath line path
            if key == 'boxes':
                ifillcolor = fillcolor[i]  # must stay within the if statement
                ifillalpha = fillalpha[i]
                if ifillcolor is not None or ifillalpha is not None:
                    patch = mpatches.PathPatch(
                        artist.get_path(),
                        linewidth=0, facecolor=ifillcolor, alpha=ifillalpha,
                    )
                    self.add_artist(patch)
            # Outlier markers
            if key == 'fliers':
                if marker is not None:
                    artist.set_marker(marker)
                if markersize is not None:
                    artist.set_markersize(markersize)
    return obj
@_snippet_manager
def box(self, *args, **kwargs):
    """
    %(plot.boxplot)s
    """
    # Thin alias for boxplot()
    return self.boxplot(*args, **kwargs)
@_snippet_manager
def boxh(self, *args, **kwargs):
    """
    %(plot.boxploth)s
    """
    # Thin alias for boxploth()
    return self.boxploth(*args, **kwargs)
@_preprocess_data('positions', 'y')
@docstring._concatenate_original
@_snippet_manager
def boxplot(self, *args, **kwargs):
    """
    %(plot.boxplot)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_boxplot(*args, **kw)
@_preprocess_data('positions', 'x')
@_snippet_manager
def boxploth(self, *args, **kwargs):
    """
    %(plot.boxploth)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_boxplot(*args, **kw)
def _apply_violinplot(self, x, y, vert=True, **kwargs):
    """
    Apply the violinplot.

    Parameters
    ----------
    x : array-like
        The violin positions.
    y : array-like
        The distribution data.
    vert : bool, optional
        Whether the violins are vertical.
    **kwargs
        Patch properties plus error-bar, cycle, and guide settings.
        'showmeans'/'showmedians' are translated into error-bar
        indications and 'showextrema' is ignored.

    Returns
    -------
    obj : dict
        The violinplot artist dictionary returned by matplotlib.
    """
    # Parse keyword args
    kw = kwargs.copy()
    _process_props(kw, 'patch')
    linewidth = kw.pop('linewidth', rc['patch.linewidth'])
    edgecolor = kw.pop('edgecolor', 'black')
    fillcolor = kw.pop('facecolor', None)
    fillalpha = kw.pop('alpha', None)
    fillalpha = _not_none(fillalpha, 1)
    kw.setdefault('capsize', 0)  # caps are redundant for violin plots
    kw.setdefault('means', kw.pop('showmeans', None))  # for _indicate_error
    kw.setdefault('medians', kw.pop('showmedians', None))
    if kw.pop('showextrema', None):
        warnings._warn_proplot('Ignoring showextrema=True.')
    # Parse and control error bars
    x, y, kw = self._standardize_1d(x, y, autoy=False, autoguide=False, vert=vert, **kw)  # noqa: E501
    y, kw = _distribution_reduce(y, **kw)
    *eb, kw = self._error_bars(x, y, vert=vert, default_boxes=True, **kw)  # noqa: E501
    kw = self._parse_cycle(**kw)
    # Call function
    kw.pop('labels', None)  # already applied in _standardize_1d
    # Native mean/median/extrema indicators are disabled in favor of the
    # error bars drawn above
    kw.update({'showmeans': False, 'showmedians': False, 'showextrema': False})
    kw.setdefault('positions', x)
    y = kw.pop('distribution', None)  # 'y' was changed in _distribution_reduce
    obj = self._plot_safe('violinplot', y, vert=vert, **kw)
    # Modify body settings
    artists = (obj or {}).get('bodies', ())
    if not isinstance(fillalpha, list):
        fillalpha = [fillalpha] * len(artists)
    if not isinstance(fillcolor, list):
        fillcolor = [fillcolor] * len(artists)
    if not isinstance(edgecolor, list):
        edgecolor = [edgecolor] * len(artists)
    for i, artist in enumerate(artists):
        artist.set_linewidths(linewidth)
        if fillalpha[i] is not None:
            artist.set_alpha(fillalpha[i])
        if fillcolor[i] is not None:
            artist.set_facecolor(fillcolor[i])
        if edgecolor[i] is not None:
            artist.set_edgecolor(edgecolor[i])
    return obj
@_snippet_manager
def violin(self, *args, **kwargs):
    """
    %(plot.violinplot)s
    """
    # WARNING: This disables use of 'violin' by users but
    # probably very few people use this anyway.
    # Matplotlib calls violin() internally from violinplot(); route those
    # internal calls to the native implementation.
    if getattr(self, '_internal_call', None):
        return super().violin(*args, **kwargs)
    return self.violinplot(*args, **kwargs)
@_snippet_manager
def violinh(self, *args, **kwargs):
    """
    %(plot.violinploth)s
    """
    # Thin alias for violinploth()
    return self.violinploth(*args, **kwargs)
@_preprocess_data('positions', 'y')
@docstring._concatenate_original
@_snippet_manager
def violinplot(self, *args, **kwargs):
    """
    %(plot.violinplot)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_vert=True, **kwargs)
    return self._apply_violinplot(*args, **kw)
@_preprocess_data('positions', 'x')
@_snippet_manager
def violinploth(self, *args, **kwargs):
    """
    %(plot.violinploth)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_vert=False, **kwargs)
    return self._apply_violinplot(*args, **kw)
def _apply_hist(self, xs, bins, *, orientation='vertical', **kwargs):
    """
    Apply the histogram.

    Parameters
    ----------
    xs : array-like
        The input data.
    bins : int or array-like, optional
        The bin count or bin edges, forwarded to matplotlib.
    orientation : {'vertical', 'horizontal'}, optional
        The bar direction.
    **kwargs
        Passed to `~matplotlib.axes.Axes.hist` along with cycle and
        guide settings.

    Returns
    -------
    The hist result, or a list of results when there are multiple columns.
    """
    # WARNING: Weirdly while Axes.bar() adds labels to the container
    # Axes.hist() adds them to the first element in the container. The
    # legend handle reader just looks for items with get_label() so we
    # manually apply labels to the container on the result.
    _, xs, kw = self._standardize_1d(xs, orientation=orientation, **kwargs)
    objs = []
    guide_kw = _pop_params(kw, self._add_queued_guide)
    if bins is not None:
        kw['bins'] = bins
    for _, n, x, kw in self._iter_columns(xs, **kw):
        kw = self._parse_cycle(n, **kw)
        obj = self._plot_safe('hist', x, orientation=orientation, **kw)
        if 'label' in kw:
            # Propagate the label to the patches and the container itself
            for arg in obj[2]:
                arg.set_label(kw['label'])
            if hasattr(obj[2], 'set_label'):  # recent mpl versions
                obj[2].set_label(kw['label'])
        objs.append(obj)
    self._add_queued_guide(objs, **guide_kw)
    return objs[0] if len(objs) == 1 else objs
@_preprocess_data('x', 'bins', keywords='weights')
@docstring._concatenate_original
@_snippet_manager
def hist(self, *args, **kwargs):
    """
    %(plot.hist)s
    """
    # Enforce the vertical default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='vertical', **kwargs)
    return self._apply_hist(*args, **kw)
@_preprocess_data('y', 'bins', keywords='weights')
@_snippet_manager
def histh(self, *args, **kwargs):
    """
    %(plot.histh)s
    """
    # Enforce the horizontal default then delegate to the shared implementation
    kw = _parse_vert(default_orientation='horizontal', **kwargs)
    return self._apply_hist(*args, **kw)
# WARNING: 'labels' and 'colors' no longer passed through `data` (seems like
# extremely niche usage... `data` variables should be data-like)
@_preprocess_data('x', 'explode')
@docstring._concatenate_original
@_snippet_manager
def pie(self, x, explode, *, labelpad=None, labeldistance=None, **kwargs):
    """
    Plot a pie chart.

    Parameters
    ----------
    %(plot.args_1d_y)s
    %(plot.args_1d_shared)s

    Other parameters
    ----------------
    %(plot.cycle)s
    %(plot.labels_1d)s
    labelpad, labeldistance : float, optional
        The distance at which labels are drawn in radial coordinates.
    lw, linewidth, linewidths : float, optional
        The edge width for the pie sectors.
    ec, edgecolor, edgecolors : color-spec, optional
        The edge color for the pie sectors.

    See also
    --------
    matplotlib.axes.Axes.pie
    """
    pad = _not_none(labeldistance=labeldistance, labelpad=labelpad, default=1.15)
    # Patch properties are forwarded as wedge properties
    props = _pop_props(kwargs, 'patch')
    props.setdefault('edgecolor', 'k')  # sensible default
    _, x, kwargs = self._standardize_1d(
        x, autox=False, autoy=False, **kwargs
    )
    kwargs = self._parse_cycle(**kwargs)
    kwargs['labeldistance'] = pad
    obj = self._plot_safe('pie', x, explode, wedgeprops=props, **kwargs)
    return obj
@_preprocess_data('x', 'y', 'bins', keywords='weights')
@docstring._concatenate_original
@_snippet_manager
def hist2d(self, x, y, bins, **kwargs):
    """
    Plot a standard 2D histogram.

    Parameters
    ----------
    %(plot.args_1d_y)s
    bins : int or 2-tuple of int, or array-like or 2-tuple of array-like, optional
        The bin count or sequence of bins for each dimension or both dimensions.
    %(plot.weights)s
    %(plot.args_1d_shared)s

    Other parameters
    ----------------
    %(plot.cmap_norm)s
    %(plot.levels_manual)s
    %(plot.levels_vlim)s
    %(plot.levels_auto)s
    %(plot.labels_2d)s
    %(plot.guide)s
    **kwargs
        Passed to `~matplotlib.axes.Axes.hist2d`.

    See also
    --------
    PlotAxes.hexbin
    matplotlib.axes.Axes.hist2d
    """
    # Rely on pcolormesh() override for this.
    if bins is not None:
        kwargs['bins'] = bins
    return super().hist2d(x, y, **kwargs)
# WARNING: breaking change from native 'C'
@_preprocess_data('x', 'y', 'weights')
@docstring._concatenate_original
@_snippet_manager
def hexbin(self, x, y, weights, **kwargs):
    """
    Plot a 2D hexagonally binned histogram.

    Parameters
    ----------
    %(plot.args_1d_y)s
    %(plot.weights)s
    %(plot.args_1d_shared)s

    Other parameters
    ----------------
    %(plot.cmap_norm)s
    %(plot.levels_manual)s
    %(plot.levels_vlim)s
    %(plot.labels_2d)s
    %(plot.guide)s
    **kwargs
        Passed to `~matplotlib.axes.Axes.hexbin`.

    See also
    --------
    PlotAxes.hist2d
    matplotlib.axes.Axes.hexbin
    """
    # WARNING: Cannot use automatic level generation here until counts are
    # estimated. Inside _parse_levels if no manual levels were provided then
    # _parse_autolev is skipped and args like levels=10 or locator=5 are ignored
    x, y, kw = self._standardize_1d(x, y, autovalues=True, **kwargs)
    _process_props(kw, 'collection')  # takes LineCollection props
    # Bin counts are unknown ahead of draw time, so discrete level
    # generation is disabled (skip_autolev, default_discrete=False).
    kw = self._parse_cmap(x, y, y, skip_autolev=True, default_discrete=False, **kw)
    norm = kw.get('norm', None)
    if norm is not None and not isinstance(norm, pcolors.DiscreteNorm):
        norm.vmin = norm.vmax = None  # remove nonsense values
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('hexbin', x, y, weights, **kw)
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def contour(self, x, y, z, **kwargs):
    """
    %(plot.contour)s
    """
    x, y, z, kw = self._standardize_2d(x, y, z, **kwargs)
    _process_props(kw, 'collection')
    # minlength=1 permits a single contour level; contour_plot enables
    # contour-specific colormap handling.
    kw = self._parse_cmap(x, y, z, minlength=1, contour_plot=True, **kw)
    cmap = kw.pop('cmap', None)
    # A single-color discrete colormap is passed as 'colors' instead,
    # because monochrome cmaps break negative-contour linestyles.
    if isinstance(cmap, pcolors.DiscreteColormap) and len(set(cmap.colors)) == 1:
        kw['colors'] = cmap.colors[0]  # otherwise negative linestyle fails
    else:
        kw['cmap'] = cmap
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    label = kw.pop('label', None)
    m = self._plot_safe('contour', x, y, z, **kw)
    # ContourSet has no set_label(); stash the label for the legend reader.
    m._legend_label = label
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def contourf(self, x, y, z, **kwargs):
    """
    %(plot.contourf)s
    """
    x, y, z, kw = self._standardize_2d(x, y, z, **kwargs)
    _process_props(kw, 'collection')
    kw = self._parse_cmap(x, y, z, contour_plot=True, **kw)
    # Edge properties are routed to a separate contour() pass below.
    contour_kw = _pop_kwargs(kw, 'edgecolors', 'linewidths', 'linestyles')
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    label = kw.pop('label', None)
    m = cm = self._plot_safe('contourf', x, y, z, **kw)
    # ContourSet has no set_label(); stash the label for the legend reader.
    m._legend_label = label
    self._fix_edges(m, **edgefix_kw, **contour_kw)  # skipped if bool(contour_kw)
    # Draw explicit contour lines when edge props or labels were requested.
    if contour_kw or labels_kw:
        cm = self._plot_edges('contour', x, y, z, **kw, **contour_kw)
    self._auto_labels(m, cm, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def pcolor(self, x, y, z, **kwargs):
    """
    %(plot.pcolor)s
    """
    # edges=True: pcolor() needs cell-edge coordinates, one more than z.
    x, y, z, kw = self._standardize_2d(x, y, z, edges=True, **kwargs)
    _process_props(kw, 'collection')
    # to_centers=True: levels are computed from cell-center values.
    kw = self._parse_cmap(x, y, z, to_centers=True, **kw)
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('pcolor', x, y, z, **kw)
    self._fix_edges(m, **edgefix_kw, **kw)  # hide white seams between cells
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def pcolormesh(self, x, y, z, **kwargs):
    """
    %(plot.pcolormesh)s
    """
    # Identical pipeline to pcolor(); only the native command differs.
    x, y, z, kw = self._standardize_2d(x, y, z, edges=True, **kwargs)
    _process_props(kw, 'collection')
    kw = self._parse_cmap(x, y, z, to_centers=True, **kw)
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('pcolormesh', x, y, z, **kw)
    self._fix_edges(m, **edgefix_kw, **kw)  # hide white seams between cells
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def pcolorfast(self, x, y, z, **kwargs):
    """
    %(plot.pcolorfast)s
    """
    # Identical pipeline to pcolor()/pcolormesh(); only the command differs.
    x, y, z, kw = self._standardize_2d(x, y, z, edges=True, **kwargs)
    _process_props(kw, 'collection')
    kw = self._parse_cmap(x, y, z, to_centers=True, **kw)
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('pcolorfast', x, y, z, **kw)
    self._fix_edges(m, **edgefix_kw, **kw)  # hide white seams between cells
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_snippet_manager
def heatmap(self, *args, aspect=None, **kwargs):
    """
    %(plot.heatmap)s
    """
    # A heatmap is a pcolormesh with continuous levels plus "matrix style"
    # formatting: fixed aspect, ticks centered on cells, no gridlines.
    obj = self.pcolormesh(*args, default_discrete=False, **kwargs)
    aspect = _not_none(aspect, rc['image.aspect'])
    if self.name != 'proplot_cartesian':
        warnings._warn_proplot(
            'The heatmap() command is meant for CartesianAxes. '
            'Please use pcolor() or pcolormesh() instead.'
        )
    else:
        # Derive major tick locations from the mesh cell centers by
        # averaging successive edge coordinates along both axes.
        coords = getattr(obj, '_coordinates', None)
        xlocator = ylocator = None
        if coords is not None:
            coords = 0.5 * (coords[1:, ...] + coords[:-1, ...])
            coords = 0.5 * (coords[:, 1:, :] + coords[:, :-1, :])
            xlocator, ylocator = coords[0, :, 0], coords[:, 0, 1]
        kw = {'aspect': aspect, 'xgrid': False, 'ygrid': False}
        # Only override locators the user has not already customized.
        if xlocator is not None and self.xaxis.isDefault_majloc:
            kw['xlocator'] = xlocator
        if ylocator is not None and self.yaxis.isDefault_majloc:
            kw['ylocator'] = ylocator
        if self.xaxis.isDefault_minloc:
            kw['xtickminor'] = False
        if self.yaxis.isDefault_minloc:
            kw['ytickminor'] = False
        self.format(**kw)
    return obj
@_preprocess_data('x', 'y', 'u', 'v', ('c', 'color', 'colors'))
@docstring._concatenate_original
@_snippet_manager
def barbs(self, x, y, u, v, c, **kwargs):
    """
    %(plot.barbs)s
    """
    x, y, u, v, kw = self._standardize_2d(x, y, u, v, allow1d=True, autoguide=False, **kwargs)  # noqa: E501
    _process_props(kw, 'line')  # applied to barbs
    c, kw = self._parse_color(x, y, c, **kw)
    # A scalar color-like 'c' means a uniform barb color rather than
    # per-barb colormap values.
    if mcolors.is_color_like(c):
        kw['barbcolor'], c = c, None
    a = [x, y, u, v]
    if c is not None:
        a.append(c)
    kw.pop('colorbar_kw', None)  # added by _parse_cmap
    m = self._plot_safe('barbs', *a, **kw)
    return m
@_preprocess_data('x', 'y', 'u', 'v', ('c', 'color', 'colors'))
@docstring._concatenate_original
@_snippet_manager
def quiver(self, x, y, u, v, c, **kwargs):
    """
    %(plot.quiver)s
    """
    x, y, u, v, kw = self._standardize_2d(x, y, u, v, allow1d=True, autoguide=False, **kwargs)  # noqa: E501
    _process_props(kw, 'line')  # applied to arrow outline
    c, kw = self._parse_color(x, y, c, **kw)
    # A scalar color-like 'c' means a uniform arrow color rather than
    # per-arrow colormap values (mirrors the barbs() handling).
    if mcolors.is_color_like(c):
        kw['color'], c = c, None
    args = [x, y, u, v]
    if c is not None:
        args.append(c)
    kw.pop('colorbar_kw', None)  # added by _parse_cmap
    return self._plot_safe('quiver', *args, **kw)
@_snippet_manager
def stream(self, *args, **kwargs):
    """
    %(plot.stream)s
    """
    # Convenience alias for streamplot().
    return self.streamplot(*args, **kwargs)
# WARNING: breaking change from native streamplot() fifth positional arg 'density'
@_preprocess_data('x', 'y', 'u', 'v', ('c', 'color', 'colors'), keywords='start_points')  # noqa: E501
@docstring._concatenate_original
@_snippet_manager
def streamplot(self, x, y, u, v, c, **kwargs):
    """
    %(plot.stream)s
    """
    x, y, u, v, kw = self._standardize_2d(x, y, u, v, **kwargs)
    _process_props(kw, 'line')  # applied to lines
    c, kw = self._parse_color(x, y, c, **kw)
    # Native streamplot() requires a color, so fall back on the next
    # property-cycle color when none was supplied.
    if c is None:  # throws an error if color not provided
        c = pcolors.to_hex(self._get_lines.get_next_color())
    kw['color'] = c  # always pass this
    guide_kw = _pop_params(kw, self._add_queued_guide)
    label = kw.pop('label', None)
    m = self._plot_safe('streamplot', x, y, u, v, **kw)
    m.lines.set_label(label)  # the collection label
    self._add_queued_guide(m.lines, **guide_kw)  # lines inside StreamplotSet
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def tricontour(self, x, y, z, **kwargs):
    """
    %(plot.tricontour)s
    """
    # Triangulation variant of contour(); inputs are unstructured 1D
    # coordinate arrays, so no 2D standardization is performed.
    kw = kwargs.copy()
    if x is None or y is None or z is None:
        raise ValueError('Three input arguments are required.')
    _process_props(kw, 'collection')
    kw = self._parse_cmap(x, y, z, minlength=1, contour_plot=True, **kw)
    cmap = kw.pop('cmap', None)
    # A single-color discrete colormap is passed as 'colors' instead,
    # because monochrome cmaps break negative-contour linestyles.
    if isinstance(cmap, pcolors.DiscreteColormap) and len(set(cmap.colors)) == 1:
        kw['colors'] = cmap.colors[0]  # otherwise negative linestyle fails
    else:
        kw['cmap'] = cmap
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    label = kw.pop('label', None)
    m = self._plot_safe('tricontour', x, y, z, **kw)
    # ContourSet has no set_label(); stash the label for the legend reader.
    m._legend_label = label
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def tricontourf(self, x, y, z, **kwargs):
    """
    %(plot.tricontourf)s
    """
    # Triangulation variant of contourf(); inputs are unstructured 1D arrays.
    kw = kwargs.copy()
    if x is None or y is None or z is None:
        raise ValueError('Three input arguments are required.')
    _process_props(kw, 'collection')
    # Edge properties are routed to a separate tricontour() pass below.
    # NOTE(review): these are popped *before* _parse_cmap here, while
    # contourf() pops them after -- confirm the asymmetry is intentional.
    contour_kw = _pop_kwargs(kw, 'edgecolors', 'linewidths', 'linestyles')
    kw = self._parse_cmap(x, y, z, contour_plot=True, **kw)
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    label = kw.pop('label', None)
    m = cm = self._plot_safe('tricontourf', x, y, z, **kw)
    # ContourSet has no set_label(); stash the label for the legend reader.
    m._legend_label = label
    self._fix_edges(m, **edgefix_kw, **contour_kw)  # skipped if bool(contour_kw)
    if contour_kw or labels_kw:
        cm = self._plot_edges('tricontour', x, y, z, **kw, **contour_kw)
    self._auto_labels(m, cm, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
@_preprocess_data('x', 'y', 'z')
@docstring._concatenate_original
@_snippet_manager
def tripcolor(self, x, y, z, **kwargs):
    """
    %(plot.tripcolor)s
    """
    # Triangulation variant of pcolor(); inputs are unstructured 1D arrays.
    kw = kwargs.copy()
    if x is None or y is None or z is None:
        raise ValueError('Three input arguments are required.')
    _process_props(kw, 'collection')
    kw = self._parse_cmap(x, y, z, **kw)
    edgefix_kw = _pop_params(kw, self._fix_edges)
    labels_kw = _pop_params(kw, self._auto_labels)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('tripcolor', x, y, z, **kw)
    self._fix_edges(m, **edgefix_kw, **kw)  # hide white seams between cells
    self._auto_labels(m, **labels_kw)
    self._add_queued_guide(m, **guide_kw)
    return m
# WARNING: breaking change from native 'X'
@_preprocess_data('z')
@docstring._concatenate_original
@_snippet_manager
def imshow(self, z, **kwargs):
    """
    %(plot.imshow)s
    """
    # Images use continuous levels by default. The **kwargs splat already
    # hands _parse_cmap its own dict, so no explicit copy is needed.
    kw = self._parse_cmap(z, default_discrete=False, **kwargs)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    mappable = self._plot_safe('imshow', z, **kw)
    self._add_queued_guide(mappable, **guide_kw)
    return mappable
# WARNING: breaking change from native 'Z'
@_preprocess_data('z')
@docstring._concatenate_original
@_snippet_manager
def matshow(self, z, **kwargs):
    """
    %(plot.matshow)s
    """
    # The **kwargs splat already hands _parse_cmap its own dict, so no
    # explicit copy is needed.
    kw = self._parse_cmap(z, **kwargs)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    mappable = self._plot_safe('matshow', z, **kw)
    self._add_queued_guide(mappable, **guide_kw)
    return mappable
# WARNING: breaking change from native 'Z'
@_preprocess_data('z')
@docstring._concatenate_original
@_snippet_manager
def spy(self, z, **kwargs):
    """
    %(plot.spy)s
    """
    kw = kwargs.copy()
    _process_props(kw, 'line')  # takes valid Line2D properties
    # Default to a two-color white/black map mimicking native spy() output.
    default_cmap = pcolors.DiscreteColormap(['w', 'k'], '_no_name')
    kw = self._parse_cmap(z, default_cmap=default_cmap, **kw)
    guide_kw = _pop_params(kw, self._add_queued_guide)
    m = self._plot_safe('spy', z, **kw)
    self._add_queued_guide(m, **guide_kw)
    return m
def set_prop_cycle(self, *args, **kwargs):
    """
    Set the active property cycle. Arguments are forwarded to the proplot
    ``Cycle`` constructor; naked positional arguments are treated as color
    arguments, and other properties must be passed as keywords. The native
    ``set_prop_cycle('color', color_list)`` form is *not* supported.
    """
    # Silent override. This is a strict superset of matplotlib functionality
    # with one exception: you cannot use e.g. set_prop_cycle('color', color_list).
    # Instead keyword args are required (but note naked positional arguments
    # are assumed color arguments). Cycles are still validated in rcsetup.cycler()
    cycle = self._active_cycle = constructor.Cycle(*args, **kwargs)
    return super().set_prop_cycle(cycle)  # set the property cycler after validation
# Rename the shorthands. Deprecated in version 0.8: boxes() -> box() and
# violins() -> violin(); the old names still resolve but emit a
# deprecation warning via warnings._rename_objs.
boxes = warnings._rename_objs('0.8', boxes=box)
violins = warnings._rename_objs('0.8', violins=violin)
|
'''
Created on Dec 22, 2016
@author: micro
'''
import joblib
from sklearn.svm import LinearSVC
#from dataset import load_digits
#import hog
import argparse
import imutils
import numpy as np
import mahotas
import cv2
from skimage import feature
def load_digits(datasetPath):
    """Load an MNIST-style CSV dataset.

    Each row holds the label in column 0 followed by 784 pixel values.
    Returns ``(data, target)`` where ``data`` has shape (n, 28, 28) and
    dtype uint8 and ``target`` is the (n,) label vector.
    """
    raw = np.genfromtxt(datasetPath, delimiter=",", dtype="uint8")
    labels = raw[:, 0]
    images = raw[:, 1:].reshape(raw.shape[0], 28, 28)
    return (images, labels)
def deskew(image, width):
    """Deskew a digit image via its image moments, then resize to *width*.

    The skew is estimated as mu11/mu02 (the standard OpenCV digit-deskew
    recipe) and removed with an inverse affine warp.
    """
    (h, w) = image.shape[:2]
    moments = cv2.moments(image)
    # BUG FIX: a blank or degenerate image has mu02 == 0, which previously
    # divided by zero. Skip the warp and just resize in that case.
    if abs(moments["mu02"]) < 1e-2:
        return imutils.resize(image, width=width)
    skew = moments["mu11"] / moments["mu02"]
    # Shear matrix that cancels the measured skew about the image center.
    M = np.float32([[1, skew, -0.5 * w * skew], [0, 1, 0]])
    image = cv2.warpAffine(image, M, (w, h),
                           flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    image = imutils.resize(image, width=width)
    return image
def center_extent(image, size):
    """Fit *image* onto a blank canvas of *size* = (width, height) and shift
    it so its center of mass sits at the canvas center.

    Returns the uint8 canvas with the translated image.
    """
    (eW, eH) = size
    # Scale the longer side down to the target extent.
    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)
    # BUG FIX: the canvas was allocated as (eH, eH), which crops or
    # over-allocates for non-square sizes. Rows = height, columns = width.
    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image
    # Translate so the center of mass coincides with the canvas center.
    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)
    return extent
##################################################################################################################################################################
class HOG:
    """Histogram-of-oriented-gradients descriptor with stored parameters.

    Thin wrapper around ``skimage.feature.hog`` so the same configuration
    can be reused for every image.
    """

    def __init__(self, orientations=9, pixelsPerCell=(8, 8),
                 cellsPerBlock=(3, 3), transform=False):
        # Descriptor hyper-parameters, reused by every describe() call.
        self.orientations = orientations
        self.pixelsPerCell = pixelsPerCell
        self.cellsPerBlock = cellsPerBlock
        self.transform = transform

    def describe(self, image):
        """Return the HOG feature vector for one grayscale image."""
        return feature.hog(
            image,
            orientations=self.orientations,
            pixels_per_cell=self.pixelsPerCell,
            cells_per_block=self.cellsPerBlock,
            transform_sqrt=self.transform,
        )
################################################################################
# ----- Training driver -----------------------------------------------------
# CLI: -d/--dataset is the CSV digits file, -m/--model the output path for
# the pickled classifier.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required = True,
    help = "path to the dataset file")
ap.add_argument("-m", "--model", required = True,
    help = "path to where the model will be stored")
args = vars(ap.parse_args())
# Load the raw 28x28 digit images and their labels.
(digits, target) = load_digits(args["dataset"])
data = []
# HOG descriptor configuration used for every training image.
hog = HOG(orientations = 18, pixelsPerCell = (10, 10),
    cellsPerBlock = (1, 1), transform = True)
# Preprocess each digit (deskew + center on a 20x20 canvas) and extract
# its HOG feature vector.
for image in digits:
    image = deskew(image, 20)
    image = center_extent(image, (20, 20))
    hist = hog.describe(image)
    data.append(hist)
# Train a linear SVM on the features and persist it to disk.
model = LinearSVC(random_state = 42)
model.fit(data, target)
joblib.dump(model, args["model"])
##################################################################################################################################################################
|
def solve():
    # Write your code here
    # Puzzle driver: hidden_operation() is supplied by the judge
    # environment, not this file. The probes below distinguish which
    # boolean operator it implements, then print the operator name and a
    # sample result.
    # Identity on a non-boolean argument implies 'or' semantics.
    if hidden_operation('random_text') == 'random_text':
        print('or')
        print(hidden_operation(None))
    # Truthy result for False implies negation.
    elif hidden_operation(False):
        print('not')
    else:
        print('and')
        print(hidden_operation(True))
|
import json
import unittest
import pyyoutube.models as models
class ChannelSectionModelTest(unittest.TestCase):
    """Model tests for pyyoutube channel-section deserialization."""

    BASE_PATH = "testdata/modeldata/channel_sections/"

    # Fixtures are loaded once at class-definition time from the testdata
    # tree; the tests below only assert on the parsed models.
    with open(BASE_PATH + "channel_section_info.json", "rb") as f:
        CHANNEL_SECTION_INFO = json.loads(f.read().decode("utf-8"))
    with open(BASE_PATH + "channel_section_response.json", "rb") as f:
        CHANNEL_SECTION_RESPONSE = json.loads(f.read().decode("utf-8"))

    def testChannelSection(self) -> None:
        """A single channel section parses with id, type and channel list."""
        m = models.ChannelSection.from_dict(self.CHANNEL_SECTION_INFO)
        self.assertEqual(m.id, "UC_x5XG1OV2P6uZZ5FSM9Ttw.e-Fk7vMPqLE")
        self.assertEqual(m.snippet.type, "multipleChannels")
        self.assertEqual(len(m.contentDetails.channels), 16)

    def testChannelSectionResponse(self) -> None:
        """A list response parses with its kind and all items."""
        m = models.ChannelSectionResponse.from_dict(self.CHANNEL_SECTION_RESPONSE)
        self.assertEqual(m.kind, "youtube#channelSectionListResponse")
        self.assertEqual(len(m.items), 10)
|
#! /usr/bin/env python3
"""Example map generator: King of the Hill Example
This script demonstrates vmflib2 by generating a basic "king of the hill" style
map. "King of the hill" is a game mode in Team Fortress 2 where each team
tries to maintain control of a central "control point" for some total defined
amount of time (before the other team does).
After this script executes, the map will be written to: koth_vmflib_example.vmf
This example highlights the use of TF2 game mechanics (in this case the use of
a control point and a goal timer). A simple implementation of team
spawn/resupply areas is also included.
https://developer.valvesoftware.com/wiki/Creating_a_Capture_Point
https://developer.valvesoftware.com/wiki/TF2/King_of_the_Hill
"""
from vmflib2 import *
from vmflib2.types import Vertex, Output, Origin
from vmflib2.tools import Block
import vmflib2.games.base as source
import vmflib2.games.tf as tf2
# Build the map object and the game-logic entities. TfGamerules +
# TfLogicKoth register the king-of-the-hill mode; the point unlocks after
# 5 seconds. TfLogicKoth attaches itself to the map on construction, so
# its return value is deliberately unused.
m = vmf.ValveMap()
la = source.LogicAuto(m)
gr = tf2.TfGamerules(m, targetname="game_rules")
tf2.TfLogicKoth(m, unlock_point=5)
# On spawn, configure both teams' goal strings and respawn wave times.
la.add_outputs([
    Output("OnMapSpawn", gr.targetname, "SetBlueTeamGoalString", "#koth_setup_goal"),  # KOTH-specific
    Output("OnMapSpawn", gr.targetname, "SetRedTeamGoalString", "#koth_setup_goal"),  # KOTH-specific
    Output("OnMapSpawn", gr.targetname, "SetBlueTeamRespawnWaveTime", 6),  # KOTH-specific
    Output("OnMapSpawn", gr.targetname, "SetRedTeamRespawnWaveTime", 6),  # KOTH-specific
])
# Environment and lighting (these values come from Sky List on Valve dev wiki)
#   Sun angle  S Pitch Brightness         Ambience
#   0 300 0    -20     238 218 181 250    224 188 122 250
m.world.skyname = 'sky_harvest_01'
light = source.LightEnvironment(m, angles="0 300 0", pitch=-20, _light="238 218 181 250", _ambient="224 188 122 250")
# Ground
ground = Block(Vertex(0, 0, -32), (2048, 2048, 64), 'nature/dirtground004')
m.add_solid(ground)
# Skybox: five 2D-skybox walls enclosing the 2048^3 play space.
skybox = [
    Block(Vertex(0, 0, 2048), (2048, 2048, 64)),      # Ceiling
    Block(Vertex(-1024, 0, 1024), (64, 2048, 2048)),  # Left wall
    Block(Vertex(1024, 0, 1024), (64, 2048, 2048)),   # Right wall
    Block(Vertex(0, 1024, 1024), (2048, 64, 2048)),   # Forward wall
    Block(Vertex(0, -1024, 1024), (2048, 64, 2048))   # Rear wall
]
for wall in skybox:
    wall.set_material('tools/toolsskybox2d')
m.add_solids(skybox)
# Control point master entity
cp_master = tf2.TeamControlPointMaster(m, targetname="master_control_point", caplayout="0")
# Control point entity
cp = tf2.TeamControlPoint(m, targetname="control_point_1", point_printname="Central Point")
# Control point prop
cp_prop = source.PropDynamic(m, targetname="prop_cap_1", model="models/props_gameplay/cap_point_base.mdl")
# Capture area: trigger volume above the point; on capture, swap the prop
# skin to the owning team and start that team's KOTH clock.
cp_area = tf2.TriggerCaptureArea(m, area_cap_point=cp.targetname)
cp_area.children.append(Block(Vertex(0, 0, 128), (256, 256, 256),
    "TOOLS/TOOLSTRIGGER"))
cp_area.add_outputs([
    Output("OnCapTeam1", cp_prop.targetname, "Skin", 1),  # Not KOTH-specific
    Output("OnCapTeam2", cp_prop.targetname, "Skin", 2),  # Not KOTH-specific
    Output("OnCapTeam1", gr.targetname, "SetRedKothClockActive"),  # KOTH-only
    Output("OnCapTeam2", gr.targetname, "SetBlueKothClockActive")  # KOTH-only
])
# Player spawn areas (TeamNum 2 = RED, 3 = BLU), each with a full health
# kit and ammo pack nearby.
# Define RED spawn
spawn_red = tf2.InfoPlayerTeamspawn(m, origin=Origin(900, 900, 5), angles="0 -135 0", TeamNum=2)
health_red = tf2.ItemHealthkitFull(m, origin=Origin(950, 910, 0), TeamNum=2)
ammo_red = tf2.ItemAmmopackFull(m, origin=Origin(910, 950, 0), TeamNum=2)
# Define BLU spawn
spawn_blu = tf2.InfoPlayerTeamspawn(m, origin=Origin(-900, -900, 5), angles="0 -135 0", TeamNum=3)
health_blu = tf2.ItemHealthkitFull(m, origin=Origin(-950, -910, 0), TeamNum=3)
ammo_blu = tf2.ItemAmmopackFull(m, origin=Origin(-910, -950, 0), TeamNum=3)
# Write the map to a file
m.write_vmf('koth_vmflib_example.vmf')
|
import unittest
from katas.kyu_8.hex_to_decimal import hex_to_dec
class HexToDecimalTestCase(unittest.TestCase):
    """Tests for the kyu-8 hex_to_dec kata."""

    def test_equal_1(self):
        # Single decimal digit is unchanged.
        self.assertEqual(hex_to_dec("1"), 1)

    def test_equal_2(self):
        # Lowercase hex letter.
        self.assertEqual(hex_to_dec("a"), 10)

    def test_equal_3(self):
        # Multi-digit value.
        self.assertEqual(hex_to_dec("10"), 16)
|
#!/usr/bin/env python
# Connection settings for an Azure Cosmos DB Cassandra API endpoint
# (port 10350 is the Cosmos Cassandra gateway port).
# SECURITY NOTE(review): a credential is committed in source control here.
# Move username/password to environment variables or a secrets store and
# rotate this key.
config = {
    'username': 'cassandrarestapi',
    'password': 'hcONvbgk6ec7E2Z2hZ0njuuS7lqWDzpBAov6G3d3HRV5Nx0KI1PNtA7Xzf0N0Z2KKdcm36GGtsGTr88jNiCUSg==',
    'contactPoint': 'cassandrarestapi.cassandra.cosmos.azure.com',
    'port':'10350'
}
|
from sklearn.metrics.pairwise import cosine_similarity
from typing import Any, Iterable, List, Optional, Set, Tuple
from utils.vectors import Vector
from utils import vectors
from utils.word import Word
from utils import eval_utils
from gensim import utils as genutils
import logging
import numpy as np
from scipy import stats
# Timing info for most_similar (100k words):
# Original version: 7.3s
# Normalized vectors: 3.4s
logger = logging.getLogger(__name__)
def most_similar(base_vector: Vector, words: List[Word]) -> List[Tuple[float, Word]]:
    """Return all words ranked by cosine similarity to *base_vector*, most
    similar first (largest cosine similarity at the head of the list)."""
    # NOTE(review): the original docstring claimed "smallest cosine
    # similarity", but the sort below is descending -- *most* similar first.
    words_with_distance = [(vectors.cosine_similarity_normalized(base_vector, w.vector), w) for w in words]
    # We want cosine similarity to be as large as possible (close to 1)
    sorted_by_distance = sorted(words_with_distance, key=lambda t: t[0], reverse=True)
    # Sonvx: remove duplications (not understand why yet, probably because the w2v?)
    # sorted_by_distance = list(set(sorted_by_distance))
    return sorted_by_distance
def print_most_similar(words: List[Word], text: str) -> None:
    """Print up to ten vocabulary words most similar to *text*, excluding
    the word itself (case-insensitive comparison)."""
    base_word = find_word(text, words)
    if not base_word:
        print("Unknown word: %s"%(text))
        return
    print("Words related to %s:" % (base_word.text))
    base_lower = base_word.text.lower()
    related = []
    for _dist, candidate in most_similar(base_word.vector, words):
        if candidate.text.lower() != base_lower:
            related.append(candidate.text)
    print(', '.join(related[:10]))
def read_word() -> str:
    """Prompt on stdin for a single word and return the raw input string."""
    return input("Type a word: ")
def find_word(text: str, words: "List[Word]") -> "Optional[Word]":
    """Return the first entry of *words* whose ``.text`` equals *text*,
    or None when the word is not in the vocabulary."""
    for candidate in words:
        if candidate.text == text:
            return candidate
    return None
def closest_analogies_OLD(
    left2: str, left1: str, right2: str, words: List[Word]
) -> List[Tuple[float, Word]]:
    """Solve the analogy left2:left1 :: right2:? by vector arithmetic.

    Computes (left1 - left2) + right2 and returns the ten nearest words,
    filtered of answers that merely echo one of the inputs. Returns an
    empty list when any input word is out of vocabulary.
    """
    word_left1 = find_word(left1, words)
    word_left2 = find_word(left2, words)
    word_right2 = find_word(right2, words)
    if (not word_left1) or (not word_left2) or (not word_right2):
        return []
    # Analogy vector: left1 - left2 + right2.
    vector = vectors.add(
        vectors.sub(word_left1.vector, word_left2.vector),
        word_right2.vector)
    closest = most_similar(vector, words)[:10]
    def is_redundant(word: str) -> bool:
        """
        Sometimes the two left vectors are so close the answer is e.g.
        "shirt-clothing is like phone-phones". Skip 'phones' and get the next
        suggestion, which might be more interesting.
        """
        word_lower = word.lower()
        return (
            left1.lower() in word_lower or
            left2.lower() in word_lower or
            right2.lower() in word_lower)
    closest_filtered = [(dist, w) for (dist, w) in closest if not is_redundant(w.text)]
    return closest_filtered
def closest_analogies_vectors(
        word_left2: Word, word_left1: Word, word_right2: Word,
        words: List[Word], remove_redundancy: bool = False) \
        -> List[Tuple[float, Word]]:
    """
    Sonvx: solve the analogy left2:left1 :: right2:? from resolved Word
    objects, returning the ten nearest (similarity, Word) candidates.

    :param word_left2: resolved Word for the analogy's left2 term.
    :param word_left1: resolved Word for the analogy's left1 term.
    :param word_right2: resolved Word for the analogy's right2 term.
    :param words: vocabulary to search.
    :param remove_redundancy: remove suggestions if they contain the given
        words. Default False: redundancy filtering does not work well for
        Vietnamese, so candidates are returned unfiltered. (This replaces a
        hard-coded dead ``if False:`` toggle; the docstring previously
        documented this parameter but it did not exist.)
    :return: list of (similarity, Word) tuples, best first.
    """
    # Analogy vector: left1 - left2 + right2.
    vector = vectors.add(
        vectors.sub(word_left1.vector, word_left2.vector),
        word_right2.vector)
    closest = most_similar(vector, words)[:10]
    def is_redundant(word: str) -> bool:
        """
        Sometimes the two left vectors are so close the answer is e.g.
        "shirt-clothing is like phone-phones". Skip 'phones' and get the next
        suggestion, which might be more interesting.
        """
        word_lower = word.lower()
        return (
            word_left1.text.lower() in word_lower or
            word_left2.text.lower() in word_lower or
            word_right2.text.lower() in word_lower)
    # It doesn't work this way for Vietnamese, so filtering is opt-in.
    if remove_redundancy:
        closest_filtered = [(dist, w) for (dist, w) in closest if not is_redundant(w.text)]
    else:
        closest_filtered = closest
    return closest_filtered
def get_avg_vector(word, embedding_words):
    """Resolve *word* to a Word object.

    Single tokens are looked up directly. Multi-token phrases (space
    separated) are resolved token by token -- falling back on the
    lowercased token -- and averaged into a synthetic Word.

    Returns None when nothing could be resolved, mirroring find_word so
    callers can use a single ``if not word_vec`` check.
    """
    if " " not in word:
        return find_word(word, embedding_words)
    list_vector = []
    for single_word in word.split(" "):
        word_vec = find_word(single_word, embedding_words)
        if not word_vec:
            # Try again with lowercase
            word_vec = find_word(single_word.lower(), embedding_words)
        if word_vec:
            list_vector.append(word_vec.vector)
    if not list_vector:
        # BUG FIX: previously vectors.mean_list([]) was attempted when every
        # token was out of vocabulary; report OOV as None instead.
        return None
    return Word(word, vectors.mean_list(list_vector), 1)
def run_paired_ttests(all_map_arr, embedding_names):
    """
    Run paired t-tests on MAP results for every pair of embeddings.

    :param all_map_arr: list of per-embedding MAP score lists (all the same
        length, one score per analogy question).
    :param embedding_names: embedding display names, parallel to all_map_arr.
    :return: one line per pair, "<name_i> vs. <name_j>: <ttest result>\\n".
    :raises Exception: if two embeddings have MAP lists of different length.
    """
    num_embs = len(all_map_arr)
    # Validate all pairs up front so no partial output is produced for
    # inconsistent inputs.
    if all_map_arr and embedding_names:
        for i in range(0, num_embs - 1):
            for j in range(i + 1, num_embs):
                if len(all_map_arr[i]) != len(all_map_arr[j]):
                    raise Exception("Two embedding (%s, %s) have different MAP list, sizes: %s vs. %s"
                                    % (embedding_names[i], embedding_names[j], len(all_map_arr[i]), len(all_map_arr[j])))
    else:
        # Preserve the original best-effort behavior: log and fall through,
        # returning an empty report.
        logging.error("Inputs are NULL")
    str_out = ""
    for i in range(0, num_embs - 1):
        for j in range(i + 1, num_embs):
            stat_test_ret = stats.ttest_rel(all_map_arr[i], all_map_arr[j])
            result = "%s vs. %s: %s" % (embedding_names[i], embedding_names[j], stat_test_ret)
            str_out += result + "\n"
    return str_out
def eval_word_analogy_4_all_embeddings(word_analogies_file, embedding_names: List[str],
                                       word_embeddings: List[List[Word]], output_file):
    """
    Run word analogy for all embeddings and compare them pairwise.

    :param word_analogies_file: question file in ": | section" format.
    :param embedding_names: display name per embedding, parallel to word_embeddings.
    :param word_embeddings: one vocabulary (list of Word) per embedding.
    :param output_file: path of the detailed report to write.
    :return: console summary string (per-embedding MAP@10 plus t-tests).
    """
    # NOTE(review): the writer is not wrapped in 'with'/try-finally, so the
    # handle leaks if eval_word_analogies() raises.
    fwriter = open(output_file, "w")
    idx = 0
    all_map_arr = []
    console_output_str = ""
    category = ": | Word Analogy Task results\n"
    fwriter.write(category)
    console_output_str += category
    # Evaluate every embedding on the same question file; collect the
    # per-question MAP lists for the paired t-tests below.
    for word_embedding in word_embeddings:
        embedding_name = embedding_names[idx]
        map_at_10, map_arr, result_str = eval_word_analogies(word_analogies_file, word_embedding, embedding_name)
        all_map_arr.append(map_arr)
        meta_info = "\nEmbedding: %s"%(embedding_names[idx])
        fwriter.write(meta_info + "\n")
        fwriter.write(result_str)
        fwriter.write("MAP_arr = %s"%(map_arr))
        fwriter.write("MAP@10 = %s" % (map_at_10))
        fwriter.flush()
        console_output_str += meta_info + "\n" + "MAP@10 = %s" % (map_at_10) + "\n"
        idx += 1
    # Getting significant Paired t-tests
    category = "\n: | Paired t-tests results\n"
    fwriter.write(category)
    console_output_str += category
    ttests_result = run_paired_ttests(all_map_arr, embedding_names)
    console_output_str += ttests_result
    fwriter.write(ttests_result)
    fwriter.flush()
    fwriter.close()
    return console_output_str
def eval_word_analogies(word_analogies_file, words: List[Word], embedding_name):
    """
    Sonvx: Evaluate word analogy for one embedding.

    The question file uses ": | <section name>" headers followed by lines of
    the form "a | b | expected | c" (i.e. a - b = expected - c).

    :param word_analogies_file: path to the question file.
    :param words: vocabulary of the embedding under test.
    :param embedding_name: display name used in the summary entry.
    :return: (MAP@10 mean, per-question MAP list, formatted section summary).
    :raises ValueError: if a question appears before any section header.
    """
    # Counters: OOV questions, question index, phrase-based questions
    # solved via averaged vectors, and phrase targets we cannot solve.
    oov_counter, idx_cnt, is_vn_counter, phrase_cnt = 0, -1, 0, 0
    sections, section = [], None
    # map_arr = []
    out_str = ""
    # Question index -> MAP@10; pre-seeded to 0.0 so skipped questions count.
    map_ret_dict = {}
    for line_no, line in enumerate(genutils.smart_open(word_analogies_file)):
        # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
        line = genutils.to_unicode(line)
        line = line.rstrip()
        if line.startswith(': |'):
            # a new section starts => store the old section
            if section:
                sections.append(section)
            section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
        else:
            # Count number of analogy to check
            idx_cnt += 1
            # Set default map value
            map_ret_dict[idx_cnt] = 0.0
            if not section:
                raise ValueError("missing section header before line #%i in %s" % (line_no, word_analogies_file))
            try:
                # a - b + c = expected
                # Input: Baghdad | Irac | Bangkok | Thai_Lan
                # Baghdad - Irac = Bangkok - Thai_Lan
                # -> Baghdad - Irac + Thai_Lan = Bangkok
                # =>
                a, b, expected, c = [word for word in line.split(" | ")]
            except ValueError:
                logger.debug("SVX: ERROR skipping invalid line #%i in %s", line_no, word_analogies_file)
                print("Line : ", line)
                # NOTE(review): on the very first malformed line a/b/c/expected
                # are unbound here (NameError); on later lines they hold stale
                # values from the previous question.
                print("a, b, c, expected: %s, %s, %s, %s" % (a, b, c, expected))
                # input(">>> Wait ...")
                continue
            # In case of Vietnamese, word analogy can be a phrase
            if " " in expected:
                print("INFO: we don't support to find word analogies for phrase for NOW.")
                phrase_cnt += 1
                continue
            elif " " in a or " " in b or " " in c:
                # Phrase inputs: average the token vectors of each phrase.
                is_vn_counter += 1
                word_left1 = get_avg_vector(a, words)
                word_left2 = get_avg_vector(b, words)
                word_right2 = get_avg_vector(c, words)
            else:
                word_left1 = find_word(a, words)
                word_left2 = find_word(b, words)
                word_right2 = find_word(c, words)
            if (not word_left1) or (not word_left2) or (not word_right2):
                logger.debug("SVX: skipping line #%i with OOV words: %s", line_no, line.strip())
                oov_counter += 1
                continue
            # Write solable analogy to a file
            # fsolveable_writer.write(line + "\n")
            logger.debug("word_left1 = %s", word_left1.text)
            logger.debug("word_left2 = %s", word_left2.text)
            logger.debug("word_right2 = %s", word_right2.text)
            # Start finding close word:
            # Note: we can only find 1 expected word in Vietnamese for NOW
            top10_candidate = closest_analogies_vectors(word_left2, word_left1,
                                                        word_right2, words)
            list_candidate_arr = []
            for tuple in top10_candidate:
                list_candidate_arr.append(tuple[1].text)
            logger.debug("Expected Word: %s, candidate = %s" % (expected, list_candidate_arr))
            # input(">>>>>")
            # Calculate MAP@10 score
            this_map_result = eval_utils.mapk(expected, list_candidate_arr, word_level=True)
            if this_map_result >= 0:
                this_map_result = round(this_map_result, 6)
                # map_arr[idx_cnt] = this_map_result
            else:
                this_map_result = 0.0
                # map_arr.append(0.0)
                # map_arr[idx_cnt] = this_map_result
            map_ret_dict[idx_cnt] = this_map_result
            if expected in list_candidate_arr:
                section['correct'].append((a, b, c, expected))
                out_line = "%s - %s + %s = ?; Expect: %s, candidate: %s" % \
                    (word_left1, word_left2, word_right2, expected, list_candidate_arr)
                out_str += out_line + "\n"
            # else:
            #     section['incorrect'].append((a, b, c, expected))
    # fsolveable_writer.close()
    if section:
        # store the last section, too
        sections.append(section)
    map_arr = list(map_ret_dict.values())
    # NOTE(review): these two debug calls pass extra args without %s
    # placeholders, so logging cannot format them (formatting error is
    # swallowed by the logging module).
    logger.debug("map_arr = ", map_arr)
    logger.debug("MAP_RET_DICT = ", map_ret_dict)
    # input("Check result dict: >>>>>")
    # Summary pseudo-section appended after the real sections.
    total = {
        "Emb_Name: " + embedding_name + '/OOV/Total/VN_Solveable_Cases/VN_Phrase_Target':
            [oov_counter, (idx_cnt + 1), is_vn_counter, phrase_cnt],
        'MAP@10': np.mean(map_arr)
        # ,
        # 'section': 'total'
        # ,
        # 'correct': sum((s['correct'] for s in sections), []),
        # 'incorrect': sum((s['incorrect'] for s in sections), []),
    }
    # print (out_str)
    # print(total)
    # logger.info(total)
    sections.append(total)
    sections_str = "\n%s\n" % sections
    return np.mean(map_arr), map_arr, sections_str
def print_analogy(left2: str, left1: str, right2: str, words: List[Word]) -> None:
    """Print the solution of the analogy left2:left1 :: right2:?.

    Falls back to printing the unsolved analogy when no candidate exists.
    Example: man-king is like woman-? (expected answer: queen).
    """
    analogies = closest_analogies_OLD(left2, left1, right2, words)
    if not analogies:
        # No candidate found for this analogy.
        print("%s-%s is like %s-?" % (left2, left1, right2))
    else:
        best_dist, best_word = analogies[0]
        print("%s-%s is like %s-%s" % (left2, left1, right2, best_word.text))
|
import datetime
import message_pb2
from confluent_kafka import DeserializingConsumer
from confluent_kafka.error import ValueDeserializationError
from confluent_kafka.schema_registry.protobuf import ProtobufDeserializer
from confluent_kafka.serialization import StringDeserializer
def format_datetime(epoch):
    """Render a Unix epoch timestamp in the locale's date/time format."""
    stamp = datetime.datetime.fromtimestamp(epoch)
    return stamp.strftime('%c')
def main():
    """Consume protobuf-encoded bank transactions from Kafka and print them.

    Polls forever until interrupted with Ctrl-C, then closes the consumer.
    """
    topic = 'bankTransactions'

    protobuf_deserializer = ProtobufDeserializer(message_pb2.Transaction)
    string_deserializer = StringDeserializer('utf_8')

    consumer_conf = {'bootstrap.servers': 'localhost:19092',
                     'key.deserializer': string_deserializer,
                     'value.deserializer': protobuf_deserializer,
                     'group.id': 'events-consumer',
                     'auto.offset.reset': 'earliest'}

    consumer = DeserializingConsumer(consumer_conf)
    consumer.subscribe([topic])

    while True:
        try:
            try:
                msg = consumer.poll(1.0)
            except ValueDeserializationError:
                print('confluent_kafka.error.ValueDeserializationError', flush=True)
                # BUG FIX: previously fell through without `continue`, so
                # `msg` was unbound on the first failure (NameError) or a
                # stale message was reprocessed on later ones.
                continue
            if msg is None:
                continue
            transaction = msg.value()
            if transaction is not None:
                output_string = (
                    f"Transaction ID: {transaction.transaction_id} "
                    f"Account #: {transaction.account_number} "
                    f"Amount: {f'{transaction.amount:.2f} ':>8}"
                    f"Timestamp: {format_datetime(transaction.transaction_datetime.seconds)}"
                )
                print(output_string, flush=True)
        except KeyboardInterrupt:
            break

    consumer.close()
# Script entry point: run the consumer loop.
if __name__ == '__main__':
    main()
|
import sys
from scipy import stats
from numpy import *
import re
def statistic(file_a, bestv):
    """Write the mean and std-dev of the numbers in *file_a* to stdout.

    `bestv` is accepted for interface compatibility with earlier variants
    (see the commented-out lines in history); the active output only
    reports mean and standard deviation, space-separated.
    """
    samples = loadtxt(file_a)
    sys.stdout.write(str(mean(samples)) + " " + str(std(samples)) + " ")
    sys.stdout.flush()
def process_instance(list_files):
    """Print per-file statistics for one instance, then a newline.

    The best (maximum) mean over all result files is computed first and
    passed to statistic(); flip max/min for minimisation problems.
    """
    best_value = -1000000
    for result_file in list_files:
        best_value = max(best_value, mean(loadtxt(result_file)))
    for result_file in list_files:
        statistic(result_file, best_value)
    print("")
##Load the entire file
list_files = open(sys.argv[1], "r")
#list_files = open("file", "r")
#print list_files.read()
#split the text by instance...
# Column header for the statistics table.
sys.stdout.write("min max mean std")
# Instances are separated by '--'.  Within an instance, the first non-blank
# line is the instance name; the remaining lines are result-file paths.
for inst in list_files.read().replace(" ", "").split('--'):
    instance_list=[]
    flag = 0  # 0 until the instance-name line has been echoed
    for line in inst.split('\n'):
        if re.match(r'^\s*$', line):
            continue  # skip blank lines
        if flag == 0:
            sys.stdout.write(line+" ")
            sys.stdout.flush()
            flag = 1
        else:
            instance_list.append(line)
    process_instance(instance_list)
|
#!/usr/bin/env python
# Python 2 deployment helper: loads ../common/CompileHelper.py and installs
# the pinned dependencies with easy_install, falling back to pip.
import imp, os, sys

# Locate ../common/CompileHelper.py relative to this script.
here = os.path.dirname( os.path.abspath( __file__ ) )
chFilePath = os.path.join( os.path.dirname( here ) , "common", "CompileHelper.py" )
try:
    fd = open( chFilePath )
except Exception, e:
    print "Cannot open %s: %s" % ( chFilePath, e )
    sys.exit( 1 )
# Load CompileHelper as a module from the open file handle (Python 2 imp API).
chModule = imp.load_module( "CompileHelper", fd, chFilePath, ( ".py", "r", imp.PY_SOURCE ) )
fd.close()
chClass = getattr( chModule, "CompileHelper" )
ch = chClass( here )

# Minimum required versions per dependency.
versions = { 'sqlalchemy' : "0.9.8",
             'fuse-python' : "0.2"}
ch.setPackageVersions( versions )
for package in versions:
    packageToInstall = "%s>=%s" % ( package, versions[ package ] )
    # Try easy_install first; fall back to pip, and abort if both fail.
    if not ch.easyInstall( packageToInstall ):
        ch.ERROR( "Could not deploy %s with easy_install" % package )
        if not ch.pip( packageToInstall ):
            ch.ERROR( "Could not deploy %s with pip" % package )
            sys.exit( 1 )
|
#!/usr/bin/python
# Minimal Python 2 demo: prints two greeting lines, then simple arithmetic
# on p (5 + 2 + 1 = 8) before printing it.
print "Hello world"
print "and everyone else"
p = 5
p =p +2
p = p +1
print p
|
# encoding: utf-8
from src.config import FaqConfig
# from src.tfidf_transformer import generate_embedding
from src.skip_embedding import generate_embedding
from src.utils import query_item_to_dict
from src.utils import Cutter
from src.utils import QueryItem
import logging.config
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logging.config.fileConfig(fname='log.config', disable_existing_loggers=False)
def analysis(query, faq_config: FaqConfig):
    """Build a QueryItem for *query*: tokenise it and attach its embedding."""
    logger = logging.getLogger('analysis')

    query_item = QueryItem()
    query_item.query = query

    cutter = Cutter()
    # Character-level (zi) and word-level (jieba) tokens, punctuation removed.
    query_item.query_tokens_zi = cutter.cut_zi_and_remove_punc(query)
    query_item.query_tokens_jieba = cutter.cut_and_remove_punc(query)

    # Embedding is computed from the space-joined jieba tokens.
    joined_text = ' '.join(query_item.query_tokens_jieba).strip()
    query_item.query_vec = generate_embedding(
        joined_text, faq_config.skip_embedding)

    logger.info('analysis SUCCESS !')
    logger.debug('query info : ' + str(query_item_to_dict(query_item)))
    return query_item
# Module is import-only; no CLI behavior.
if __name__ == '__main__':
    pass
|
# Count attendance records of length n that qualify for an award:
# fewer than 2 'A' (absent) overall and never 3 consecutive 'L' (late).
class Solution:
    def checkRecord(self, n: int) -> int:
        """Return the number of rewardable attendance records of length n,
        modulo 10**9 + 7.

        dp[i][a][l] = number of valid records of length i that contain `a`
        absences and end in `l` consecutive lates.
        """
        MOD = 10**9 + 7
        dp = [[[0] * 3 for _ in range(2)] for _ in range(n + 1)]
        dp[0][0][0] = 1
        for day in range(1, n + 1):
            prev, cur = dp[day - 1], dp[day]
            # Append 'P': resets the trailing-late counter to 0.
            for absent in range(2):
                for late in range(3):
                    cur[absent][0] = (cur[absent][0] + prev[absent][late]) % MOD
            # Append 'A': only from records with no absence yet; resets lates.
            for late in range(3):
                cur[1][0] = (cur[1][0] + prev[0][late]) % MOD
            # Append 'L': extends a run of at most 2 trailing lates.
            for absent in range(2):
                for late in range(1, 3):
                    cur[absent][late] = (cur[absent][late] + prev[absent][late - 1]) % MOD
        return sum(dp[n][a][l] for a in range(2) for l in range(3)) % MOD
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import numpy as np
# Prepare the data: drop the empty trailing column, the id and the label.
pf=pd.read_csv("../../Datasets/Cancer.csv")
X=pf.drop(['Unnamed: 32',"id","diagnosis"],axis=1)
# One-hot encode diagnosis (M/B) into a flat 0/1 target vector.
Y=np.array(pd.get_dummies(pf['diagnosis'], drop_first=True)).reshape(X.shape[0])
print((Y.shape))
# Split into train and test sets.
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.21,random_state=42)
# Build the model.
logistic_model=LogisticRegression()
logistic_model.fit(X_train,y_train)
# Predict on the held-out test set.
pred=logistic_model.predict(X_test)
# Raw (baseline) accuracy score.
print(f"İlkel başarı değeri : {accuracy_score(y_test,pred)}")
from django.contrib import admin
from photos.models import Photo, Comment
@admin.register(Photo)
class PhotoAdmin(admin.ModelAdmin):
    """Admin listing for uploaded photos."""
    list_display = ['id', 'image', 'caption', 'posted_at', 'user']


@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin listing for photo comments."""
    list_display = ['id', 'photo', 'user', 'description', 'commented_at']
|
import cv2 as cv
import numpy as np
import math
import queue
import random
# Pre-computed 5x5 Gaussian kernel weights.
gaussian_mask = np.array([
    [0.0000,0.0000,0.0002,0.0000,0.0000],
    [0.0000,0.0113,0.0837,0.0113,0.0000],
    [0.0002,0.0837,0.6187,0.0837,0.0002],
    [0.0000,0.0113,0.0837,0.0113,0.0000],
    [0.0000,0.0000,0.0002,0.0000,0.0000]])
# Model image and its HSV-converted version.
roi = cv.imread('model.png')
hsv_roi = cv.cvtColor(roi,cv.COLOR_BGR2HSV)
# Target image; `target` will hold its Gaussian-blurred copy.
origin_target = cv.imread('4.jpg')
target = np.zeros((origin_target.shape[0],origin_target.shape[1],3),dtype=np.uint8)
# Manual 5x5 convolution per pixel and channel; out-of-bounds taps are skipped.
for j in range(origin_target.shape[0]):
    for i in range(origin_target.shape[1]):
        for k in range(3):
            sum = 0
            for r in range(-2,3):
                for c in range(-2,3):
                    y = j+c
                    x = i+r
                    if y >= 0 and y < origin_target.shape[0] and x >=0 and x < origin_target.shape[1]:
                        sum += gaussian_mask.item(c+2,r+2) * origin_target.item(y,x,k)
            # NOTE(review): no-op — probably meant `sum = int(sum)`;
            # itemset on a uint8 array truncates anyway.
            int(sum)
            target.itemset(j,i,k,sum)
# HSV version of the blurred target.
hsv_target = cv.cvtColor(target,cv.COLOR_BGR2HSV)
# Build a 2D H-S histogram reduced to `scale` bins per axis (the book uses
# q quantisation levels; 16 is used here).
scale = 16
# Model H-S histogram with scale x scale bins.
model_hist = np.zeros((scale,scale))
# target_hist = np.zeros((scale,scale))
# Build the normalised model histogram (book algorithm 2-2; model only).
for i in range(hsv_roi.shape[1]):
    for j in range(hsv_roi.shape[0]):
        model_hist[math.trunc(hsv_roi.item(j,i,0)/180*(scale-1)),math.trunc(hsv_roi.item(j,i,1)*(scale-1)/255)]+=1
# NOTE(review): this loop recomputes the same whole-array division each
# iteration; a single assignment would give the identical result.
for i in range(scale):
    for j in range(scale):
        norm_model_hist = model_hist/(hsv_roi.shape[0] * hsv_roi.shape[1])
norm_model_hist /= np.max(norm_model_hist) # normalise into the range [0, 1]
# Empty images with the same size as the target.
backP_img = np.zeros((target.shape[0],target.shape[1]),np.float64)
backP_img_u = np.zeros((target.shape[0],target.shape[1]),np.uint8)
# Back-projection: quantise each target pixel and look up its value in the
# model histogram.
for i in range(hsv_target.shape[1]):
    for j in range(hsv_target.shape[0]):
        backP_img[j,i] = norm_model_hist[math.trunc(hsv_target.item(j,i,0)/180*(scale-1)),math.trunc(hsv_target.item(j,i,1)/255*(scale-1))]
        backP_img_u[j,i] = backP_img[j,i] * 255
# --- Otsu threshold selection on the back-projected image ---
gray_hist = np.zeros(256)
# Normalised grey-level histogram.
# BUG FIX: dtype was np.float, an alias removed in NumPy 1.20+; the builtin
# float is the same 64-bit dtype.
norm_hist = np.zeros(256,dtype=float)
# Raw grey-level histogram.
for i in range(backP_img_u.shape[1]):
    for j in range(backP_img_u.shape[0]):
        gray_hist[backP_img_u.item(j,i)]+=1
# Normalise by the total pixel count.
for i in range(256):
    norm_hist[i] = gray_hist[i] / (backP_img_u.shape[0]*backP_img_u.shape[1])
vwlist = []
#vwlist2 = [] #without weight
# Sweep every candidate threshold T in [0, 255]; the best T minimises the
# weighted within-class variance (Otsu's method).
for i in range(256):
    w0 = 0.0   # probability mass of the background class (below T)
    w1 = 0.0   # probability mass of the foreground class (above T)
    u0 = 0.0   # background mean
    u1 = 0.0   # foreground mean
    v0 = 0.0   # background variance
    v1 = 0.0   # foreground variance
    for j in range(i):
        w0 += norm_hist[j]
    for j in range(i+1,256):
        w1 += norm_hist[j]
    if w0 != 0:
        for j in range(i):
            u0 += j*norm_hist[j]
        u0 /= w0
        for j in range(i):
            v0 += norm_hist[j]*(j-u0)**2
        v0 /= w0
    if w1 != 0:
        for j in range(i+1,256):
            u1 += j*norm_hist[j]
        u1 /= w1
        for j in range(i+1,256):
            v1 += norm_hist[j]*(j-u1)**2
        v1 /= w1
    v_within = w0 * v0 + w1 * v1
    #v_within2 = v0 + v1 #without weight
    vwlist.append(v_within)
# Threshold with the minimal within-class variance.
t_argmin = np.argmin(vwlist)
print(t_argmin, vwlist[t_argmin])
# Binarise the back-projection with the chosen threshold.
binary = np.zeros((backP_img_u.shape[0],backP_img_u.shape[1]),dtype=np.uint8)
for i in range(backP_img_u.shape[0]):
    for j in range(backP_img_u.shape[1]):
        if backP_img_u[i,j] >= t_argmin:
            binary[i,j] = 255
        else:
            binary[i,j] = 0
# Scanline flood-fill labelling function from the book.
def flood_fill4(l,j,i,label):
    """Label the 4-connected component containing (j, i) in `l` with `label`.

    Unlabelled object pixels are marked -1.  Processes a whole horizontal
    run of -1 pixels at a time, then queues seed points on the rows
    above and below the run.
    """
    Q = queue.Queue()
    Q.put((j,i))
    while not Q.empty():
        x, y = Q.get()
        if l.item(x,y) == -1:
            left = right = x
            # Expand from the seed to both ends of the unlabelled (-1) run.
            while l.item(left-1,y) == -1:
                left -=1
            while l.item(right+1,y) == -1:
                right +=1
            # Label the whole run from left to right.
            for c in range(left,right+1):
                l.itemset(c,y,label)
                # Queue a neighbour above/below only at the start of each new
                # unlabelled run; the trailing condition
                # (c == left or l.item(c-1, ...) != -1) avoids redundant seeds.
                if l.item(c,y-1) == -1 and (c == left or l.item(c-1,y-1) != -1):
                    Q.put((c,y-1))
                if l.item(c,y+1) == -1 and (c == left or l.item(c-1,y+1) != -1):
                    Q.put((c,y+1))
                # Simpler variant (same result, but queues redundant seeds):
                # if l.item(c,y-1) == -1 :
                #     Q.put((c,y-1))
                # if l.item(c,y+1) == -1 :
                #     Q.put((c,y+1))
# Matrix that will hold the per-pixel component labels.
# BUG FIX: dtype was np.int, an alias removed in NumPy 1.20+; builtin int
# gives the same default integer dtype.
label_img = np.zeros((binary.shape[0],binary.shape[1]),dtype=int)
# Copy the binary image: object pixels (255) become -1 (unlabelled), all
# others 0.  The 1-pixel border is forced to 0 so the flood fill never
# has to bounds-check at the image edge.
for i in range(binary.shape[1]):
    for j in range(binary.shape[0]):
        if j == 0 or j == binary.shape[0]-1 or i == 0 or i == binary.shape[1] - 1 :
            label_img.itemset(j,i,0)  # border forced to background
        elif binary.item(j,i) == 255:
            label_img.itemset(j,i,-1)  # object pixel, not yet labelled
        else:
            label_img.itemset(j,i,0)
# Next label value to assign.
label = 1
# Seed a 4-connected flood fill at every still-unlabelled (-1) pixel.
for i in range(1,binary.shape[1]-1):
    for j in range(1,binary.shape[0]-1):
        if label_img.item(j,i) == -1:
            flood_fill4(label_img,j,i,label)
            label+=1
# Visualise the labelling as an RGB image, one deterministic random colour
# per component (seeded by the label value; not in the book).
new_img = np.zeros((binary.shape[0],binary.shape[1],3),dtype=np.uint8)
for i in range(binary.shape[1]):
    for j in range(binary.shape[0]):
        if label_img.item(j,i) > 0:
            random.seed(label_img.item(j,i))
            new_img.itemset(j,i,0,random.randint(0,255))
            new_img.itemset(j,i,1,random.randint(0,255))
            new_img.itemset(j,i,2,random.randint(0,255))
print(label)
# Per-label pixel counts (background ignored).
bins = np.zeros((label))
for i in range(label_img.shape[0]):
    for j in range(label_img.shape[1]):
        if label_img.item(i,j) > 0: # ignore the background
            bins[label_img.item(i,j)]+=1 # frequency per label
print('face label: ',max(bins))
face = max(np.where(bins==max(bins))) # the most frequent label is assumed to be the face
face_index = np.where(label_img==face) # all coordinates carrying the face label
'''
face_index is a tuple: the first element holds the row indices and the
second element holds the column indices.
'''
pt1_y = min(face_index[0]) # top-left row: minimum of the rows
pt1_x = min(face_index[1]) # top-left column: minimum of the columns
pt2_y = max(face_index[0]) # bottom-right row: maximum of the rows
pt2_x = max(face_index[1]) # bottom-right column: maximum of the columns
face_pos = ((pt1_x+pt2_x)//2,(pt1_y+pt2_y)//2) # centre of the face box
cv.rectangle(new_img,(pt1_x,pt1_y),(pt2_x,pt2_y),(255,255,255),2) # draw the face bounding box
cv.circle(new_img,face_pos,5,(0,0,255),cv.FILLED) # mark the face centre
cv.imshow('label_img',new_img)
# imshow maps float arrays from [0.0, 1.0] onto [0, 255] for display.
cv.imshow('img',backP_img)
cv.imshow('binary img',binary)
cv.waitKey(0)
cv.destroyAllWindows()
from tqsdk import TqApi

# Create the API instance.
api = TqApi()
# Subscribe to the SHFE cu1906 quote; the quote's fields update in place
# whenever a new tick arrives.
quote = api.get_quote("SHFE.cu1906")
# Print the latest quote time and last traded price of cu1906.
print(quote.datetime, quote.last_price)
# Close the API and release its resources.
api.close()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

# Japanese-capable font for the axis labels.
# NOTE(review): these Windows paths contain unescaped backslashes; none of
# them form a recognised escape sequence here, but raw strings (r'...')
# would be safer.
fp = FontProperties(fname='C:\WINDOWS\Fonts\msgothic.ttc', size=14)
fname='OneDrive\デスクトップ\実験用\ex1.xlsx'
sname='Sheet1'
dataframe = pd.read_excel(fname, sheet_name=sname)
#print(dataframe)
values = dataframe.values
#print(values)
dataframe_label = dataframe.columns
# Ambient room temperature recorded for each measurement series.
ROOMTEMPERATURE1 = 23.0
ROOMTEMPERATURE2 = 25.2
ROOMTEMPERATURE3 = 24.5
# Column 0 is elapsed time; columns 1-3 are the measured temperatures,
# plotted as excess over the respective room temperature.
time = values[:,0]
temperature1 = values[:,1] - ROOMTEMPERATURE1
temperature2 = values[:,2] - ROOMTEMPERATURE2
temperature3 = values[:,3] - ROOMTEMPERATURE3
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1)
ax.plot(time, temperature1, 'bo', label=dataframe_label[1][:])
ax.plot(time, temperature2, 'go', label=dataframe_label[2][:])
ax.plot(time, temperature3, 'ro', label=dataframe_label[3][:])
# Log scale on y: exponential cooling shows up as a straight line.
ax.set_yscale('log')
ax.grid(which='both')
ax.set_yticks(np.arange(35.0, 70.0, 10.0))
ax.set_xlabel('経過時間(分)', fontproperties=fp)
ax.set_ylabel('(測定温度‐室温)(℃)', fontproperties=fp)
ax.legend(prop=fp)
plt.show()
|
"""Unit test for appevents.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
from tests.testutils import mockzk
import kazoo
import mock
from treadmill import appevents
from treadmill import zkutils
from treadmill.apptrace import events
class AppeventsTest(mockzk.MockZookeeperTestCase):
    """Tests for teadmill.appevents."""

    def setUp(self):
        super(AppeventsTest, self).setUp()
        # Scratch directory standing in for the appevents spool directory.
        self.root = tempfile.mkdtemp()

    def tearDown(self):
        if self.root and os.path.isdir(self.root):
            shutil.rmtree(self.root)
        super(AppeventsTest, self).tearDown()

    @mock.patch('time.time', mock.Mock(return_value=100))
    @mock.patch('treadmill.appevents._HOSTNAME', 'baz')
    def test_post(self):
        """Test appevents.post."""
        # Disable W0212(protected-access)
        # pylint: disable=W0212
        zkclient_mock = mock.Mock()
        zkclient_mock.get_children.return_value = []
        watcher = appevents.AppEventsWatcher(zkclient_mock, self.root)

        # Pending event: post() writes a spool file named
        # <time>,<instance>,<event>,<why>; the watcher callback then mirrors
        # it into the zookeeper trace tree.
        appevents.post(
            self.root,
            events.PendingTraceEvent(
                instanceid='foo.bar#123',
                why='created',
            )
        )
        path = os.path.join(
            self.root, '100,foo.bar#123,pending,created'
        )
        self.assertTrue(os.path.exists(path))
        watcher._on_created(path)
        zkclient_mock.create.assert_called_once_with(
            '/trace/007B/foo.bar#123,100,baz,pending,created',
            b'',
            ephemeral=False, makepath=True, sequence=False,
            acl=mock.ANY
        )

        # Pending-delete event follows the same spool-then-mirror flow.
        zkclient_mock.reset_mock()
        appevents.post(
            self.root,
            events.PendingDeleteTraceEvent(
                instanceid='foo.bar#123',
                why='deleted'
            )
        )
        path = os.path.join(
            self.root, '100,foo.bar#123,pending_delete,deleted'
        )
        self.assertTrue(os.path.exists(path))
        watcher._on_created(path)
        zkclient_mock.create.assert_called_once_with(
            '/trace/007B/foo.bar#123,100,baz,pending_delete,deleted',
            b'',
            ephemeral=False, makepath=True, sequence=False,
            acl=mock.ANY
        )

        # Aborted event additionally records the app under /finished.
        zkclient_mock.reset_mock()
        appevents.post(
            self.root,
            events.AbortedTraceEvent(
                instanceid='foo.bar#123',
                why='test'
            )
        )
        path = os.path.join(
            self.root, '100,foo.bar#123,aborted,test'
        )
        self.assertTrue(os.path.exists(path))
        watcher._on_created(path)
        self.assertEqual(zkclient_mock.create.call_args_list, [
            mock.call(
                '/trace/007B/foo.bar#123,100,baz,aborted,test',
                b'',
                ephemeral=False, makepath=True, sequence=False,
                acl=mock.ANY
            ),
            mock.call(
                '/finished/foo.bar#123',
                b'{data: test, host: baz, state: aborted, when: \'100\'}\n',
                makepath=True,
                ephemeral=False,
                acl=mock.ANY,
                sequence=False
            )
        ])

    @mock.patch('time.time', mock.Mock(return_value=100))
    @mock.patch('treadmill.appevents._HOSTNAME', 'baz')
    def test_post_zk(self):
        """Test appevents.post.zk."""
        # post_zk writes the trace nodes directly, bypassing the spool dir.
        zkclient_mock = mock.Mock()
        zkclient_mock.get_children.return_value = []

        appevents.post_zk(
            zkclient_mock,
            events.PendingTraceEvent(
                instanceid='foo.bar#123',
                why='created',
                payload=''
            )
        )
        zkclient_mock.create.assert_called_once_with(
            '/trace/007B/foo.bar#123,100,baz,pending,created',
            b'',
            ephemeral=False, makepath=True, sequence=False,
            acl=mock.ANY
        )

        zkclient_mock.reset_mock()
        appevents.post_zk(
            zkclient_mock,
            events.PendingDeleteTraceEvent(
                instanceid='foo.bar#123',
                why='deleted'
            )
        )
        zkclient_mock.create.assert_called_once_with(
            '/trace/007B/foo.bar#123,100,baz,pending_delete,deleted',
            b'',
            ephemeral=False, makepath=True, sequence=False,
            acl=mock.ANY
        )

        # Aborted events also create the /finished node.
        zkclient_mock.reset_mock()
        appevents.post_zk(
            zkclient_mock,
            events.AbortedTraceEvent(
                instanceid='foo.bar#123',
                why='test'
            )
        )
        zkclient_mock.create.assert_has_calls([
            mock.call(
                '/trace/007B/foo.bar#123,100,baz,aborted,test',
                b'',
                ephemeral=False, makepath=True, sequence=False,
                acl=mock.ANY
            ),
            mock.call(
                '/finished/foo.bar#123',
                b'{data: test, host: baz, state: aborted, when: \'100\'}\n',
                ephemeral=False, makepath=True, sequence=False,
                acl=mock.ANY
            )
        ])

    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
    @mock.patch('treadmill.zkutils.ensure_deleted', mock.Mock())
    @mock.patch('treadmill.appevents._HOSTNAME', 'host_x')
    def test_unschedule(self):
        """Tests unschedule when server owns placement."""
        # Disable W0212: accessing protected members.
        # pylint: disable=W0212
        # The app is placed on this host (host_x), so _unschedule is
        # expected to remove the /scheduled node.
        zk_content = {
            'placement': {
                'host_x': {
                    'app#1': {},
                },
                'host_y': {
                },
            },
            'scheduled': {
                'app#1': {
                },
            },
        }
        self.make_mock_zk(zk_content)
        zkclient = kazoo.client.KazooClient()
        appevents._unschedule(zkclient, 'app#1')
        zkutils.ensure_deleted.assert_called_with(zkclient, '/scheduled/app#1')

    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
    @mock.patch('treadmill.zkutils.ensure_deleted', mock.Mock())
    @mock.patch('treadmill.appevents._HOSTNAME', 'host_x')
    def test_unschedule_stale(self):
        """Tests unschedule when server does not own placement."""
        # Disable W0212: accessing protected members.
        # pylint: disable=W0212
        # The app is placed on a different host (host_y), so _unschedule
        # must leave the /scheduled node alone.
        zk_content = {
            'placement': {
                'host_x': {
                },
                'host_y': {
                    'app#1': {},
                },
            },
            'scheduled': {
                'app#1': {
                },
            },
        }
        self.make_mock_zk(zk_content)
        zkclient = kazoo.client.KazooClient()
        appevents._unschedule(zkclient, 'app#1')
        self.assertFalse(zkutils.ensure_deleted.called)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""Calculator class."""
# Day 17: More Exceptions:
# Practice throwing and propagation an exception.
# Task:
# Write a Calculator class with a single method: power(int, int).
class Calculator():
# Returns the n^p, if n or p are negative throws exception.
def power(self, n, p):
"""
Take two integers, n and p, and returns the integer of n^p
if either n or p is negative, then the method throws an exception.
"""
if n < 0 or p < 0:
raise Exception('n and p should be non-negative')
return n ** p
# main
# Python 2 entry point: read T test cases of "n p" and print n^p, or the
# exception message for negative inputs.
if __name__ == '__main__':
    myCalculator = Calculator()
    T = int(raw_input())
    for i in range(T):
        n, p = map(int, raw_input().split())
        try:
            ans = myCalculator.power(n, p)
            print(ans)
        except Exception, e:
            print e
|
'''******************************************
ATHON
Programa de Introdução a Linguagem Python
Disciplina: Lógica de Programação
Professor: Francisco Tesifom Munhoz
Data: Primeiro Semestre 2021
*********************************************
Atividade: Lista 2 (Ex 5)
Autor: Yuri Pellini
Data: 12 de Maio de 2021
Comentários:
******************************************'''
# Input: read a number from the user.
Num=float(input("Digite um número:"))
# Output: classify the number relative to 20.
if Num > 20:
    print("Maior que 20")
elif Num == 20:
    print("Igual a 20")
else:
    print("Menor que 20")
from .models import Device, Position
from .serializers import DeviceSerializer, PositionSerializer
from rest_framework import viewsets, status, views
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import action, api_view
from rest_framework.views import APIView
import re
from datetime import datetime
def isValidData(data, regex):
    """Return the first regex match found in *data*, or None when absent."""
    return re.compile(regex).search(data)
class DeviceViewSet(viewsets.ModelViewSet):
    """CRUD over devices; non-superusers only see and claim their own."""
    permission_classes = ( IsAuthenticated, )
    serializer_class = DeviceSerializer
    lookup_url_kwarg = 'serial'
    lookup_field = 'serial'

    def get_queryset(self):
        # Superusers see every device; regular users only their own.
        user = self.request.user
        if not user.is_superuser:
            return Device.objects.filter(user = user)
        return Device.objects.all()

    def getPartsDataDevice(self, value):
        """Build the creation payload from a validated serial.

        The device type is the serial's leading letter; new devices start
        with status 'I'.
        """
        dict_device = dict(
            serial = value,
            typee = value[0],
            status = 'I',
        )
        return dict_device

    def create(self, request):
        """Register a new device (superuser) or claim an existing one (user)."""
        user = request.user
        serial = request.data.get('serial', None)
        regex = '([SGM][0-9]{3})'
        match = isValidData(serial, regex)
        if match:
            if not user.is_superuser:
                try:
                    device = Device.objects.get(serial = serial)
                    if not device.user:
                        # Unclaimed device: assign it to this user.
                        device.user = user
                        device.status = 'H'
                        device.save()
                        serializer = DeviceSerializer(device)
                        return Response(serializer.data, status = status.HTTP_200_OK)
                    message = "El dispositivo ya ha sido asignado a otro usuario"
                    # BUG FIX: the message was built but never included in
                    # the response body.
                    return Response(message, status = status.HTTP_403_FORBIDDEN)
                except Exception as e:
                    message = "El dispositivo no se encuentra registrado para su uso"
                    return Response(message, status = status.HTTP_403_FORBIDDEN)
            parts = self.getPartsDataDevice(match.group(0))
            serializer = DeviceSerializer(data = parts)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status = status.HTTP_201_CREATED)
        message = 'El dispositivo que ha introducido ya existe'
        return Response(message, status = status.HTTP_400_BAD_REQUEST)

    def isValidRange(self, init, final):
        # Both bounds must be present for a range query.
        return init and final

    @action(detail = True, methods = ['get'])
    def positions(self, *args, **kwargs):
        """List a device's positions: by date range, last-only, or all."""
        filter_init = self.request.query_params.get('init', None)
        filter_final = self.request.query_params.get('final', None)
        last = bool(self.request.query_params.get('last', None))
        valid_range = False
        params_to_found = dict(
            device = kwargs.get('serial', '')
        )
        if last:
            params_to_found['last'] = last
        if self.isValidRange(filter_init, filter_final):
            valid_range = True
            params_to_found['init'] = filter_init
            params_to_found['final'] = filter_final
        if params_to_found['device']:
            device_positions = Position.positions.getPositionsForRangeDate(**params_to_found, byRange = valid_range)
            if device_positions:
                if filter_init and filter_final and not last:
                    serializer = PositionSerializer(device_positions, many = True)
                elif not filter_init and not filter_final and last:
                    serializer = PositionSerializer(device_positions)
                elif not filter_init and not filter_final and not last:
                    positions = Position.objects.filter(device = params_to_found['device'])
                    serializer = PositionSerializer(positions, many = True)
                else:
                    return Response("Los parametros de consulta no concuerdan")
                return Response(serializer.data)
        message = "No se ha proporcionado un serial valido de dispositivo"
        # BUG FIX: Response() has no `message` keyword argument; passing
        # message=... raised a TypeError instead of returning a 404.
        return Response(message, status = status.HTTP_404_NOT_FOUND)
class PositionView(APIView):
    """Ingest raw GPS position strings pushed by tracker devices."""
    queryset = Position.objects.all()

    def obtainPartsDataPosition(self, values):
        """Assemble a position dict from the regex capture groups.

        Latitude and longitude each arrive split into two numeric parts
        that are summed back together.
        NOTE(review): values[6] is the trailing 4-digit group folded into
        `c` — confirm the intended semantics/units of `c`.
        """
        dict_parts = dict(
            serial = values[0],
            latitude = float(values[1]) + float(values[2]),
            longitude = float(values[3]) + float(values[4]),
            c = float(values[5]) + float(values[6])
        )
        return dict_parts

    def savePositionDevice(self, **kwargs):
        """Persist a Position linked to the device named by `serial`.

        Returns the Device so the caller can inspect its owner.
        """
        serial = kwargs.get('serial', None)
        device = Device.objects.get(serial = serial)
        del kwargs['serial']
        position = Position.objects.create(
            device = device,
            **kwargs
        )
        return device

    def get(self, request):
        """Parse the `data` query parameter and store the reported position."""
        data = request.query_params.get('data', None)
        if data:
            regex = '([SGM][0-9]{3})(-?[0-9]+[.][0-9]+)-?([0-9]+[.][0-9]+)(-?[0-9]+[.][0-9]+)-?([0-9]+[.][0-9]+)(-?[0-9]+[.][0-9]+)-?([0-9]{4})'
            match = isValidData(data, regex)
            if match :
                parts = self.obtainPartsDataPosition(match.groups())
                device = self.savePositionDevice(**parts)
                #if device.user: send a notification to the client
                return Response(status = status.HTTP_200_OK)
        return Response(status = status.HTTP_400_BAD_REQUEST)
|
# Use environment variables to configure database and swift access
import os
from developer_portal.settings import *

# Core toggles come from the environment; DEBUG must be the literal "True".
DEBUG=os.environ.get('DEBUG_MODE', False)=="True"
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '127.0.0.1').split(',')
SECRET_KEY=os.environ.get('SECRET_KEY', '')

# Use original HOST header, so cn.developer.ubuntu.com redirects work
#USE_X_FORWARDED_HOST=True

# Database configs (DATABASE_URL parsed by dj-database-url)
import dj_database_url
DATABASES['default'].update(dj_database_url.config())

# SwiftStorage configs: OpenStack credentials and container names.
INSTALLED_APPS.append('swiftstorage')
OS_USERNAME = os.environ.get('OS_USERNAME', '')
OS_PASSWORD = os.environ.get('OS_PASSWORD', '')
OS_AUTH_URL = os.environ.get('OS_AUTH_URL', '')
OS_REGION_NAME = os.environ.get('OS_REGION_NAME', '')
OS_TENANT_NAME = os.environ.get('OS_TENANT_NAME', '')
SWIFT_CONTAINER_NAME=os.environ.get('SWIFT_CONTAINER_NAME', 'devportal_uploaded')
DEFAULT_FILE_STORAGE = "swiftstorage.storage.SwiftStorage"
SWIFT_STATICCONTAINER_NAME=os.environ.get('SWIFT_STATICCONTAINER_NAME', 'devportal_static')
SWIFT_STATICFILE_PREFIX=''
STATICFILES_STORAGE = 'swiftstorage.storage.SwiftStaticStorage'

# Media and static files are served from the Swift containers when
# SWIFT_URL_BASE is set; otherwise from local /media/ and /static/.
MEDIA_URL = os.environ.get('SWIFT_URL_BASE', '/media/') + "/%s/" % SWIFT_CONTAINER_NAME
STATIC_URL = os.environ.get('SWIFT_URL_BASE', '/static/') + "/%s/" % SWIFT_STATICCONTAINER_NAME
ASSETS_URL = os.environ.get('ASSETS_URL_BASE', '//assets.ubuntu.com/')

# Page-cache middleware: one hour.
CACHE_MIDDLEWARE_SECONDS = 3600
# NOTE(review): CACHE_MIDDLEWARE_ANONYMOUS_ONLY was removed in newer Django
# releases — confirm the targeted Django version still honours it.
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True

# Route Django ERROR-level logs to a file.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'errors': {
            'level': 'ERROR',
            'class': 'logging.FileHandler',
            'filename': '../../logs/django_errors.log',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['errors'],
            'level': 'ERROR',
            'propagate': True,
        },
    },
}
|
import ast

# Each element is the *textual* repr of a Python list (e.g. from a CSV dump);
# ast.literal_eval parses it back into a real list without executing code.
thing = ["['bank of america']", '[]', '[]', "['technopark kollam']"]
for org_list in thing:
    print(ast.literal_eval(org_list))
# print((thing))
|
"""
Plugin for Rackspace cloud load balancer mock.
"""
from mimic.rest.loadbalancer_api import LoadBalancerApi, LoadBalancerControlApi
loadbalancer = LoadBalancerApi()
loadbalancer_control = LoadBalancerControlApi(lb_api=loadbalancer)
|
import random
import math
# -- Utility functions -- #
def sigmoid(i):
    """Logistic activation: maps any real number into (0, 1)."""
    return 1.0 / (1.0 + math.exp(-i))
def dot(v, w):
    """Dot product of two equal-length numeric vectors."""
    return sum(a * b for a, b in zip(v, w))
class Neuron:
    """A single sigmoid neuron with momentum and an adaptive learning factor."""

    def __init__(self, n_inputs = 1, learning_rate = 0.3):
        # Weights and threshold drawn uniformly from +-2.4 / n_inputs.
        bound = 2.4/n_inputs
        self.weights = [random.uniform(-bound, bound) for _ in range(n_inputs)]
        self.previous_delta = 0
        self.threshold = random.uniform(-bound, bound)
        self.learning_rate = learning_rate
        self.adaptive_factor = 1

    def adjust_weight_and_threshold(self, delta, inputs_vector):
        """Apply one delta-rule update (with momentum) to weights and threshold."""
        momentum = 0.5 * self.previous_delta
        self.adaptive_factor += (delta - self.previous_delta)
        self.threshold -= self.learning_rate * delta
        for idx, input_value in enumerate(inputs_vector):
            # Learning-rate-scaled adjustment plus the momentum term.
            step = self.learning_rate * delta * input_value * self.adaptive_factor
            self.weights[idx] += (step + momentum)
        self.previous_delta = delta

    def output(self, input_vector):
        """Sigmoid of the weighted input sum, offset by the threshold."""
        return sigmoid(dot(input_vector, self.weights) - self.threshold)
# -- Neural network functions -- #
def feed_forward(n_network, input_v):
    """Propagate *input_v* through every layer of the network.

    Returns one output vector per layer (the last entry is the network's
    final output); each layer feeds on the previous layer's outputs.
    """
    layered_outputs = []
    current = input_v
    for layer in n_network:
        current = [neuron.output(current) for neuron in layer]
        layered_outputs.append(current)
    return layered_outputs
def back_propagate(neural_network, input_v, target):
    """One online backprop step for a 2-layer network [hidden, output].

    Updates every neuron's weights and threshold in place, moving the
    network's output for *input_v* toward *target*.
    """
    outer_layer = neural_network[1]
    hidden_layer = neural_network[0]
    hidden_output_v, final_output_v = feed_forward(neural_network, input_v)
    # Output-layer deltas: error times the sigmoid derivative o * (1 - o).
    error_v = [target - output for output in final_output_v]
    error_adjusts = [error * output * (1 - output) for error, output in zip(error_v, final_output_v)]
    # adjust weight & threshold for final output layer
    for i, outer_neuron in enumerate(outer_layer):
        outer_neuron.adjust_weight_and_threshold(error_adjusts[i], hidden_output_v)
    # Hidden-layer deltas: each output delta propagated back through the
    # weight connecting hidden neuron i to that output neuron.
    feedback_ratio_v = [dot(error_adjusts, [neuron.weights[i] for neuron in outer_layer])
                        for i in range(len(hidden_layer))]
    hidden_adjusts = [hidden_output * (1 - hidden_output) * feeback_ratio
                      for hidden_output, feeback_ratio in zip(hidden_output_v, feedback_ratio_v)]
    # print("Feedback ratio {}, hidden_adjusts {}".format(feedback_ratio_v, hidden_adjusts))
    # adjust weight & threshold for hidden layer
    for i, hidden_neuron in enumerate(hidden_layer):
        hidden_neuron.adjust_weight_and_threshold(hidden_adjusts[i], input_v)
def classify(neural_network, input):
    """Return only the network's final-layer output for *input*."""
    all_layer_outputs = feed_forward(neural_network, input)
    return all_layer_outputs[-1]
def network_factory(n_inputs, n_hidden, n_outputs, learning_rate):
    """Build a 2-layer network: n_hidden hidden neurons feeding n_outputs.

    BUG FIX: the hidden layer was previously sized with ``n_inputs``,
    silently ignoring ``n_hidden`` (they happened to be equal at the one
    call site, so the bug had no visible effect there).
    """
    hidden_layer = [Neuron(n_inputs=n_inputs, learning_rate=learning_rate) for _ in range(n_hidden)]
    output_layer = [Neuron(n_inputs=len(hidden_layer), learning_rate=learning_rate) for _ in range(n_outputs)]
    return [hidden_layer, output_layer]
if __name__ == "__main__":
data = [
# Black
[0.1, 1],
[0.33, 0.98],
[0.7, 0.67],
[0.9, 0.4],
# White
[0.05, 0.72],
[0.25, 0.55],
[0.42, 0.12],
[0.6, 0.3]
]
target = [1,1,1,1,0,0,0,0]
def stepper(start, stop, step):
i = start
while i < stop:
yield i
i += step
alphas = []
for a in stepper(0.8, 1.6, 0.02):
network = network_factory(n_inputs=2,n_hidden=2,n_outputs=1, learning_rate=a)
for i in range(3500):
error = 0
for d, t in zip(data, target):
back_propagate(network, d, t)
error += (classify(network, d)[0] - t) ** 2
if error < 0.01:
alphas.append({'a': a, 'iters': i})
break
for alpha in alphas:
print("Alpha {0} :: {1}".format(alpha['a'], alpha['iters']))
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
    # Core pages and authentication.
    path("",views.index,name="index"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    # Upload form, search and API.
    path("Form_page",views.Form_page,name="Form_page"),
    path("Form",views.Form,name="Form"),
    path("Search",views.Search,name="Search"),
    path("api",views.api,name="api"),
    path("profile_pic/<int:user_id>",views.profile_pic,name="profile_pic"),
    # Likes and per-user collections.
    path("add_like/<int:image_id>",views.add_like,name="add_like"),
    path("collection",views.collection,name="collection"),
    path("add_collection/<int:image_id>",views.add_collection,name="add_collection"),
    path("remove_collection/<int:image_id>",views.remove_collection,name="remove_collection"),
    # Server-side image edit operations.
    path("edit_flip/<int:image_id>",views.edit_flip,name="edit_flip"),
    path("edit_contrast/<int:image_id>",views.edit_contrast,name="edit_contrast"),
    path("edit_bw/<int:image_id>",views.edit_bw,name="edit_bw"),
    path("edit_median/<int:image_id>",views.edit_median,name="edit_median"),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
import pygame, sys
import random
from neurodot_present.present_lib import Screen, FixationCross, CheckerBoardFlasher, bell, UserEscape, run_start_sequence, run_stop_sequence
# Presentation script: shows a static checkerboard task, then a rest period,
# then a black-screen task, bracketing each task with a bell sound.
# Fixed for Python 3: the original used the Python 2 print statement and the
# Python 2-only "except Exception, err" syntax.
pygame.init()
TASK_DURATION = 30  # seconds
FC = FixationCross()
try:
    # start sequence
    run_start_sequence()
    # setup task: static checkerboard (flash_rate=0 means no flashing) and a
    # black screen that keeps the fixation cross on display
    CBF = CheckerBoardFlasher(flash_rate=0)  # no flashing
    CBF.setup_checkerboard(64)
    black_SCR = Screen(color="black", fixation_cross=FC)
    # run sequence; vsync_value tags each segment for the recording hardware
    CBF.run(duration=TASK_DURATION, vsync_value=1)
    black_SCR.run(duration=1, vsync_value=0)
    bell()
    black_SCR.run(duration=5, vsync_value=0)
    black_SCR.run(duration=TASK_DURATION, vsync_value=2)
    black_SCR.run(duration=1, vsync_value=0)
    bell()
except UserEscape:
    print("User stopped the sequence")
except Exception:
    # Bare raise preserves the original traceback (the old "raise err" lost it).
    raise
finally:
    # stop sequence: always release the display, even after an error or escape
    run_stop_sequence()
    pygame.quit()
    sys.exit()
|
import requests
from requests.exceptions import RequestException
def get_page(url):
    """Fetch *url* and return the response body as text.

    :param url: Absolute URL to request.
    :return: Response body (str) on HTTP 200; None on any other status
             code or on a request failure.
    """
    # BUG FIX: the headers were previously written as a *set* literal
    # ({'User-Agent: ...'}), not a dict, so requests never received a valid
    # header-name -> value mapping. Build a proper dict instead.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Network/protocol errors are treated the same as a bad status code.
        return None
if __name__ == '__main__':
    # Smoke test: fetch the Douban Top-250 page and dump the HTML (or None).
    target_url = "https://movie.douban.com/top250"
    print(get_page(target_url))
|
from rest_framework import serializers
class ImgType(object):
    """Lightweight wrapper around an uploaded image.

    Supports dict-style field access (obj['image']) so serializers can read
    attributes generically.
    """
    def __init__(self, image, **kwargs):
        self.image = image
    def __str__(self):
        # BUG FIX: the original returned str(self).encode('utf-8'), which
        # re-invoked __str__ and recursed infinitely (and __unicode__ did the
        # same via str(self)). Represent the object by its image instead.
        return str(self.image)
    def __unicode__(self):
        # Python 2 compatibility shim; delegates to __str__.
        return self.__str__()
    def __getitem__(self, item):
        """Allow subscript access to any attribute, e.g. obj['image']."""
        return getattr(self, item)
class ImgSerializer(serializers.Serializer):
    """DRF serializer exposing a single uploaded ``image`` field."""
    image = serializers.ImageField()
    def create(self, validated_data):
        """Build an ImgType instance from the validated payload."""
        return ImgType(**validated_data)
|
# -*- coding: utf-8 -*-
import time
import os.path
from jinja2 import Environment
import subprocess
from celery.utils.log import get_logger
from architect.repository.client import BaseClient
from architect.repository.models import Resource
logger = get_logger(__name__)
class BbbClient(BaseClient):
    """Repository client that builds and manages BeagleBone/BeagleBoard disk
    images via an external ``gen-image.sh`` script.

    Paths come from ``self.metadata``: ``builder_dir`` is the image-builder
    checkout, ``image_dir`` the directory where ``.img``/``.bmap`` files land.
    """
    def __init__(self, **kwargs):
        """Forward all keyword arguments to BaseClient."""
        super(BbbClient, self).__init__(**kwargs)
    def check_status(self):
        """Health check; this backend is always reported as reachable."""
        return True
    def get_image_types(self):
        """Return the supported (identifier, human-readable label) image types."""
        return (
            ('bbb-armhf-debian-stretch-4.9', 'BeagleBone Black ARM, Debian Stretch, kernel 4.9'),
            ('bbb-armhf-debian-stretch-4.14', 'BeagleBone Black ARM, Debian Stretch, kernel 4.14'),
            ('bbb-armhf-debian-buster-4.14', 'BeagleBone Black ARM, Debian Buster, kernel 4.14'),
            ('bbx15-armhf-debian-stretch-4.9', 'BeagleBoard X15 ARM, Debian Stretch, kernel 4.9'),
            ('bbx15-armhf-debian-stretch-4.14', 'BeagleBoard X15 ARM, Debian Stretch, kernel 4.14'),
            ('bbx15-armhf-debian-buster-4.14', 'BeagleBoard X15 ARM, Debian Buster, kernel 4.14'),
        )
    def get_script_file(self, config_context):
        """Build the shell command line that generates an image.

        NOTE(review): this string is later executed with shell=True in
        generate_image(); image_name/hostname values containing shell
        metacharacters would be injected — confirm the inputs are trusted.
        """
        # The identifier prefix (e.g. 'bbb', 'bbx15') selects the target board.
        platform = config_context['type'].split('-')[0]
        script_file = 'cd {}; ./gen-image.sh {} {} {} {}'.format(self.metadata['builder_dir'],
                                                                 config_context['image_name'],
                                                                 config_context['hostname'],
                                                                 platform,
                                                                 self.metadata['image_dir'])
        return script_file
    def get_config_file(self, image):
        """Return the path of the builder config file for *image*."""
        config_file = '{}/configs/{}.conf'.format(self.metadata['builder_dir'],
                                                  image)
        return config_file
    def get_config_template(self):
        """Read and return the Jinja2 config template shipped next to this module."""
        base_path = os.path.abspath(os.path.dirname(__file__))
        path = os.path.join(base_path, "templates/config.sh")
        with open(path) as file_handler:
            data = file_handler.read()
        return data
    def get_image_block_map(self, image_name):
        """Return the contents of the image's .bmap file, or None if absent."""
        map_path = '{}/{}.bmap'.format(self.metadata['image_dir'],
                                       image_name)
        if os.path.isfile(map_path):
            with open(map_path) as file_handler:
                return file_handler.read()
        return None
    def get_image_location(self, image_name):
        """Return the expected filesystem path of the built .img file."""
        return '{}/{}.img'.format(self.metadata['image_dir'],
                                  image_name)
    def get_image_size(self, image_name):
        """Return the size of the built image in bytes, or None if it does not exist."""
        image_path = '{}/{}.img'.format(self.metadata['image_dir'],
                                        image_name)
        if os.path.isfile(image_path):
            return os.path.getsize(image_path)
        else:
            return None
    def delete_image(self, image_name):
        """Remove the image's .img and .bmap files if they exist."""
        image_path = '{}/{}.img'.format(self.metadata['image_dir'],
                                        image_name)
        map_path = '{}/{}.bmap'.format(self.metadata['image_dir'],
                                       image_name)
        if os.path.isfile(image_path):
            os.remove(image_path)
        if os.path.isfile(map_path):
            os.remove(map_path)
    def generate_image(self, config_context):
        """Render the builder config, run gen-image.sh, and record the outcome.

        Updates the matching Resource row: status becomes 'active' when the
        .img file exists afterwards, 'error' otherwise; the rendered config,
        command output and build duration are stored in the resource cache.
        Always returns True (success is reflected in the resource status).
        """
        config_context['repository'] = self.metadata
        script_file = self.get_script_file(config_context)
        config_template = self.get_config_template()
        config_content = Environment().from_string(config_template).render(config_context)
        with open(self.get_config_file(config_context['image_name']), "w+") as file_handler:
            file_handler.write(config_content)
        duration = None
        try:
            start = time.time()
            # NOTE(review): shell=True with an interpolated command string —
            # see get_script_file() about untrusted input.
            cmd_output = subprocess.check_output(script_file,
                                                 shell=True,
                                                 stderr=subprocess.STDOUT).decode('UTF-8')
            end = time.time()
            duration = end - start
        except subprocess.CalledProcessError as ex:
            # Build failed: keep the script output; duration stays None.
            cmd_output = ex.output.decode('UTF-8')
        image_path = os.path.join(self.metadata['image_dir'], '{}.img'.format(config_context['image_name']))
        image = Resource.objects.get(name=config_context['image_name'])
        cache = {
            'config': config_content,
            'command': cmd_output,
            'duration': duration
        }
        if os.path.isfile(image_path):
            image.status = 'active'
            cache['block_map'] = self.get_image_block_map(config_context['image_name'])
            cache['image_size'] = self.get_image_size(config_context['image_name'])
        else:
            image.status = 'error'
            cache['block_map'] = None
        image.cache = cache
        image.save()
        return True
|
# Print every 3-letter string over the first n lowercase letters,
# in lexicographic order, one per line.
n = int(input())
alphabet = [chr(code) for code in range(97, 97 + n)]
for first in alphabet:
    for second in alphabet:
        for third in alphabet:
            print(first + second + third)
|
from datetime import date
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.contrib.auth.decorators import login_required
from requests.models import Comment, Request
@login_required
def index(request):
    """Render the five most recently published requests (login required)."""
    recent = Request.objects.order_by('-pub_date')[:5]
    return render(request, 'requests/index.html', {'latest_request_list': recent})
def detail(request, request_id):
    """Render the detail page for one Request.

    Raises Http404 when no Request with the given primary key exists.
    Uses get_object_or_404 (already imported and used by comment() below)
    instead of a hand-rolled try/except — same Http404 behavior, less code.
    """
    r = get_object_or_404(Request, pk=request_id)
    return render(request, 'requests/detail.html', {'request': r})
def results(request, request_id):
    """Placeholder results page for a request."""
    message = "You're looking at the results of request %s." % request_id
    return HttpResponse(message)
@login_required
def comment(request, request_id):
    """Attach a POSTed comment to a Request and redisplay its detail page."""
    # The Request being commented on; 404 if it does not exist.
    parent = get_object_or_404(Request, pk=request_id)
    # Text the user entered in the comment form.
    text = request.POST['comment']
    # Persist the new Comment record, stamped with today's date.
    new_comment = Comment(request=parent, by=request.user,
                          comment=text, pub_date=date.today())
    new_comment.save()
    # Redisplay the request form with an acknowledgement message.
    return render(request, 'requests/detail.html', {
        'request': parent,
        'error_message': "Thank you for commenting.",
    })
|
##
## denotes automatically detected regions obtained from standard object
## detectors like face detector, eye detector, etc.
##
## Author: Abhishek Dutta <adutta@robots.ox.ac.uk>
## Date: 12 Dec. 2018
##
import threading
import os
import csv
import json # for debug
import cv2
import numpy as np
import math
import copy # to clone dict
import svt.models as models
import svt.siamrpn_tracker as siamrpn_tracker
import torch
from functools import partial
import pickle
from torch.autograd import Variable
import torch.nn.functional as F
class detections():
    """Container for per-frame object-detection bounding boxes in a video.

    Data layout:
        detection_data[shot_id][frame_id][box_id] = [track_id, x, y, w, h]
    (ids are strings when loaded from CSV). ``match()`` links boxes of
    consecutive frames into tracks using an external tracker; the
    ``export_*`` methods write the result as plain CSV, a VIA annotation
    CSV, or a VIA project JSON.
    """
    def __init__(self, detection_data=None, frame_id_to_filename_map=None):
        """Optionally initialise from pre-built maps (kept by reference, not copied)."""
        if detection_data is not None and frame_id_to_filename_map is not None:
            self.detection_data = detection_data
            self.frame_id_to_filename_map = frame_id_to_filename_map
    def read(self, detection_data, frame_id_to_filename_map):
        """Load detections from in-memory maps, deep-copying both arguments."""
        self.detection_data = copy.deepcopy(detection_data)
        self.frame_id_to_filename_map = copy.deepcopy(frame_id_to_filename_map)
    def read_from_file(self, data_filename, data_format):
        """Load detections from a file; only the 'csv' format is supported.

        Silently does nothing if the file is missing or the format differs.
        """
        if os.path.isfile(data_filename) and data_format == 'csv':
            self._read_detections_from_csv(data_filename)
    def _read_detections_from_csv(self, filename):
        """Populate detection_data and frame_id_to_filename_map from a CSV file.

        Expected columns: shot_id, frame_id, frame_filename, track_id,
        box_id, x, y, width, height.
        :raises ValueError: when a box_id repeats within a (shot_id, frame_id).
        """
        self.detection_data = {}
        self.frame_id_to_filename_map = {}
        with open(filename) as csvfile:
            filedata = csv.DictReader(csvfile)
            for row in filedata:
                bounding_box = [ int(row['track_id']),
                                 float(row['x']),
                                 float(row['y']),
                                 float(row['width']),
                                 float(row['height']) ]
                if row['frame_id'] not in self.frame_id_to_filename_map:
                    self.frame_id_to_filename_map[ row['frame_id'] ] = row['frame_filename']
                if row['shot_id'] in self.detection_data:
                    if row['frame_id'] in self.detection_data[ row['shot_id'] ]:
                        if row['box_id'] not in self.detection_data[ row['shot_id'] ][ row['frame_id'] ]:
                            ## append new box to existing (shot_id,frame_id) pair
                            self.detection_data[ row['shot_id'] ][ row['frame_id'] ][ row['box_id'] ] = bounding_box
                        else:
                            ## box_id must be unique for each (shot_id,frame_id) pair
                            # NOTE(review): DictReader yields str values, so these '%d'
                            # specifiers would raise TypeError if this path is hit;
                            # they probably should be '%s' — confirm and fix.
                            raise ValueError('box_id=%d is not unique for shot_id=%d and frame_id=%d' %
                                             (row['box_id'], row['shot_id'], row['frame_id']))
                    else:
                        ## add new frame_id and then a new box to this shot_id
                        self.detection_data[ row['shot_id'] ][ row['frame_id'] ] = {}
                        self.detection_data[ row['shot_id'] ][ row['frame_id'] ][ row['box_id'] ] = bounding_box
                else:
                    ## create new (shot_id,frame_id,box_id)
                    self.detection_data[ row['shot_id'] ] = {}
                    self.detection_data[ row['shot_id'] ][ row['frame_id'] ] = {}
                    self.detection_data[ row['shot_id'] ][ row['frame_id'] ][ row['box_id'] ] = bounding_box
    def match(self, tracker, config):
        """Assign track ids by tracking each box from frame k into frame k+1.

        For every consecutive frame pair within a shot, the tracker is
        initialised on each template box and its predicted location in the
        next frame is matched by overlap against that frame's detections; a
        match at or above config['match_overlap_threshold'] propagates the
        track id (allocating a fresh globally-unique id when the template box
        still carries config['UNKNOWN_TRACK_ID_MARKER']). Mutates
        detection_data in place.

        :param tracker: object providing init_tracker(img, bbox) and
                        track(img) -> (pos, size, score).
        :param config: dict with keys frame_img_dir, verbose,
                       match_overlap_threshold, UNKNOWN_TRACK_ID_MARKER.
        """
        next_track_id = 0 # initialize globally unique track id
        for shot_id in self.detection_data:
            if config['verbose']:
                print('Processing shot_id=%s' % (shot_id))
            #### retrieve a sorted list of all frame_id for a given shot_id
            frame_id_list = sorted( self.detection_data[shot_id], key=int ) # key=int ensures frame_id is treated as number
            #### run a forward matching pass for each pair of consecutive frames
            for frame_id_index in range(0, len(frame_id_list) - 1):
                template_frame_id = frame_id_list[frame_id_index]
                search_frame_id = frame_id_list[frame_id_index + 1]
                template_fn = self.frame_id_to_filename_map[ template_frame_id ]
                search_fn = self.frame_id_to_filename_map[ search_frame_id ]
                search_bbox_list = self.detection_data[shot_id][ search_frame_id ]
                #print(' %s -> %s' % (template_frame_id, search_frame_id))
                #### Preload template and search image
                template_abs_fn = os.path.join(config['frame_img_dir'], template_fn)
                search_abs_fn = os.path.join(config['frame_img_dir'], search_fn)
                template_img = self.load_image(template_abs_fn)
                search_img = self.load_image(search_abs_fn)
                for box_id in self.detection_data[shot_id][template_frame_id]:
                    #print('  box_id=%s' % (box_id))
                    b = self.detection_data[shot_id][template_frame_id][box_id]
                    template_bbox = [ b[0], int(b[1]), int(b[2]), int(b[3]), int(b[4]) ] # we don't need float
                    #### initialize tracker using frame k
                    tracker.init_tracker(template_img, template_bbox)
                    #### track the object in frame (k+1)
                    # NOTE(review): pos appears to be the box centre (x, y) and
                    # size its (width, height) — the conversion below assumes so;
                    # confirm against the tracker implementation.
                    pos, size, score = tracker.track(search_img);
                    tracked_search_bbox = [ template_bbox[0],
                                            int(pos[0] - size[0]/2),
                                            int(pos[1] - size[1]/2),
                                            int(size[0]),
                                            int(size[1]) ]
                    max_overlap_search_box_id, max_overlap = self.find_most_overlapping_bbox(tracked_search_bbox, search_bbox_list)
                    #print('  overlap=%f, search bbox_id=%s' % (max_overlap, max_overlap_search_box_id))
                    if max_overlap >= config['match_overlap_threshold']:
                        # propagate the track_id of template's bbox to the matched search bbox
                        if template_bbox[0] == config['UNKNOWN_TRACK_ID_MARKER']:
                            self.detection_data[shot_id][template_frame_id][box_id][0] = next_track_id
                            next_track_id = next_track_id + 1
                        self.detection_data[shot_id][ search_frame_id ][max_overlap_search_box_id][0] = self.detection_data[shot_id][template_frame_id][box_id][0]
                        #print('  %s is track %d' % (search_frame_id, template_bbox[0]))
    def export(self, outfile, outfmt, config):
        """Dispatch to the exporter matching *outfmt*; print a message if unknown."""
        if outfmt == 'plain_csv':
            self.export_plain_csv(outfile, config)
            return
        if outfmt == 'via_annotation':
            self.export_via_annotation(outfile, config)
            return
        if outfmt == 'via_project':
            self.export_via_project(outfile, config)
            return
        print('Unknown export format %s' % (outfmt))
    def export_plain_csv(self, outfile, config):
        """Write all detections as one flat CSV row per bounding box."""
        with open(outfile, 'w') as csvfile:
            csvfile.write('shot_id,frame_id,frame_filename,track_id,box_id,x,y,width,height\n')
            for shot_id in self.detection_data:
                for frame_id in self.detection_data[shot_id]:
                    row_prefix = '%s,%s,"%s",' % (shot_id,
                                                  frame_id,
                                                  self.frame_id_to_filename_map[ frame_id ])
                    for box_id in self.detection_data[shot_id][frame_id]:
                        box = self.detection_data[shot_id][frame_id][box_id]
                        row_suffix1 = '%d,%s,%.3f,%.3f,%.3f,%.3f\n' % (box[0], box_id, box[1], box[2], box[3], box[4])
                        #row_suffix1 = '%d,%s,%d,%d,%d,%d\n' % (box[0], box_id, box[1], box[2], box[3], box[4])
                        csvfile.write( row_prefix + row_suffix1 )
    def export_via_annotation(self, outfile, config):
        """Write detections as a VIA annotation CSV (one row per region).

        Needs config['frame_img_dir'] to stat each frame file for its size.
        """
        with open(outfile, 'w') as viafile:
            viafile.write('filename,file_size,file_attributes,region_count,region_id,region_shape_attributes,region_attributes\n')
            for shot_id in self.detection_data:
                for frame_id in self.detection_data[shot_id]:
                    frame_abs_path = os.path.join(config['frame_img_dir'],
                                                  self.frame_id_to_filename_map[ frame_id ])
                    frame_filesize = os.path.getsize(frame_abs_path)
                    row_prefix = '%s,%d,{},%d,' % (frame_abs_path,
                                                   frame_filesize,
                                                   len(self.detection_data[shot_id][frame_id]))
                    for box_id in self.detection_data[shot_id][frame_id]:
                        box = self.detection_data[shot_id][frame_id][box_id]
                        row_suffix1 = '%s,"{""name"":""rect"",""x"":%d,""y"":%d,""width"":%d,""height"":%d}",' % (box_id, box[1], box[2], box[3], box[4])
                        row_suffix2 = '"{""shot_id"":%s,""frame_id"":%s,""box_id"":%s,""track_id"":%d}"\n' % (shot_id, frame_id, box_id, box[0])
                        viafile.write( row_prefix + row_suffix1 + row_suffix2 )
    def export_via_project(self, outfile, config):
        """Write detections as a complete VIA project JSON file.

        Uses config['frame_img_dir'] (for file sizes and the default filepath)
        and config['via_project_name'].
        """
        via_project = {}
        # Viewer/editor settings baked into the exported project.
        via_project['_via_settings'] = {
            "ui": {
                "annotation_editor_height": 25,
                "annotation_editor_fontsize": 0.8,
                "leftsidebar_width": 18,
                "image_grid": {
                    "img_height": 80,
                    "rshape_fill": "none",
                    "rshape_fill_opacity": 0.3,
                    "rshape_stroke": "yellow",
                    "rshape_stroke_width": 2,
                    "show_region_shape": True,
                    "show_image_policy": "all"
                },
                "image": {
                    "region_label": "__via_region_id__",
                    "region_label_font": "10px Sans",
                    "on_image_annotation_editor_placement": "NEAR_REGION"
                }
            },
            "core": {
                "buffer_size": 18,
                "filepath": {},
                "default_filepath": config['frame_img_dir'] + os.sep
            },
            "project": {
                "name": config['via_project_name']
            }
        }
        # Attribute schema: shot/frame ids on files, track/box ids on regions.
        via_project['_via_attributes'] = {
            "file":{
                "shot_id":{
                    "type": "text",
                    "description": "video frames shot continually by a camera are grouped under a single unique shot_id",
                    "default_value": "not_defined"
                },
                "frame_id":{
                    "type": "text",
                    "description": "unique id of each frame",
                    "default_value": "not_defined"
                }
            },
            "region":{
                "track_id":{
                    "type": "text",
                    "description": "regions corresponding to same object have the same globally unique track_id",
                    "default_value": "not_defined"
                },
                "box_id":{
                    "type": "text",
                    "description": "each region in a frame is assigned a unique box_id",
                    "default_value": "not_defined"
                }
            }
        }
        via_project['_via_img_metadata'] = {}
        for shot_id in self.detection_data:
            for frame_id in self.detection_data[shot_id]:
                frame_filename = self.frame_id_to_filename_map[ frame_id ]
                frame_abs_path = os.path.join(config['frame_img_dir'],
                                              frame_filename)
                frame_filesize = os.path.getsize(frame_abs_path)
                # VIA keys each image by filename + filesize.
                fileid = '%s%d' % (frame_filename, frame_filesize)
                via_project['_via_img_metadata'][fileid] = {'filename':frame_filename,
                                                            'size':frame_filesize}
                via_project['_via_img_metadata'][fileid]['file_attributes'] = {'shot_id':shot_id,
                                                                               'frame_id':frame_id}
                via_project['_via_img_metadata'][fileid]['regions'] = []
                for box_id in self.detection_data[shot_id][frame_id]:
                    box = self.detection_data[shot_id][frame_id][box_id]
                    via_project['_via_img_metadata'][fileid]['regions'].append( {
                        'shape_attributes':{'name':'rect', 'x':box[1], 'y':box[2], 'width':box[3], 'height':box[4]},
                        'region_attributes':{'track_id':box[0], 'box_id':box_id}
                    } )
        with open(outfile, 'w') as jsonfile:
            json.dump(via_project, jsonfile, indent=None, separators=(',',':'))
    def find_most_overlapping_bbox(self, new_bbox, existing_bbox_list):
        """Return (box_id, overlap) of the box in *existing_bbox_list* that
        overlaps *new_bbox* the most; (-1, -1.0) when the list is empty."""
        max_overlap = -1.0
        max_overlap_bbox_id = -1
        for bbox_id in existing_bbox_list:
            bbox_i = existing_bbox_list[bbox_id]
            overlap = self.compute_overlap(new_bbox, bbox_i)
            if overlap > max_overlap:
                max_overlap = overlap
                max_overlap_bbox_id = bbox_id
        return max_overlap_bbox_id, max_overlap
    # assumption: a, b = [track_id, x, y, width, height]
    # see: https://gist.github.com/vierja/38f93bb8c463dce5500c0adf8648d371
    def compute_overlap(self, a, b):
        """Return the intersection-over-union of boxes *a* and *b*.

        Nested rectangles are special-cased; the small epsilon in the
        denominator guards against division by zero for degenerate boxes.
        """
        x11 = a[1]
        y11 = a[2]
        x12 = a[1] + a[3]
        y12 = a[2] + a[4]
        x21 = b[1]
        y21 = b[2]
        x22 = b[1] + b[3]
        y22 = b[2] + b[4]
        intersect_area = 0
        union_area = 0
        ## check if we have nested rectangles (a inside b)
        if self.is_inside(x11, y11, x21, y21, x22, y22) and self.is_inside(x12, y12, x21, y21, x22, y22):
            intersect_area = (x12 - x11) * (y12 - y11)
            union_area = (x22 - x21) * ( y22 - y21)
        else:
            ## check if we have nested rectangles (b inside a)
            if self.is_inside(x21, y21, x11, y11, x12, y12) and self.is_inside(x22, y22, x11, y11, x12, y12):
                intersect_area = (x22 - x21) * ( y22 - y21)
                union_area = (x12 - x11) * (y12 - y11)
            else:
                ## rectangles overlap or they do not overlap
                x0_intersect = max(x11, x21)
                y0_intersect = max(y11, y21)
                x1_intersect = min(x12, x22)
                y1_intersect = min(y12, y22)
                intersect_area = max((x1_intersect - x0_intersect), 0) * max((y1_intersect - y0_intersect), 0)
                union_area = (x12-x11)*(y12-y11) + (x22-x21)*(y22-y21) - intersect_area
        return intersect_area / (union_area + 0.00001)
    def is_inside(self, x, y, x0, y0, x1, y1):
        """Return True if point (x, y) lies within the rectangle (x0, y0)-(x1, y1), inclusive."""
        if x >= x0 and x <= x1 and y >= y0 and y <= y1:
            return True
        else:
            return False
    def load_image(self, fn):
        """Read image file *fn* with OpenCV (returns None if the file cannot be read)."""
        im = cv2.imread(fn)
        return im
|
class Solution:
    """LeetCode 166: convert a fraction to its decimal string representation."""
    # @param numerator : integer
    # @param denominator : integer
    # @return a string
    def fractionToDecimal(self, numerator, denominator):
        """Return numerator/denominator as a decimal string.

        A repeating fractional part is wrapped in parentheses, e.g.
        1/6 -> "0.1(6)".

        BUG FIX: the original used Python 2's integer '/', which yields
        floats on Python 3 and corrupted the output; divmod() (floor
        division) works correctly on both.
        """
        if not numerator:
            return "0"
        # Normalise signs: emit a leading '-' only when exactly one operand
        # is negative, then work with absolute values.
        sign = ""
        if numerator < 0 and denominator < 0:
            numerator, denominator = abs(numerator), abs(denominator)
        elif numerator < 0 or denominator < 0:
            sign = "-"
            numerator, denominator = abs(numerator), abs(denominator)
        div, mod = divmod(numerator, denominator)
        if mod == 0:
            return sign + str(div)
        fraction = sign + str(div) + "."
        pos = len(fraction)
        # Map each remainder to the string position where its digit starts;
        # a repeated remainder means the digits cycle from that position.
        remainders = {mod: pos}
        while mod != 0:
            mod *= 10
            pos += 1
            div, mod = divmod(mod, denominator)
            fraction += str(div)
            if mod in remainders:
                start = remainders[mod]
                fraction = fraction[:start] + "(" + fraction[start:] + ")"
                break
            remainders[mod] = pos
        return fraction
|
# Identifiers of the databases enabled for this installation.
INSTALLED_DATABASES = ('cmaq', 'proximity', 'social')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.