| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
| danbooru/utils.py | Hiroshiba/danbooru-library | 0 | 12774351 |
# -*- coding: utf-8 -*-
# Copyright 2012 codestation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from os.path import exists, join, dirname, abspath
def list_generator(list_widget):
for i in range(list_widget.count()):
yield list_widget.item(i)
def parse_dimension(term, dim):
    """Parses a dimension term such as "width:>800" into query keys for `dim`."""
query = {}
if term[len("%s:" % dim)] == ">":
query['%s_type' % dim] = ">"
query[dim] = int(term.split(">")[1])
elif term[len("%s:" % dim)] == "<":
query['%s_type' % dim] = "<"
query[dim] = int(term.split("<")[1])
else:
query['%s_type' % dim] = "="
query[dim] = int(term.split(":")[1])
return query
def parse_query(text):
    """Parses a search string (or list of terms) into a query dictionary."""
query = {}
query['tags'] = []
if isinstance(text, list):
items = text
else:
items = re.sub(' +', ' ', text).split(' ')
try:
for item in items:
if item.startswith("site:"):
query['site'] = item.split(":")[1]
elif item.startswith("rating:"):
query['rating'] = item.split(":")[1]
elif item.startswith("width:"):
query.update(parse_dimension(item, "width"))
elif item.startswith("height:"):
query.update(parse_dimension(item, "height"))
elif item.startswith("ratio:"):
query['ratio'] = item.split(":", 1)[1]
query['ratio_width'] = int(item.split(":")[1])
query['ratio_height'] = int(item.split(":")[2])
elif item.startswith("limit:"):
query['limit'] = item.split(":")[1]
elif item.startswith("pool:"):
query['pool'] = item.split(":")[1]
else:
query['tags'].append(item)
return query
except (ValueError, TypeError, IndexError):
return item
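# Illustrative sketch (not part of the original module): how parse_query()
# handles a hypothetical search string with a filter term and a plain tag.
#
#   parse_query("rating:s width:>800 blue_hair")
#   # -> {'tags': ['blue_hair'], 'rating': 's', 'width_type': '>', 'width': 800}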
def find_resource(base, filename):
base_path = [dirname(abspath(base)),
"/usr/local/share/danbooru-daemon",
"/usr/share/danbooru-daemon"]
for path in base_path:
full_path = join(path, filename)
if exists(full_path):
return full_path
raise Exception("%s cannot be found." % filename)
def filter_posts(posts, query):
    """Filters posts in place by the rating, width, height, and ratio constraints in query."""
if query.get('rating'):
posts[:] = [post for post in posts
if post['rating'] == query['rating']]
if query.get('width'):
if query['width_type'] == "=":
posts[:] = [post for post in posts
if post['width'] == query['width']]
if query['width_type'] == "<":
posts[:] = [post for post in posts
if post['width'] < query['width']]
if query['width_type'] == ">":
posts[:] = [post for post in posts
if post['width'] > query['width']]
if query.get('height'):
if query['height_type'] == "=":
posts[:] = [post for post in posts
if post['height'] == query['height']]
if query['height_type'] == "<":
posts[:] = [post for post in posts
if post['height'] < query['height']]
if query['height_type'] == ">":
posts[:] = [post for post in posts
if post['height'] > query['height']]
if query.get('ratio'):
posts[:] = [post for post in posts
if post['width'] * 1.0 / post['height'] ==
query['ratio_width'] * 1.0 / query['ratio_height']]
return posts
def remove_duplicates(posts):
posts[:] = list(dict((x['id'], x) for x in posts).values())
return sorted(posts, key=lambda k: k['id'], reverse=True)
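# Illustrative sketch (not part of the original module): deduplicating and
# filtering a hypothetical post list with the helpers above.
#
#   posts = [{'id': 2, 'rating': 's', 'width': 1200, 'height': 900},
#            {'id': 1, 'rating': 'q', 'width': 800, 'height': 600},
#            {'id': 2, 'rating': 's', 'width': 1200, 'height': 900}]
#   posts = remove_duplicates(posts)   # keeps ids [2, 1], sorted descending
#   filter_posts(posts, parse_query("rating:s width:>1000"))
#   # -> only the post with id 2 remains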
| 2.609375 | 3 |

| src/blog/sitemap.py | manyunkai/dannysite4 | 22 | 12774352 |
# -*- coding: utf-8 -*-
"""
Created on 2015-05-21
@author: Danny<<EMAIL>>
DannyWork Project
"""
from __future__ import unicode_literals
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from .models import Blog
class BlogSitemap(Sitemap):
"""
Blog sitemap.
"""
changefreq = 'never'
priority = 0.8
def items(self):
return Blog.objects.filter(is_deleted=False, is_active=True).order_by('-created')
def lastmod(self, obj):
return obj.created
def location(self, obj):
return reverse('blog_detail', args=[obj.id])
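# Illustrative sketch (not part of the original module): one way this sitemap
# could be wired into a Django 1.x URLconf; the 'blog' key is an assumption.
#
#   from django.conf.urls import url
#   from django.contrib.sitemaps.views import sitemap
#   from blog.sitemap import BlogSitemap
#
#   sitemaps = {'blog': BlogSitemap}
#   urlpatterns = [
#       url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
#           name='django.contrib.sitemaps.views.sitemap'),
#   ]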
| 2.125 | 2 |

| plot.py | andylitalo/ccls_analysis | 0 | 12774353 |
"""
plot.py defines functions for plotting phase diagrams of complex
coacervate liquid separation.
"""
# standard libraries
import matplotlib.pyplot as plt
from matplotlib import cm # colormap
import numpy as np
import pandas as pd
# custom libraries
import pe
import salt as nacl
# plotting libraries
import plotly.graph_objects as go
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource, Title, Range1d
from bokeh.models.tools import HoverTool
# CONSTANTS
NA = 6.022E23 # Avogadro's number, molecules / mol
m3_2_L = 1E3 # conversion from cubic meters to liters
K_2_C = 273.15 # conversion from Kelvin to Celsius (subtract this)
m_2_A = 1E10 # conversion from meters to Angstroms
def alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
T_range=[273.15, 373.15], cmap_name='plasma', sigma=None,
colors=None, marker='o', lw=1, T_cels=False,
y_lim=[0.5, 1], square_box=False, tol=0.05, ax=None,
show_lgnd=True):
"""
Plots the volume fraction of supernatant phase I (alpha) vs. the overall
density of the varied component.
Note: currently eliminates data points with alpha = 1 because they tend to
be the result of numerical imprecision
T_range : 2-tuple
Lower and upper bounds on temperature to consider in degrees Kelvin
(even if T_cels is True)
tol : float, optional
    Tolerance for how close the volume fraction nearest the single-phase
    region must be to 1 (or 0.5) to be rounded for plotting the dashed line
"""
# creates list of colors for each value of the varied density
if colors is None:
colors = get_colors(cmap_name, len(rho_p_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# plots volume fraction of supernatant for each composition
for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
# plots binodal for low polymer concentration [M]
rho_p, rho_s = rho_pair
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = results['lB']
alpha = results['alpha']
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \
(np.asarray(alpha) != 1)
if T_cels:
T_arr -= K_2_C
# plots alpha vs. T for given rho_p
alpha_arr = np.array(alpha)[liq_h2o]
T_arr = T_arr[liq_h2o]
ax.plot(T_arr, alpha_arr, color=colors[i],
marker=marker, lw=lw, label=r'$\rho_p = $' + \
'{0:.2f} M, '.format(rho_p) + r'$\rho_s = $' + \
'{0:.2f} M'.format(rho_s))
### Single Phase
# plots dashed line to lowest temperature if single phase
# *checks if lowest plotted temperature reaches y axis
T_min = np.min(T_arr)
if T_min > np.min(ax.get_xlim()):
alpha_single_phase = alpha_arr[np.argmin(T_arr)]
# rounds up to 1 if volume fraction is close (discontinuous phase sep)
if np.abs(alpha_single_phase - 1) < tol:
ax.plot([T_min, T_min], [alpha_single_phase, 1], '-', lw=lw,
color=colors[i])
alpha_single_phase = 1
# rounds to 0.5 if volume fraction is close (passes through LCST)
if np.abs(alpha_single_phase - 0.5) < tol:
alpha_single_phase = 0.5
# plots horizontal dashed line to indicate single phase at low T
ax.plot([ax.get_xlim()[0], T_min],
[alpha_single_phase, alpha_single_phase], '--',
lw=lw, color=colors[i])
# determines labels and limits of axes
if T_cels:
x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
x_label = r'$T$'
x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
y_label = r'$V_{sup}/V_{tot}$'
# formats plot
format_binodal(ax, x_label, x_unit, T_range, x_lim=x_lim, y_lim=y_lim,
y_label=y_label, square_box=square_box, show_lgnd=show_lgnd)
return ax
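# Illustrative sketch (not part of the original module): a hypothetical call,
# assuming `data` and `beads_2_M` were prepared with the pe/nacl helpers used
# throughout this module.
#
#   ax = alpha_custom_rho(data, rho_p_list=[0.1, 0.3], rho_s_list=[1.0, 1.5],
#                         beads_2_M=beads_2_M, T_cels=True)
#   plt.show()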
def alpha_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M,
T_range=[273.15, 373.15], cmap_name='plasma', sigma=None,
colors=None, marker='o', lw=1, T_cels=False,
y_lim=[0.5, 1], title=None, square_box=False):
"""
Plots the volume fraction of supernatant phase I (alpha) vs. the overall
density of the varied component.
Note: currently eliminates data points with alpha = 1 because they tend to
be the result of numerical imprecision
T_range : 2-tuple
Lower and upper bounds on temperature to consider in degrees Kelvin
(even if T_cels==True)
"""
# creates dictionary of values based on which component's density is varied
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of the varied density
if colors is None:
colors = get_colors(cmap_name, len(rho_var_list))
# creates figure
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_var in enumerate(rho_var_list):
# plots binodal for low polymer concentration [M]
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = results['lB']
alpha = results['alpha']
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \
(np.asarray(alpha) != 1)
if T_cels:
T_arr -= K_2_C
# plots alpha vs. T for given rho_p
ax.plot(T_arr[liq_h2o], np.array(alpha)[liq_h2o], color=colors[i],
marker=marker, lw=lw, label=r'$\rho_' + d['ch_var'] + ' = $' + \
'{0:.2f} M'.format(rho_var))
# determines labels and limits of axes
if T_cels:
x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
x_label = r'$T$'
x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
y_label = r'$V^{sup}/V^{tot}$'
if title is None:
title = 'Effect of Total {0:s} on Supernatant Volume, {1:s} = {2:.2f} M' \
.format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix)
# formats plot
format_binodal(ax, x_label, x_unit, T_range, title=title, x_lim=x_lim,
y_lim=y_lim, y_label=y_label, square_box=square_box)
return ax
def binodal(lB_arr, left_list, right_list, left='rhoPCI', right='rhoPCII',
x_label='polyanion density', n_tie_lines=3, plot_T=True, sigma=None,
T_range=[273, 373], beads_2_M=None, title='', fix_eps=False,
deg_C=False, x_lim=None, y_lim=None, marker=True, line=False,
c1='blue', c2='red'):
"""
Plots binodal with polyanion density as x axis and temperature or
Bjerrum length as y axis using Bokeh interactive plotting methods.
Parameters
----------
lB_arr : (Nx1) numpy array
    Array of Bjerrum lengths non-dimensionalized by the sigma used to
    define the "data" dictionary.
left_list : N-element list
List of x-axis variable in phase I (supernatant) [beads/sigma^3]
right_list : N-element list
List of x-axis variable in phase II (coacervate) [beads/sigma^3]
left : string
Name of heading in df of the variable given in left_list
right : string
Name of heading in df of the variable given in right_list
x_label : string
Variable to be plotted along the x-axis (without units)
n_tie_lines : int
Number of tie lines to plot
plot_T : bool
    y axis is temperature [K] if True, Bjerrum length [sigma] if False
T_range : 2-element list
Lower and upper bound for temperatures to plot (to limit temperatures
to those for which water is liquid)
beads_2_M : float
Conversion from beads/sigma^3 to moles of monomers / L. If None, no
conversion is made and the units on the x axis are beads/sigma^3.
title : string
Title of plot
fix_eps : bool
    Fixes epsilon to a constant value if True, or allows it to vary with
temperature if False.
deg_C : bool, opt
If True, temperature is shown in degrees Celsius (assuming it is
provided in Kelvin), default = False.
x_lim : 2-element tuple of floats, optional
Lower and upper bounds of x axis. If None provided, automatically set.
y_lim : 2-element tuple of floats, optional
Lower and upper bounds of y axis. If None provided, automatically set.
Returns
-------
p : bokeh plot
Plot of binodal. Use bokeh's "show(p)" to display. Use "output_notebook()" beforehand
to show the plot in the same cell (instead of a separate browser tab).
"""
left_arr = np.copy(left_list)
right_arr = np.copy(right_list)
# calculates conversion from beads / sigma^3 to mol/L
if beads_2_M is not None:
left_arr *= beads_2_M
right_arr *= beads_2_M
units_rho = '[mol/L]'
else:
units_rho = '[beads/sigma^3]'
# computes temperature corresponding to Bjerrum lengths
T_arr = pe.lB_2_T_arr(lB_arr, T_range, fix_eps=fix_eps, sigma=sigma)
# stores results in dataframe for plotting
df_mu = pd.DataFrame(columns=['BJ', 'T', left, right])
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
df_mu['BJ'] = lB_arr[liq_h2o]
df_mu['T'] = T_arr[liq_h2o] - deg_C*273 # converts to degrees Celsius if requested
df_mu[left] = left_arr[liq_h2o] # monomer density
df_mu[right] = right_arr[liq_h2o] # monomer density
# plots binodal at fixed chemical potential
n_plot = len(df_mu)
if n_plot == 0:
print('No data to plot in plot.binodal()--error likely.')
p = no_salt(df_mu, n_plot, left=left, right=right, x_label=x_label,
n_tie_lines=n_tie_lines, plot_T=plot_T, marker=marker, line=line,
title=title, units_rho=units_rho, deg_C=deg_C, c1=c1, c2=c2)
# sets axis limits if requested
if x_lim is not None:
p.x_range = Range1d(*x_lim)
if y_lim is not None:
p.y_range = Range1d(*y_lim)
return p
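# Illustrative sketch (not part of the original module): displaying the Bokeh
# binodal, assuming lB_arr, rho_PCI_list, and rho_PCII_list came from
# nacl.fixed_rho_total() as in the functions below.
#
#   p = binodal(lB_arr, rho_PCI_list, rho_PCII_list, beads_2_M=beads_2_M,
#               plot_T=True, title='binodal at fixed overall composition')
#   show(p)  # call output_notebook() first to render inline in a notebook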
def binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273.15,373.15], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full', lw_sup=1,
lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False,
c_sup='#1414FF', c_co='#FF0000', ls_sup='-',
square_box=False, plot_fixed_rho=False, ax=None,
show_lgnd=True):
"""
Like `binodal_vary_rho()` but allows user to customize both rho_p and rho_s
(overall) of each condition, rather than fixing one for all conditions.
"""
# creates list of colors for each value of rho_p
if colors is None:
if cmap_name is not None:
colors = get_colors(cmap_name, len(rho_p_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
rho_p, rho_s = rho_pair
# plots binodal for low polymer concentration [M]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
rho_PAI_list = results['rho_PAI']
rho_PAII_list = results['rho_PAII']
rho_AI_list = results['rho_AI']
rho_AII_list = results['rho_AII']
lB_arr = results['lB']
alpha = results['alpha']
# selects the x-axis data
if x_var == 'polycation':
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
elif x_var == 'polyanion':
left_arr = np.array(rho_PAI_list)
right_arr = np.array(rho_PAII_list)
elif x_var == 'cation':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
elif x_var == 'anion':
left_arr = np.array(rho_AI_list)
right_arr = np.array(rho_AII_list)
elif x_var == 'solvent':
left_arr = pe.calc_rho_solv(rho_PCI_list,
rho_CI_list,
beads_2_M)
right_arr = pe.calc_rho_solv(rho_PCII_list,
rho_CII_list,
beads_2_M)
elif x_var == 'polyelectrolyte':
left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list)
right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list)
elif x_var == 'salt':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
else:
print('Error. Invalid x variable in plot.binodal_custom_rho().')
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# assigns separate colors to coacervate and supernatant if not specified
if colors is not None:
c_sup = colors[i]
c_co = colors[i]
# supernatant
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup,
marker=marker, fillstyle=fill_left, ls=ls_sup,
label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
r'$\rho_s = $' + '{0:.2f} M, supernatant'.format(rho_s),
lw=lw_sup)
# coacervate
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co,
marker=marker, fillstyle=fill_right,
label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
r'$\rho_s = $' + '{0:.2f} M, coacervate'.format(rho_s),
lw=lw_co)
# plots dashed line indicating fixed density if requested
if plot_fixed_rho:
# defines dictionary mapping x variable to corresponding fixed
# density
x_var_2_rho_fixed = {'polycation' : rho_p/2,
'cation' : rho_s,
'solvent' : 1 - rho_p - rho_s,
'polyelectrolyte' : rho_p,
'salt' : rho_s}
# selects appropriate fixed density based on x variable
rho_fixed = x_var_2_rho_fixed[x_var]
# determines color based on which branch is closest
if (rho_fixed - np.max(left_arr[liq_h2o])) > \
(np.min(right_arr[liq_h2o]) - rho_fixed):
# coacervate branch is closest to fixed density
color = c_co
else:
# supernatant branch is closest to fixed density
color = c_sup
# plots fixed density as vertical dashed line
ax.plot([rho_fixed, rho_fixed], ax.get_ylim(), '--', color=color,
lw=lw_sup)
# determines units of density to display on plot
if beads_2_M is not None:
units_rho = 'mol/L'
else:
units_rho = 'beads/sigma^3'
# formats plot
format_binodal(ax, x_label, units_rho, T_range, x_lim=x_lim,
T_cels=T_cels, square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, show_tie_line=True,
cmap_name='plasma', colors=None, sigma=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True, tol=1E-4, ms=10, T_cels=False, show_lB=False,
T_range=[273.15, 373.15], lw=2, square_box=False, ax=None,
colors_symbols=None, mew=1.5, x_lim=None, y_lim=None,
show_lgnd=True):
"""
Plots the binodal as a function of salt density and polyelectrolyte
density. Different Bjerrum lengths/temperatures are represented by
different trend lines.
Returns
-------
None.
"""
# variables defining order of plotted objects
back = 0
front = 10
# lists symbols for plotting overall composition
sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x']
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(lB_list))
# determines units
if beads_2_M != 1:
units_rho = 'mol/L'
else:
units_rho = r'beads/$\sigma^3$'
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# loops through each temperature / Bjerrum length in data
for i, lB in enumerate(lB_list):
df = data[lB]
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# computes temperature
T = pe.lB_2_T(lB, sigma=sigma)
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots tie lines and overall composition
for j, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
rho_p, rho_s = rho_pair
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = np.asarray(results['lB'])
alpha = results['alpha']
# converts to arrays of polymer and salt concentrations
rho_p_I = 2*np.asarray(rho_PCI_list)
rho_s_I = np.asarray(rho_CI_list)
rho_p_II = 2*np.asarray(rho_PCII_list)
rho_s_II = np.asarray(rho_CII_list)
# continues if no T in range has 2 phases for concentration
# finds closest match given Bjerrum length
try:
i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0]
except IndexError:
print('lB = {0:.3f} gives 1 phase for'.format(lB) + \
' rho_p = {0:.3f} [{1:s}],'.format(rho_p, units_rho) + \
'rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho))
continue
# tie line
if show_tie_line:
ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]],
[rho_s_I[i_tie], rho_s_II[i_tie]], '--',
color='k', lw=lw, zorder=back)
# supernatant
ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# coacervate
ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# plots overall composition last time through
if i == len(lB_list)-1:
short = {'mol/L' : 'M', 'beads/sigma^3' : r'$\sigma^{-3}$'}
if sym_list[j] == '*':
ms_boost = 4
else:
ms_boost = 0
# if provided, can specify marker face color
if colors_symbols is not None:
mfc = colors_symbols[j]
else:
mfc = 'w'
# plots symbol representing composition
ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor=mfc,
ms=ms+ms_boost, markeredgecolor='k',
markeredgewidth=mew, lw=0,
label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p,
short[units_rho]) + r', $\rho_s = $ ' + \
'{0:.2f} {1:s}'.format(rho_s, short[units_rho]),
zorder=front)
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit)
if show_lB:
label += r', $l_B = $ ' + '{0:.3f}'.format(lB)
ax.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1],
color=colors[i], lw=lw,
label=label, zorder=back)
# plots critical point
ax.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=colors[i], zorder=front)
# formats plot
x_label = r'$\rho_p$'
y_label = r'$\rho_s$ [' + units_rho + ']'
# determines component with varied concentration
name_pair = ['Polymer', 'Salt']
format_binodal(ax, x_label, units_rho, T_range, y_label=y_label,
x_lim=x_lim, y_lim=y_lim, lgnd_out=lgnd_out,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_line_3d(data, mode='lines', ms=8, op=0.1,
c1='black', c2='black', lw=8, fig=None):
"""Plots line binodal in 3d plot."""
x1, y1, z1, x2, y2, z2 = data
fig = line_3d(x1, y1, z1, mode=mode, ms=ms, op=op, c=c1, lw=lw, fig=fig)
fig = line_3d(x2, y2, z2, mode=mode, ms=ms, op=op, c=c2, lw=lw, fig=fig)
return fig
def binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list, color_list,
T_range, sigma, z_name, beads_2_M, lB_list,
lB_color_list, T_cels=False, marker='o', show_lB=False,
fill_left='none', fill_right='full', lw_sup=1, lw_co=3,
lw_lB=2, naming_structure='NA(100)NB(100)*', ext='PD',
figsize=None, vertical=True):
"""
Computes binodal projected onto three different planes (polymer-temperature,
salt-temperature, and polymer-salt) at fixed concentration of salt in a
saltwater reservoir.
show_lB : bool, optional
If True, will show Bjerrum length in legend
"""
### Formats Figure
# creates figure to plot the three 2D projections in a single row
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### Creates Axes
if vertical:
h = 3 # 3 plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = 3 # 3 plots wide
# polymer-T projection
ax_pT = fig.add_subplot(h, w, 1)
# salt-T projection
ax_sT = fig.add_subplot(h, w, 2)
# polymer-salt projection
ax_ps = fig.add_subplot(h, w, 3)
# computes binodal at different saltwater reservoir concentrations
# and plots on each of the three projections
for rho_salt_M, color in zip(rho_salt_M_list, color_list):
# converts mol/L to beads/sigma^3
rho_salt = rho_salt_M / beads_2_M
# makes dataframe of binodal for fixed salt reservoir concentration
df_mu = nacl.make_df_mu(data, mu_salt_folder, rho_salt, T_range, sigma,
naming_structure=naming_structure, ext=ext)
rho_p_I, rho_s_I, T_arr, rho_p_II, rho_s_II, _ = nacl.extract_df_mu_data(df_mu, z_name)
# computes temperature and identifies data within range
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# creates labels
label_sup = r'$\rho_s^{res} = $' + '{0:.2f} M, supernatant'.format(rho_salt_M)
label_co = r'$\rho_s^{res} = $' + '{0:.2f} M, coacervate'.format(rho_salt_M)
# polymer-T projection
ax_pT.plot(rho_p_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_left, label=label_sup, lw=lw_sup)
ax_pT.plot(rho_p_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_right, label=label_co, lw=lw_co)
# salt-T projection
ax_sT.plot(rho_s_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_left, label=label_sup, lw=lw_sup)
ax_sT.plot(rho_s_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_right, label=label_co, lw=lw_co)
# polymer-salt projection
ax_ps.plot(rho_p_I[liq_h2o], rho_s_I[liq_h2o], color=color, label=label_sup, lw=lw_sup, zorder=10)
ax_ps.plot(rho_p_II[liq_h2o], rho_s_II[liq_h2o], color=color, label=label_co, lw=lw_co, zorder=10)
# plots isothermal binodal slices in polymer-salt plane
for lB, lB_color in zip(lB_list, lB_color_list):
df = data[lB]
T = pe.lB_2_T(lB, sigma=sigma)
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# units for temperature
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s} '.format(int(T), T_unit)
if show_lB:
label += r'$l_B = $ ' + '{0:.3f}'.format(lB)
ax_ps.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1], color=lB_color, lw=lw_lB,
label=label, zorder=0)
# plots critical point
ax_ps.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=lB_color)
return fig, ax_pT, ax_sT, ax_ps
def binodal_rho_rho(data, lB_list, rho_var_list, rho_fix,
ch_var, beads_2_M, show_tie_line=True,
cmap_name='plasma', colors=None, sigma=None, title=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True, tol=1E-4, ms=10, T_cels=False, show_lB=False,
T_range=[273.15, 373.15], lw=2, square_box=False, ax=None):
"""
Plots the binodal as a function of salt density and polyelectrolyte
density. Different Bjerrum lengths/temperatures are represented by
different trend lines.
Returns
-------
None.
"""
# variables defining order of plotted objects
back = 0
front = 10
# lists symbols for plotting overall composition
sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x']
# creates dictionary to order fixed and varied densities properly
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(lB_list))
# determines units
if beads_2_M != 1:
units_rho = 'mol/L'
else:
units_rho = r'beads/$\sigma^3$'
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# loops through each temperature / Bjerrum length in data
for i, lB in enumerate(lB_list):
df = data[lB]
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# computes temperature
T = pe.lB_2_T(lB, sigma=sigma)
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots tie lines and overall composition
for j, rho_var in enumerate(rho_var_list):
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = np.asarray(results['lB'])
alpha = results['alpha']
# converts to arrays of polymer and salt concentrations
rho_p_I = 2*np.asarray(rho_PCI_list)
rho_s_I = np.asarray(rho_CI_list)
rho_p_II = 2*np.asarray(rho_PCII_list)
rho_s_II = np.asarray(rho_CII_list)
# continues if no T in range has 2 phases for concentration
# finds closest match given Bjerrum length
try:
i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0]
except IndexError:
print('lB = {0:.3f} gives 1 phase for'.format(lB) + \
' rho_p = {0:.3f} [{1:s}],'.format(rho_p, units_rho) + \
'rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho))
continue
# tie line
if show_tie_line:
ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]],
[rho_s_I[i_tie], rho_s_II[i_tie]], '--',
color='k', lw=lw, zorder=back)
# supernatant
ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# coacervate
ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# plots overall composition last time through
if i == len(lB_list)-1:
short = {'mol/L' : 'M', 'beads/sigma^3' : r'$\sigma^{-3}$'}
if sym_list[j] == '*':
ms_boost = 4
else:
ms_boost = 0
ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor='w',
ms=ms+ms_boost, markeredgecolor='k',
markeredgewidth=1.5, lw=0,
label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p,
short[units_rho]) + r', $\rho_s = $ ' + \
'{0:.2f} {1:s}'.format(rho_s, short[units_rho]),
zorder=front)
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit)
if show_lB:
label += r', $l_B = $ ' + '{0:.3f}'.format(lB)
ax.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1],
color=colors[i], lw=lw,
label=label, zorder=front)
# plots critical point
ax.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=colors[i], zorder=front)
# formats plot
x_label = r'$\rho_p$'
y_label = r'$\rho_s$ [' + units_rho + ']'
# determines component with varied concentration
name_pair = ['Polymer', 'Salt']
name_var = name_pair[d['order'][0]]
if title is None:
title = 'Vary Overall {0:s} Concentration'.format(name_var)
format_binodal(ax, x_label, units_rho, T_range, y_label=y_label, title=title,
lgnd_out=lgnd_out, square_box=square_box)
return ax
def binodal_surf_3d(data, mode='markers', ms=4, op=0.01,
c1='blue', c2='red', lw=0, fig=None):
"""Plots surface binodal in 3d."""
x1, y1, z1, x2, y2, z2 = data
if fig is None:
fig = go.Figure()
# plots phase I (supernatant) of full binodal
fig = fig.add_trace(go.Scatter3d(
x=x1, y=y1, z=z1,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c1
),
line=dict(
color=c1,
width=lw,
),
))
# plots phase II (coacervate) of full binodal
fig.add_trace(go.Scatter3d(
x=x2, y=y2, z=z2,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c2
),
line=dict(
color=c2,
width=lw,
),
))
return fig
def binodal_surf_3d_batch(data_3d, op, ms, lw, mode, fig=None, skip=[]):
"""
Plots batch of data for a 3d surface binodal.
"""
# extracts data
x1_coll, y1_coll, z1_coll, x2_coll, y2_coll, z2_coll = data_3d
z_arr = np.unique(z1_coll)
# plots data at each z value
for (i, z) in enumerate(z_arr):
# skips indices requested
if i in skip:
continue
# extracts data corresponding to current z value (T or lB)
x1 = x1_coll[z1_coll==z]
y1 = y1_coll[z1_coll==z]
z1 = z1_coll[z1_coll==z]
x2 = x2_coll[z2_coll==z]
y2 = y2_coll[z2_coll==z]
z2 = z2_coll[z2_coll==z]
# plots data on 3D plot
fig = binodal_surf_3d((x1, y1, z1, x2, y2, z2), op=op, ms=ms, lw=lw,
mode=mode, fig=fig)
return fig
def binodal_vary_conc(mu_salt_folder, data, rho_salt_list, beads_2_M, qty,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273,373], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True):
"""
LEGACY
Plots the binodal for different average densities of polymer.
qty : string
The quantity from df to return. Options include 'rhoPC', 'rhoPA',
'rhoC', and 'rhoA'.
"""
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(rho_salt_list))
# creates figure
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_salt in enumerate(rho_salt_list):
# plots binodal for low polymer concentration [M]
mu_conc = nacl.get_mu_conc(mu_salt_folder, data, rho_salt, beads_2_M=beads_2_M)
try:
lB_arr, rho_PCI_list, rho_PCII_list = nacl.fixed_conc(mu_conc, data, qty, beads_2_M=beads_2_M)
except Exception:
continue
# selects the x-axis data
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# determines units
if beads_2_M is not None:
units_rho = '[mol/L]'
else:
units_rho = '[beads/sigma^3]'
# left binodal
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=colors[i],
marker=marker, fillstyle=fill_left,
label=r'$\rho_{salt} = $' + '{0:.2f} {1:s}, supernatant' \
.format(rho_salt, units_rho))
# right binodal
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=colors[i],
marker=marker, fillstyle=fill_right,
label=r'$\rho_{salt} = $' + \
'{0:.2f} {1:s}, coacervate'.format(rho_salt, units_rho))
# formats plot
ax.set_ylim(T_range)
ax.set_xlabel(x_label + ' ' + units_rho, fontsize=16)
ax.set_ylabel(r'$T$ [K]', fontsize=16)
ax.tick_params(axis='both', labelsize=14)
ax.set_title('Effect of Salt Reservoir on Binodal', fontsize=16)
# put legend outside of plot box
if lgnd_out:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
plt.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y), fontsize=12)
else:
plt.legend(fontsize=12)
return ax
def binodal_vary_f(data, f_list, color_list, T_cels=True, x_label=r'$\rho_p$',
units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4,
square_box=True, show_lgnd=False, ax=None):
"""
Plots binodal projected onto coordinate plane for different charge fractions
f.
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for f, color in zip(f_list, color_list):
# creates labels
label_sup = r'$f$ =' + ' {0:.2f} supernatant'.format(f)
label_co = r'$f$ =' + ' {0:.2f} coacervate'.format(f)
# extracts data
T_arr, rho_p_I, rho_p_II = data[f]
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_vary_N(data, N_list, color_list, T_cels=True, x_label=r'$\rho_p$',
units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4,
square_box=True, show_lgnd=False, ax=None):
"""
Plots binodal projected onto coordinate plane for different degrees of
polymerization N.
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for N, color in zip(N_list, color_list):
# extracts data for given N
T_arr, rho_p_I, rho_p_II = data[N]
# creates labels
label_sup = r'$N$ =' + ' {0:d} supernatant'.format(N)
label_co = r'$N$ =' + ' {0:d} coacervate'.format(N)
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_vary_sigma(data, sigma_list, color_list,
T_cels=True, x_label=r'$\rho_p$', units_rho='M',
T_range=[273.15, 373.15], lw1=1, lw2=4, square_box=True,
show_lgnd=False, x_lim=None, ax=None):
"""
Plots binodal projected onto coordinate plane for different bead sizes
(sigma).
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for sigma, color in zip(sigma_list, color_list):
# creates labels
label_sup = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ supernatant'
label_co = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ coacervate'
# extracts data
T_arr, rho_p_I, rho_p_II = data[sigma]
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels, x_lim=x_lim,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273.15,373.15], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full', lw_sup=1,
lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False,
title=None, c_sup='#1414FF', c_co='#FF0000', ls_sup='-',
square_box=False, ax=None):
"""
Plots the binodal for different average densities of polymer.
If T_cels is True, converts the temperature from Kelvin to Celsius
"""
# creates dictionary of values based on which component's density is varied
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of rho_p
if colors is None:
if cmap_name is not None:
colors = get_colors(cmap_name, len(rho_var_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
### Plots figure
for i, rho_var in enumerate(rho_var_list):
# plots binodal for low polymer concentration [M]
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
rho_PAI_list = results['rho_PAI']
rho_PAII_list = results['rho_PAII']
rho_AI_list = results['rho_AI']
rho_AII_list = results['rho_AII']
lB_arr = results['lB']
alpha = results['alpha']
# selects the x-axis data
if x_var == 'polycation':
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
elif x_var == 'polyanion':
left_arr = np.array(rho_PAI_list)
right_arr = np.array(rho_PAII_list)
elif x_var == 'cation':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
elif x_var == 'anion':
left_arr = np.array(rho_AI_list)
right_arr = np.array(rho_AII_list)
elif x_var == 'solvent':
left_arr = pe.calc_rho_solv(rho_PCI_list,
rho_CI_list,
beads_2_M)
right_arr = pe.calc_rho_solv(rho_PCII_list,
rho_CII_list,
beads_2_M)
elif x_var == 'polyelectrolyte':
left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list)
right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list)
elif x_var == 'salt':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
else:
print('Error. Invalid x variable in plot.binodal_vary_rho().')
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# assigns separate colors to coacervate and supernatant if not specified
if colors is not None:
c_sup = colors[i]
c_co = colors[i]
# supernatant
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup,
marker=marker, fillstyle=fill_left, ls=ls_sup,
label=r'$\rho_' + d['ch_var'] + ' = $' + '{0:.2f} M, supernatant' \
.format(rho_var), lw=lw_sup)
# coacervate
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co,
marker=marker, fillstyle=fill_right,
label=r'$\rho_' + d['ch_var'] + ' = $' + \
'{0:.2f} M, coacervate'.format(rho_var), lw=lw_co)
# determines units of density to display on plot
if beads_2_M is not None:
units_rho = 'mol/L'
else:
units_rho = 'beads/sigma^3'
# formats plot
if title is None:
title = 'Effect of {0:s} on Binodal, {1:s} = {2:.2f} M' \
.format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix)
format_binodal(ax, x_label, units_rho, T_range, title=title, x_lim=x_lim,
T_cels=T_cels, square_box=square_box)
return ax
def fig4(data_pred, df_exp, rho_s_raw_list, rho_p_raw, sigma, T_range,
lw=3, c_sup='#1414FF', c_co='#FF0000', ms=11,
mfc='w', mew=1.5, x_lim=None, x_label=r'$\rho_{PSS}$',
conv_vals=False, tol=1E-6, show_lgnd=False,
figsize=None, pad=3, vertical=False, plot_errorbars=False):
"""
Validates fit of sigma to experiments.
"""
# computes conversion from beads/sigma^3 to mol/L
beads_2_M = pe.get_beads_2_M(sigma, SI=True)
# creates figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
# determines arrangement of subplots
if vertical:
h = len(rho_s_raw_list) # many plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = len(rho_s_raw_list) # many plots wide
# Plots figure
for i, rho_s_raw in enumerate(rho_s_raw_list):
if conv_vals:
rho_p, rho_s = nacl.conv_ali_conc(df_exp, rho_p_raw, rho_s_raw)
# creates subplot
ax = fig.add_subplot(h, w, i+1)
# polymer-temperature plane
ax = binodal_custom_rho(data_pred, [rho_p], [rho_s], beads_2_M,
x_var='polycation', x_label=x_label,
x_lim=x_lim, sigma=sigma, T_range=T_range,
marker='', lw=lw, lw_sup=lw, lw_co=lw,
colors=None, cmap_name=None, T_cels=True,
c_sup=c_sup, c_co=c_co, ls_sup='--',
square_box=True, show_lgnd=show_lgnd, ax=ax)
# plots experimental results
for i in range(len(df_exp)):
rho_p_exp, rho_s_exp, T_exp, \
rho_p_sup, rho_p_co, s_rho_p_sup, \
s_rho_p_co = nacl.read_df_exp(df_exp, i, conv_vals=conv_vals,
read_sigma=plot_errorbars)
if (rho_p_exp == rho_p) and (rho_s_exp == rho_s):
# plots supernatant and coacervate compositions
rho_pss_sup = rho_p_sup/2
rho_pss_co = rho_p_co/2
if plot_errorbars:
s_rho_pss_sup = s_rho_p_sup/2
s_rho_pss_co = s_rho_p_co/2
ax.errorbar(rho_pss_sup, T_exp, xerr=s_rho_pss_sup, lw=0, marker='o', ms=ms,
markerfacecolor=mfc, markeredgewidth=mew, elinewidth=1,
markeredgecolor=c_sup, label='Ali et al. (2019), supernatant')
ax.errorbar(rho_pss_co, T_exp, xerr=s_rho_pss_co, lw=0, marker='o', ms=ms,
markerfacecolor=c_co, markeredgewidth=mew, elinewidth=1,
markeredgecolor=c_co, label='Ali et al. (2019), coacervate')
else:
ax.plot(rho_pss_sup, T_exp, lw=0, marker='o', ms=ms,
markerfacecolor=mfc, markeredgewidth=mew,
markeredgecolor=c_sup, label='Ali et al. (2019), supernatant')
ax.plot(rho_pss_co, T_exp, lw=0, marker='o', ms=ms,
markerfacecolor=c_co, markeredgewidth=mew,
markeredgecolor=c_co, label='Ali et al. (2019), coacervate')
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def figs3(data_folder_N, data_folder_f, data_folder_sigma,
mu_salt_folder_N, mu_salt_folder_f, mu_salt_folder_sigma,
rho_s_M_N, rho_s_M_f, rho_s_M_sigma, ext_N, ext_f, ext_sigma,
N_list, f_list, sigma_list, color_list_N, color_list_f,
color_list_sigma, sigma_fixed, x_lim_sigma=[0,6], figsize=None, pad=3,
naming_structure_sigma='NA(100)NB(100)lB(*)', lB_lo=1.3, lB_hi=2.398):
"""Plots Figure S3 of SI showing effects of N, f, and sigma on
binodal projections in polymer-temperature plane."""
# creates figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### Effect of varying N
print('loading N data')
# adds subplot
axN = fig.add_subplot(131)
# extracts data
data_vary_N = nacl.binodal_vary_N_data(data_folder_N, mu_salt_folder_N,
rho_s_M_N, N_list, sigma=sigma_fixed, ext=ext_N)
# plots data
print('plotting N data')
_ = binodal_vary_N(data_vary_N, N_list, color_list_N, ax=axN)
### Effect of varying charge fraction f
# adds subplot
axf = fig.add_subplot(132)
# extracts data
print('loading f data')
data_vary_f = nacl.binodal_vary_f_data(data_folder_f, mu_salt_folder_f,
rho_s_M_f, f_list,
sigma=sigma_fixed, ext=ext_f)
# plots data
print('plotting f data')
_ = binodal_vary_f(data_vary_f, f_list, color_list_f, ax=axf)
### Effect of varying sigma
axsigma = fig.add_subplot(133)
# loads all data
print('loading sigma data')
data = nacl.load_data(data_folder_sigma, ext=ext_sigma,
naming_structure=naming_structure_sigma, lB_lo=lB_lo, lB_hi=lB_hi)
# extracts relevant data
data_vary_sigma = nacl.binodal_vary_sigma_data(data, mu_salt_folder_sigma,
rho_s_M_sigma, sigma_list, ext=ext_sigma)
# plots data
print('plotting sigma data')
_ = binodal_vary_sigma(data_vary_sigma, sigma_list,
color_list_sigma, ax=axsigma, x_lim=x_lim_sigma)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def compare_to_exp(data, beads_2_M, rho_p_list=[0.3], rho_s_list=[1.6, 1.85, 1.9],
N=100, f=1, sigma=4, t_fs=12, T_range=[273.15, 323.15]):
"""
Compares predictions from data to the experiment in the Prabhu group.
"""
# sets x and y axis limits
x_lim = (-0.05, 1.3) # [mol/L]
y_lim = (0, 60) # [C]
# sets temperature range
T_range = [273, 333]
for rho_s in rho_s_list:
for rho_p in rho_p_list:
# computes polycation concentrations at different temperatures for fixed polymer and salt [mol/L]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
lB_arr = results['lB']
# plots binodal
title = '{0:.2f} M Salt, {1:.2f} M Polymer, N = {2:d}, f = {3:.2f}, sig = {4:.2f} A'.format(rho_s, rho_p, N, f, sigma)
p = binodal(lB_arr, rho_PCI_list, rho_PCII_list, title=title,
beads_2_M=1, n_tie_lines=0, deg_C=True, T_range=T_range,
x_lim=x_lim, y_lim=y_lim, marker=False, line=True)
p.title.text_font_size = '{0:d}pt'.format(t_fs)
show(p)
return
def crit_line_3d(data_cp, c_crit, lw_crit, fig):
"""
Plots critical line in 3D, typically for 3D surface binodal plot.
LEGACY
"""
polymer_c_list, salt_c_list, z_arr = data_cp
fig.add_trace(go.Scatter3d(
x=polymer_c_list,
y=salt_c_list,
z=z_arr,
mode='lines',
line=dict(
color=c_crit,
width=lw_crit,
),
),
)
return fig
def fig1(data_3d, data_cp, data_z, data_mu, plot_params, fixed_T=True,
fixed_salt=True, crit_line=True, fixed_comp=False,
data_comp=None, data_outlines=None, skip=[], plot_axes=True,
outline_scale_factor=1.02, toc_fig=False, has_ucst=False,
show_labels=True):
"""
Plots Figure 1 from CCLS paper: 3d surface binodal, fixed T 2d line binodal,
fixed salt reservoir concentration 2d line binodal, and critical line.
"""
# if Table of Contents (TOC) figure, removes all but LCST
if toc_fig:
fixed_salt = True
crit_line = True
fixed_comp = False
fixed_T = False
x_range, y_range, z_range, eye_xyz, op, ms_bin, lw_bin, \
lw_fix, lw_crit, lw_outline, c1_T, c2_T, c1_fix, c2_fix, \
c_crit, c_outline, mode, width, height, fs, offset = plot_params
x, y, z = eye_xyz
# plots 3d surface binodal
fig = binodal_surf_3d_batch(data_3d, op, ms_bin, lw_bin, mode, skip=skip)
if crit_line:
# plots critical line
fig = line_3d(*data_cp, c=c_crit, lw=lw_crit, fig=fig)
if fixed_T:
# plots binodal at fixed z value (temperature or Bjerrum length)
fig = binodal_line_3d(data_z, fig=fig, lw=lw_fix, c1=c1_T, c2=c2_T)
if fixed_salt:
### FIXED SALT CONCENTRATION ###
# if there is a UCST, split the binodal in two
if has_ucst:
# identifies threshold between UCST and LCST by largest gap in z
z1 = data_mu[2]
z1_diff = np.diff(z1)
i_thresh = np.argmax(z1_diff)
thresh_ucst = (z1[i_thresh] + z1[i_thresh+1])/2
# splits data below UCST and above LCST
ucst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for x1, y1, z1, x2, y2, z2 in zip(*data_mu) if z1 < thresh_ucst]))
lcst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for x1, y1, z1, x2, y2, z2 in zip(*data_mu) if z1 > thresh_ucst]))
# plots UCST and LCST data separately
fig = binodal_line_3d(ucst_data, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
fig = binodal_line_3d(lcst_data, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
else:
# plots data for fixed saltwater reservoir concentration
fig = binodal_line_3d(data_mu, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
if fixed_comp:
# plots binodal at fixed overall salt, polymer concentration #
fig = binodal_line_3d(data_comp, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
# plots outlines of the surface for definition
if data_outlines is not None:
for data_outline in data_outlines:
data_outline_scaled = []
for coord in data_outline:
coord = outline_scale_factor*np.asarray(coord)
data_outline_scaled += [coord]
fig = binodal_line_3d(data_outline_scaled, c1=c_outline,
c2=c_outline, fig=fig)
if plot_axes:
# x-axis
fig = line_3d(x_range, [offset, offset], [z_range[0] + offset,
z_range[0] + offset], lw=12, c=c_outline, fig=fig)
# y-axis
fig = line_3d([offset, offset], y_range, [z_range[0] + offset,
z_range[0] + offset], lw=12, c=c_outline, fig=fig)
# z-axis
fig = line_3d([offset, offset], [offset, offset], z_range,
c=c_outline, lw=12, fig=fig)
### FORMATS FIGURE ###
fig.update_layout(
scene = dict(xaxis = dict(range=x_range,),
yaxis = dict(range=y_range,),
zaxis = dict(range=z_range,),
),
width = width,
height = height,
# changes initial view of figure
scene_camera = dict(
eye=dict(x=x, y=y, z=z),
# center=dict(x=0, y=0.3, z=0.3),
# up=dict(x=0, y=0, z=1)
),
font = dict(
family='Arial',
color='black',
size=fs)
)
### Cleanup
# removes legend (too crowded to be of use)
fig.update_layout(showlegend=False)
#removes tick labels and axis titles (so I can add them myself)
if not show_labels:
fig.update_layout(
scene = dict(xaxis = dict(showticklabels=False, title=''),
yaxis = dict(showticklabels=False, title=''),
zaxis = dict(showticklabels=False, title='',
tickmode = 'linear',
tick0 = 0,
dtick = 50),
),
)
return fig
def fig2a(rho_salt_M_list_list, data, mu_salt_folder,
color_list, T_range, sigma, z_name,
beads_2_M, lB_list, lB_color_list, pad,
kwargs, units_rho='mol/L', show_lgnd=False, y_lim_T=(0, 100),
rho_p_label=r'$\rho_p$', rho_s_label=r'$\rho_s$',
y_lim_s=[0, 2.25]):
"""Plots Figure 2a of binodal projections at different saltwater concentrations."""
for rho_salt_M_list in rho_salt_M_list_list:
# plots binodal projections
fig, ax_pT, ax_sT, \
ax_ps = binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list,
color_list, T_range, sigma, z_name,
beads_2_M, lB_list, lB_color_list,
**kwargs)
# formats plots
ax_pT = format_binodal(ax_pT, rho_p_label, units_rho, T_range,
T_cels=kwargs['T_cels'], y_lim=y_lim_T,
show_lgnd=show_lgnd)
ax_sT = format_binodal(ax_sT, rho_s_label, units_rho, T_range,
T_cels=kwargs['T_cels'], y_lim=y_lim_T,
show_lgnd=show_lgnd)
ax_ps = format_binodal(ax_ps, rho_p_label, units_rho, T_range,
y_label=rho_s_label + ' [' + units_rho + ']',
show_lgnd=show_lgnd, y_lim=y_lim_s)
# pads plots with whitespace
fig.tight_layout(pad=pad)
return fig
def fig2b(data, rho_p_list, rho_s_list, beads_2_M, lB_list, color_list,
lB_color_list, kwargs, alpha_y_lim=(0.5,1.05),
alpha_yticks=(0.5,0.75,1), figsize=None, pad=3, mew=0.5,
show_lgnd=False):
"""Plots Figure 2b of binodal projections at different overall compositions."""
### Formats Figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### polymer-temperature plane ###
ax1 = fig.add_subplot(221)
_ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='polyelectrolyte', x_label=r'$\rho_p$',
marker='', colors=color_list,
plot_fixed_rho=True, ax=ax1, show_lgnd=show_lgnd,
**kwargs)
### salt-temperature plane ###
ax2 = fig.add_subplot(222)
_ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='salt', x_label=r'$\rho_s$', marker='',
colors=color_list, plot_fixed_rho=True,
ax=ax2, show_lgnd=show_lgnd, **kwargs)
### polymer-salt plane ###
ax3 = fig.add_subplot(223)
_ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, colors=lB_color_list, mew=mew, ax=ax3,
show_lgnd=show_lgnd, colors_symbols=color_list,
**kwargs)
### volume fraction of supernatant vs. temperature ###
ax4 = fig.add_subplot(224)
_ = alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
y_lim=alpha_y_lim, marker='',
colors=color_list, ax=ax4, show_lgnd=show_lgnd,
**kwargs)
# customizes tick mark locations
ax4.set_yticks(alpha_yticks)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def fig3(data, lB_list, rho_p_fixed, rho_s_fixed, rho_p_varied, rho_s_varied,
beads_2_M, kwargs, figsize=None, pad=3, vertical=True):
"""Plots Figure 3 of tie lines in polymer-salt plane."""
# formats figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
# determines arrangement of subplots
if vertical:
h = 2 # 2 plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = 2 # 2 plots wide
################ VARIES SALT CONCENTRATION ###############
# creates subplot
ax1 = fig.add_subplot(h, w, 1)
# plots binodal
rho_p_list = rho_p_fixed*np.ones([len(rho_s_varied)])
rho_s_list = rho_s_varied
_ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, ax=ax1, show_lgnd=False, **kwargs)
############ VARIES POLYMER CONCENTRATION ####################
# creates subplot
ax2 = fig.add_subplot(h, w, 2)
# plots binodal
rho_p_list = rho_p_varied
rho_s_list = rho_s_fixed*np.ones([len(rho_p_varied)])
ax = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, ax=ax2, show_lgnd=False, **kwargs)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def figs1(T_range, sigma, T_room_C=20, T_cels=True, figsize=(5,5),
gridspec=10, lw=3, y_lim=[5.5,9.5], y_ticks=[6,7,8,9], d=0.5,
ax_fs=16, tk_fs=16):
"""Plots Figure S1 of the SI of Bjerrum length vs. T for fixed and
T-dependent dielectric constant."""
# computes Bjerrum lengths
T_arr, lB_A_arr, lB_0_A_arr = nacl.lB_comparison(T_range, sigma,
T_room_C=T_room_C)
# creates figure
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize,
gridspec_kw={'height_ratios': [gridspec,1]}, sharex=True)
# adjusts temperature based on requested unit
if T_cels:
T_arr -= K_2_C
unit_T = r'$^{\circ}C$'
else:
unit_T = 'K'
# plots Bjerrum lengths
ax1.plot(T_arr, lB_A_arr, lw=lw, label=r'$\epsilon(T)$')
ax1.plot(T_arr, lB_0_A_arr, lw=lw,
label=r'$\epsilon(T) = \epsilon($' + \
'{0:d}'.format(int(T_room_C)) + r'$^{\circ}C)$')
# formats plot
ax2.set_xlabel(r'$T$ [' + unit_T + ']', fontsize=ax_fs)
ax1.set_ylabel(r'$l_B$ $[\AA]$', fontsize=ax_fs)
ax1.tick_params(axis='both', labelsize=tk_fs)
ax2.tick_params(axis='both', labelsize=tk_fs)
### Creates broken axis
# see: https://matplotlib.org/stable/gallery/subplots_axes_and_figures/broken_axis.html
# set limits and ticks on upper axis
ax1.set_ylim(y_lim)
ax1.set_yticks(y_ticks)
# lower axis
ax2.set_ylim([0, 0.5])
ax2.set_yticks([0])
# hide the spines between ax and ax2
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(top=False, labeltop=False) # don't put ticks or labels at top
ax2.xaxis.tick_bottom()
# plots diagonal hatch marks on y-axis--"d" is ratio of height to length
kwargs = dict(marker=[(-1, -d), (1, d)], markersize=12,
linestyle="none", color='k', mec='k', mew=1, clip_on=False)
ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
return fig
def format_binodal(ax, x_label, units_rho, T_range, y_label=None, title=None,
x_lim=None, y_lim=None, T_cels=False, lgnd_out=True,
square_box=True, show_lgnd=True):
"""
Formats axes of a plot of the binodal projected onto a plane with
temperature as the vertical axis.
"""
if x_lim is not None:
ax.set_xlim(x_lim)
ax.set_xlabel('{0:s} [{1:s}]'.format(x_label, units_rho), fontsize=18)
# assumes that the y axis is temperature if another label is not given
if y_label is None:
T_unit = 'K'
if T_cels:
T_unit = r'$^{\circ}$C'
T_range = [T - K_2_C for T in T_range]
if y_lim is None:
ax.set_ylim(T_range)
else:
ax.set_ylim(y_lim)
ax.set_ylabel(r'$T$' + ' [{0:s}]'.format(T_unit), fontsize=18)
else:
ax.set_ylabel(y_label, fontsize=18)
ax.set_ylim(y_lim)
ax.tick_params(axis='both', labelsize=16)
if title is not None:
ax.set_title(title, fontsize=16)
# makes box of plot square
if square_box:
ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))
# places legend outside of plot box
if show_lgnd:
if lgnd_out:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
ax.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y),
fontsize=14, frameon=False)
else:
ax.legend(fontsize=12, frameon=False)
return ax
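# Illustrative sketch (not part of the original module): applying the
# formatting helper to a bare axis with densities in mol/L and temperature in
# Celsius.
#
#   fig, ax = plt.subplots()
#   format_binodal(ax, r'$\rho_p$', 'mol/L', [273.15, 373.15], T_cels=True,
#                  show_lgnd=False)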
def get_colors(cmap_name, n):
"""Returns list of colors using given colormap."""
cmap = plt.get_cmap(cmap_name)
return [cmap(val) for val in np.linspace(0, 1, n)]
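# Illustrative sketch: three evenly spaced colors from the 'plasma' colormap,
# as the binodal plotting functions above request one color per curve.
#
#   colors = get_colors('plasma', 3)   # list of three RGBA tuples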
def get_lgnd_labels(handles, labels, key):
"""Returns zipped handles and labels for which labels contains key."""
return [pair for pair in zip(handles, labels) if key in pair[1]]
def get_plot_dict_p_s(ch_var):
"""Returns a dictionary of key parameters for plotting based on varied component."""
d = {}
# polyelectrolyte density varied
if ch_var == 'p':
d = {'ch_var':'p', 'ch_fix':'s', 'order':[0,1], 'name_var':'Polymer'}
# salt density varied
elif ch_var == 's':
d = {'ch_var':'s', 'ch_fix':'p', 'order':[1,0], 'name_var':'Salt'}
else:
print('invalid ch_var character: choose s or p.')
return d
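# Illustrative sketch: the dictionary returned when the polymer density is
# varied; 'order' is used to unpack (rho_var, rho_fix) into (rho_p, rho_s).
#
#   get_plot_dict_p_s('p')
#   # -> {'ch_var': 'p', 'ch_fix': 's', 'order': [0, 1], 'name_var': 'Polymer'}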
def line_3d(x, y, z, mode='lines', ms=8, op=0.1,
c='black', lw=8, fig=None):
"""
Plots line in 3D plot (plotly).
"""
if fig is None:
fig = go.Figure()
# plots the line
fig.add_trace(go.Scatter3d(
x=x, y=y, z=z,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c
),
line=dict(
color=c,
width=lw,
),
))
return fig
def no_salt(df, n_plot, left='rhoPCI', right='rhoPCII', x_label='polycation density',
p=None, n_tie_lines=0, plot_T=False, title='', line=False, marker=True,
w=500, h=500, units_rho='[beads/sigma^3]', deg_C=False,
leg1='supernatant', c1='blue', leg2='coacervate', c2='red'):
"""
Plots the binodal for a polyelectrolyte in solution without
salt.
"""
if plot_T:
y = 'T'
if deg_C:
y_label = 'Temperature [' + r'$^{\circ}$' + 'C]'
else:
y_label = 'Temperature [K]'
else:
y = 'BJ'
y_label = 'Bjerrum length'
# samples a uniform subset of the data
n = len(df)
    skip = max(int(n / n_plot), 1)
sample = df.iloc[::skip]
# creates figure object if not provided
if p is None:
p = figure(plot_width=w, plot_height=h)
# loads source for plot data
source = ColumnDataSource(sample)
if marker:
# creates circle glyph of polycation concentration in dilute phase
p.circle(x=left, y=y, source=source, size=10, color=c1,
legend_label=leg1)
# creates circle glyph of polycation concentration in coacervate phase
p.circle(x=right, y=y, source=source, size=10, color=c2,
legend_label=leg2)
if line:
        # creates line glyph of polycation concentration in dilute phase
p.line(x=left, y=y, source=source, line_width=6, line_color=c1,
legend_label=leg1)
        # creates line glyph of polycation concentration in coacervate phase
p.line(x=right, y=y, source=source, line_width=6, line_color=c2,
legend_label=leg2)
# adds tie lines
if n_tie_lines > 0:
        skip_tie_lines = max(int(n / n_tie_lines), 1)
df_tie_lines = df.iloc[::skip_tie_lines]
for t in range(len(df_tie_lines)):
p.line([df_tie_lines[left].iloc[t], df_tie_lines[right].iloc[t]],
[df_tie_lines[y].iloc[t], df_tie_lines[y].iloc[t]],
color='black')
# adds plot labels
p.xaxis.axis_label = x_label + ' ' + units_rho
p.xaxis.axis_label_text_font_size = '18pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = y_label
p.yaxis.axis_label_text_font_size = '18pt'
p.yaxis.major_label_text_font_size = '14pt'
# adds title
p.title.text = title
p.title.text_font_size = '16pt'
# formats legend
p.legend.location = "bottom_right"
p.legend.label_text_font_size = '14pt'
p.legend.click_policy = 'hide'
# creates hover feature to read data
hover = HoverTool()
hover.tooltips=[
(y_label, '@' + y),
(x_label + ' (I)', '@' + left),
(x_label + ' (II)', '@' + right)
]
p.add_tools(hover)
return p
def pt_3d(x, y, z, mode='markers', ms=8, op=1,
c='black', fig=None):
"""
    Plots a single point in a 3D plot (plotly).
    """
    if fig is None:
        fig = go.Figure()
    # adds the point as a single-marker trace
fig.add_trace(go.Scatter3d(
x=[x], y=[y], z=[z],
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c
),
))
return fig
def salt(df, n_plot, p=None, n_tie_lines=0):
"""
Plots the binodal for a polyelectrolyte in solution with salt
at a fixed Bjerrum length on rho_p vs. rho_s axes.
"""
# samples a uniform subset of the data
n = len(df)
    skip = max(int(n / n_plot), 1)
sample = df.iloc[::skip]
# creates figure object if not provided
if p is None:
p = figure()
# loads source for plot data
source = ColumnDataSource(sample)
# creates circle glyph of polycation concentration in dilute phase
p.circle(x='rhoPAI', y='rhoAI', source=source, size=10, color='red', legend_label='dilute phase (I)')
# creates circle glyph of polycation concentration in coacervate phase
p.circle(x='rhoPAII', y='rhoAII', source=source, size=10, color='blue', legend_label='coacervate phase (II)')
# draws tie lines
if n_tie_lines > 0:
        skip_tie_lines = max(int(n / n_tie_lines), 1)
df_tie_lines = df.iloc[::skip_tie_lines]
for t in range(len(df_tie_lines)):
x = [df_tie_lines['rhoPAI'].iloc[t], df_tie_lines['rhoPAII'].iloc[t]]
y = [df_tie_lines['rhoAI'].iloc[t], df_tie_lines['rhoAII'].iloc[t]]
p.line(x, y, color='black')
# adds plot labels
p.xaxis.axis_label = 'polyanion number density'
p.xaxis.axis_label_text_font_size = '18pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = 'anion number density'
p.yaxis.axis_label_text_font_size = '18pt'
p.yaxis.major_label_text_font_size = '14pt'
# formats legend
p.legend.location = "top_right"
p.legend.label_text_font_size = '16pt'
p.legend.click_policy = 'hide'
# creates hover feature to read data
hover = HoverTool()
hover.tooltips=[
('Anion Density (I)', '@rhoAI'),
('Anion Density (II)', '@rhoAII'),
('Polyanion density (I)', '@rhoPAI'),
('Polyanion density (II)', '@rhoPAII')
]
p.add_tools(hover)
return p
def sort_lgnd_labels(ax, sorted_keys):
"""Sorts legend labels based on order of keywords."""
# gets handles and labels from legend
handles, labels = ax.get_legend_handles_labels()
# sorts by keywords
lgnd_sorted = []
for key in sorted_keys:
lgnd_sorted += get_lgnd_labels(handles, labels, key)
    # removes redundant entries while preserving order
    lgnd_unique = []
    seen_labels = set()
    for pair in lgnd_sorted:
        if pair[1] not in seen_labels:
            lgnd_unique.append(pair)
            seen_labels.add(pair[1])
# unzips
handles_sorted, labels_sorted = zip(*lgnd_unique)
# adds legend outside plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
ax.legend(handles_sorted, labels_sorted, loc='center left',
bbox_to_anchor=(legend_x, legend_y),
fontsize=14, frameon=False)
return ax
def validate_fit(data_pred, df_exp, ch_var, rho_var_list, rho_fix, colors,
beads_2_M_opt, T_range=[273.15, 323.15], lw=2, sigma=None,
conv_vals=False, x_var='polyelectrolyte'):
"""
Validates fit of sigma to experiments.
"""
if conv_vals:
rho_p = df_exp['rho_p [M]'].to_numpy(dtype=float)
rho_p_conv = df_exp['rho_p (conv) [M]'].to_numpy(dtype=float)
rho_s = df_exp['rho_s [M]'].to_numpy(dtype=float)
rho_s_conv = df_exp['rho_s (conv) [M]'].to_numpy(dtype=float)
# matches polymer and salt values with fixed and varied concentrations
rho_var_list_conv = []
if ch_var == 'p':
for rho_var in rho_var_list:
i = np.where(rho_var == rho_p)[0][0]
rho_var_list_conv += [rho_p_conv[i]]
rho_fix_conv = rho_s_conv[np.where(rho_fix == rho_s)[0][0]]
elif ch_var == 's':
for rho_var in rho_var_list:
i = np.where(rho_var == rho_s)[0][0]
rho_var_list_conv += [rho_s_conv[i]]
rho_fix_conv = rho_p_conv[np.where(rho_fix == rho_p)[0][0]]
# polymer-temperature plane
if conv_vals:
ax = binodal_vary_rho(data_pred, rho_var_list_conv, rho_fix_conv, ch_var,
beads_2_M_opt,
x_var=x_var, x_label=r'$\rho_p$',
sigma=sigma, T_range=T_range, marker='', lw=lw,
colors=colors, T_cels=True)
else:
ax = binodal_vary_rho(data_pred, rho_var_list, rho_fix, ch_var,
beads_2_M_opt,
x_var=x_var, x_label=r'$\rho_p$',
sigma=sigma, T_range=T_range, marker='', lw=lw,
colors=colors, T_cels=True)
# plots experimental results
for i in range(len(df_exp)):
rho_p, rho_s, T_exp, rho_p_sup, rho_p_co = nacl.read_df_exp(df_exp, i)
if ch_var == 'p':
rho_var_exp = rho_p
rho_fix_exp = rho_s
elif ch_var == 's':
rho_var_exp = rho_s
rho_fix_exp = rho_p
        else:
            print('Please select s or p as ch_var')
            continue
if (rho_var_exp in rho_var_list) and (rho_fix_exp == rho_fix):
# determines color
color = [colors[i] for i in range(len(colors)) if rho_var_list[i] == rho_var_exp][0]
# plots desired species concentration
if x_var == 'polyanion' or x_var == 'polycation':
# if just plotting polyanion, divides total polymer
# concentration in half (assumes symmetric solution)
rho_sup = rho_p_sup / 2
rho_co = rho_p_co / 2
elif x_var == 'polyelectrolyte':
rho_sup = rho_p_sup
rho_co = rho_p_co
# plots supernatant and coacervate compositions
ax.plot(rho_sup, T_exp, color=color, marker='o', label='supernatant')
ax.plot(rho_co, T_exp, color=color, marker='^', label='coacervate')
| 2.8125
| 3
|
tutorial.py
|
tlmolane/Volt
| 0
|
12774354
|
<gh_stars>0
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
private_key = rsa.generate_private_key(
public_exponent = 65537,
key_size = 4096,
backend = default_backend()
)
public_key = private_key.public_key()
pem = private_key.private_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PrivateFormat.PKCS8,
#encryption_algorithm = serialization.BestAvailableEncryption(b'test')
encryption_algorithm=serialization.BestAvailableEncryption(b'test')
#encryption_algorithm=serialization.NoEncryption()
)
pem_2 = public_key.public_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PublicFormat.SubjectPublicKeyInfo
)
# for i in pem.splitlines():
# print(i)
with open('/home/zeefu/Documents/Volt/private_key.pem', 'wb') as f:
f.write(pem)
f.close()
#
with open('/home/zeefu/Documents/Volt/public_key.pem', 'wb') as f:
f.write(pem_2)
f.close()
#
# with open('/home/zeefu/private_key.pem', 'rb') as f:
# private_key = serialization.load_pem_private_key(
# f.read(),
# password = b'<PASSWORD>',
# backend = default_backend()
# )
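# A minimal round-trip sketch (not part of the original tutorial): load the keys
# just written above back from disk. The paths and the b'test' password simply
# mirror the values used earlier in this script.
with open('/home/zeefu/Documents/Volt/private_key.pem', 'rb') as f:
    reloaded_private_key = serialization.load_pem_private_key(
        f.read(),
        password=b'test',
        backend=default_backend()
    )
with open('/home/zeefu/Documents/Volt/public_key.pem', 'rb') as f:
    reloaded_public_key = serialization.load_pem_public_key(
        f.read(),
        backend=default_backend()
    )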
| 2.296875
| 2
|
algorithms/pripel/tracematcher.py
|
samadeusfp/ELPaaS
| 4
|
12774355
|
<filename>algorithms/pripel/tracematcher.py
import sys
from levenshtein import levenshtein as levenshtein
from collections import deque
from pm4py.objects.log import log as event_log
from scipy.optimize import linear_sum_assignment
import random
import numpy as np
class TraceMatcher:
def __init__(self,tv_query_log,log):
print("trace_matcher kreiiert")
self.__timestamp = "time:timestamp"
self.__allTimestamps = list()
self.__allTimeStampDifferences = list()
self.__distanceMatrix = dict()
self.__trace_variants_query = self.__addTraceToAttribute(tv_query_log)
self.__trace_variants_log = self.__addTraceToAttribute(log)
attributeBlacklist = self.__getBlacklistOfAttributes()
self.__distributionOfAttributes,self.__eventStructure = self.__getDistributionOfAttributesAndEventStructure(log, attributeBlacklist)
self.__query_log = tv_query_log
self.__log = log
def __addTraceToAttribute(self, log):
trace_variants = dict()
for trace in log:
variant = ""
for event in trace:
variant = variant + "@" + event["concept:name"]
trace.attributes["variant"] = variant
traceSet = trace_variants.get(variant,set())
traceSet.add(trace)
trace_variants[variant] = traceSet
return trace_variants
def __getBlacklistOfAttributes(self):
blacklist = set()
blacklist.add("concept:name")
blacklist.add(self.__timestamp)
blacklist.add("variant")
blacklist.add("EventID")
blacklist.add("OfferID")
blacklist.add("matricola")
return blacklist
def __handleVariantsWithSameCount(self,variants,traceMatching):
for variant in variants:
for trace in self.__trace_variants_query[variant]:
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[variant].pop()
del self.__trace_variants_log[variant]
del self.__trace_variants_query[variant]
def __handleVariantsUnderrepresentedInQuery(self,variants,traceMatching):
for variant in variants:
if variant in self.__trace_variants_query:
for trace in self.__trace_variants_query.get(variant,list()):
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[variant].pop()
del self.__trace_variants_query[variant]
def __handleVariantsOverrepresentedInQuery(self,variants,traceMatching):
for variant in variants:
for trace in self.__trace_variants_log[variant]:
traceFromQuery = self.__trace_variants_query[variant].pop()
traceMatching[traceFromQuery.attributes["concept:name"]] = trace
del self.__trace_variants_log[variant]
def __getDistanceVariants(self,variant1,variant2):
if variant1 not in self.__distanceMatrix:
self.__distanceMatrix[variant1] = dict()
if variant2 not in self.__distanceMatrix[variant1]:
distance = levenshtein(variant1, variant2)
self.__distanceMatrix[variant1][variant2] = distance
else:
distance = self.__distanceMatrix[variant1][variant2]
return distance
def __findCLosestVariantInLog(self,variant,log):
closestVariant = None
closestDistance = sys.maxsize
for comparisonVariant in log.keys():
distance = self.__getDistanceVariants(variant,comparisonVariant)
if distance < closestDistance:
closestVariant = comparisonVariant
closestDistance = distance
return closestVariant
def __findOptimalMatches(self):
rows = list()
for traceQuery in self.__query_log:
row = list()
for traceLog in self.__log:
row.append(self.__getDistanceVariants(traceQuery.attributes["variant"],traceLog.attributes["variant"]))
rows.append(row)
distanceMatrix = np.array(rows)
row_ind, col_ind = linear_sum_assignment(distanceMatrix)
traceMatching = dict()
for (traceQueryPos, traceLogPos) in zip(row_ind, col_ind):
traceMatching[self.__query_log[traceQueryPos].attributes["concept:name"]] = self.__log[traceLogPos]
return traceMatching
def __matchTraces(self,traceMatching):
for variant in self.__trace_variants_query.keys():
closestVariant = self.__findCLosestVariantInLog(variant,self.__trace_variants_log)
for trace in self.__trace_variants_query[variant]:
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[closestVariant].pop()
if not self.__trace_variants_log[closestVariant]:
del self.__trace_variants_log[closestVariant]
if self.__trace_variants_log:
closestVariant = self.__findCLosestVariantInLog(variant, self.__trace_variants_log)
else:
return
def __getTraceMatching(self):
traceMatching = dict()
variantsWithSameCount = set()
variantsUnderepresentedInQuery = set()
variantsOverepresentedInQuery = set()
for variant in self.__trace_variants_log.keys():
if len(self.__trace_variants_log[variant]) == len(self.__trace_variants_query.get(variant,set())):
variantsWithSameCount.add(variant)
            elif len(self.__trace_variants_log[variant]) > len(self.__trace_variants_query.get(variant,set())) and len(self.__trace_variants_query.get(variant,set())) != 0:
variantsUnderepresentedInQuery.add(variant)
            elif len(self.__trace_variants_log[variant]) < len(self.__trace_variants_query.get(variant,set())):
variantsOverepresentedInQuery.add(variant)
self.__handleVariantsWithSameCount(variantsWithSameCount,traceMatching)
self.__handleVariantsUnderrepresentedInQuery(variantsUnderepresentedInQuery,traceMatching)
self.__handleVariantsOverrepresentedInQuery(variantsOverepresentedInQuery,traceMatching)
self.__matchTraces(traceMatching)
return traceMatching
def __resolveTrace(self,traceInQuery,correspondingTrace,distributionOfAttributes):
eventStacks = self.__transformTraceInEventStack(correspondingTrace)
previousEvent = None
for eventNr in range(0,len(traceInQuery)):
currentEvent = traceInQuery[eventNr]
activity = currentEvent["concept:name"]
latestTimeStamp = self.__getLastTimestampTraceResolving(traceInQuery,eventNr)
if activity in eventStacks:
currentEvent = self.__getEventAndUpdateFromEventStacks(activity,eventStacks)
if currentEvent[self.__timestamp] < latestTimeStamp:
currentEvent[self.__timestamp] = self.__getNewTimeStamp(previousEvent,currentEvent, eventNr,distributionOfAttributes)
else:
currentEvent = self.__createRandomNewEvent(currentEvent,activity,distributionOfAttributes,previousEvent,eventNr)
traceInQuery[eventNr] = currentEvent
previousEvent = currentEvent
self.__debugCheckTimeStamp(traceInQuery, eventNr)
return traceInQuery
def __getEventAndUpdateFromEventStacks(self,activity,eventStacks):
event = eventStacks[activity].popleft()
if not eventStacks[activity]:
del eventStacks[activity]
return event
def __debugTraceTimestamps(self,trace):
for eventNr in range(0):
self.__debugCheckTimeStamp(trace,eventNr)
def __debugCheckTimeStamp(self,trace,eventNr):
if eventNr > 0:
if trace[eventNr -1][self.__timestamp] > trace[eventNr][self.__timestamp]:
print("Fuck")
def __getLastTimestampTraceResolving(self,trace,eventNr):
if eventNr == 0:
latestTimeStamp = trace[eventNr][self.__timestamp]
else:
latestTimeStamp = trace[eventNr - 1][self.__timestamp]
return latestTimeStamp
def __transformTraceInEventStack(self,trace):
eventStacks = dict()
for event in trace:
stack = eventStacks.get(event["concept:name"],deque())
stack.append(event)
eventStacks[event["concept:name"]] = stack
return eventStacks
def __createRandomNewEvent(self,event,activity,distributionOfAttributes,previousEvent,eventNr):
for attribute in self.__eventStructure[activity]:
if attribute in distributionOfAttributes and attribute not in event and attribute != self.__timestamp:
event[attribute] = random.choice(distributionOfAttributes[attribute])
elif attribute == self.__timestamp:
event[self.__timestamp] = self.__getNewTimeStamp(previousEvent,event, eventNr,distributionOfAttributes)
return event
def __getNewTimeStamp(self,previousEvent,currentEvent,eventNr,distributionOfAttributes):
if eventNr == 0:
timestamp = random.choice(self.__allTimestamps)
else:
timestamp = previousEvent[self.__timestamp] + random.choice(distributionOfAttributes[self.__timestamp][previousEvent["concept:name"]].get(currentEvent["concept:name"], self.__allTimeStampDifferences))
return timestamp
def __resolveTraceMatching(self,traceMatching,distributionOfAttributes,fillUp):
log = event_log.EventLog()
for trace in self.__query_log:
traceID = trace.attributes["concept:name"]
if fillUp or traceID in traceMatching:
matchedTrace = self.__resolveTrace(trace,traceMatching.get(traceID,list()),distributionOfAttributes)
self.__debugTraceTimestamps(matchedTrace)
log.append(matchedTrace)
return log
def __handleAttributesOfDict(self, dictOfAttributes, distributionOfAttributes, attributeBlacklist,previousEvent=None):
for attribute in dictOfAttributes.keys():
if attribute not in attributeBlacklist:
distribution = distributionOfAttributes.get(attribute, list())
distribution.append(dictOfAttributes[attribute])
distributionOfAttributes[attribute] = distribution
elif attribute == self.__timestamp and previousEvent is not None:
self.__handleTimeStamp(distributionOfAttributes,previousEvent,dictOfAttributes)
def __handleTimeStamp(self, distributionOfAttributes, previousEvent, currentEvent):
timeStampsDicts = distributionOfAttributes.get(self.__timestamp, dict())
activityDict = timeStampsDicts.get(previousEvent["concept:name"],dict())
timeStampsDicts[previousEvent["concept:name"]] = activityDict
distribution = activityDict.get(currentEvent["concept:name"], list())
timeStampDifference = currentEvent[self.__timestamp] - previousEvent[self.__timestamp]
distribution.append(timeStampDifference)
activityDict[currentEvent["concept:name"]] = distribution
distributionOfAttributes[self.__timestamp] = timeStampsDicts
self.__allTimestamps.append(currentEvent[self.__timestamp])
self.__allTimeStampDifferences.append(timeStampDifference)
def __getDistributionOfAttributesAndEventStructure(self, log, attributeBlacklist):
distributionOfAttributes = dict()
eventStructure = dict()
for trace in log:
self.__handleAttributesOfDict(trace.attributes,distributionOfAttributes,attributeBlacklist)
previousEvent = None
currentEvent = None
for eventNr in range(0,len(trace)):
if currentEvent is not None:
previousEvent = currentEvent
currentEvent = trace[eventNr]
self.__handleAttributesOfDict(currentEvent,distributionOfAttributes,attributeBlacklist,previousEvent)
if not currentEvent["concept:name"] in eventStructure:
attributesOfEvent = set(currentEvent.keys())
attributesOfEvent.remove("concept:name")
eventStructure[currentEvent["concept:name"]] = attributesOfEvent
return distributionOfAttributes, eventStructure
def matchQueryToLog(self,fillUp=True,greedy=False):
print("tm1")
if greedy:
traceMatching = self.__getTraceMatching()
print("tm2")
else:
traceMatching = self.__findOptimalMatches()
print("tm3")
matched_log = self.__resolveTraceMatching(traceMatching,self.__distributionOfAttributes,fillUp)
print("tm4")
return matched_log
def getAttributeDistribution(self):
return self.__distributionOfAttributes
def getTimeStampData(self):
return self.__allTimestamps,self.__allTimeStampDifferences
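# Hypothetical usage sketch (not part of the original module): `tv_query_log` and
# `log` are assumed to be pm4py event logs, as expected by the constructor above.
#   matcher = TraceMatcher(tv_query_log, log)
#   matched_log = matcher.matchQueryToLog(fillUp=True, greedy=True)
#   distribution = matcher.getAttributeDistribution()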
| 2.25
| 2
|
internal/nodejs_jest_test/test_sources_aspect.bzl
|
ColinHeathman/bazel_rules_nodejs_contrib
| 14
|
12774356
|
<reponame>ColinHeathman/bazel_rules_nodejs_contrib<filename>internal/nodejs_jest_test/test_sources_aspect.bzl<gh_stars>10-100
def _test_sources_aspect_impl(target, ctx):
result = depset()
if hasattr(ctx.rule.attr, "tags") and "NODE_MODULE_MARKER" in ctx.rule.attr.tags:
return struct(node_test_sources=result)
if hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
if hasattr(dep, "node_test_sources"):
result = depset(transitive=[result, dep.node_test_sources])
elif hasattr(target, "files"):
result = depset([f for f in target.files.to_list() if f.path.endswith(".test.js")],
transitive=[result])
return struct(node_test_sources=result)
test_sources_aspect = aspect(
_test_sources_aspect_impl,
attr_aspects=["deps"],
)
| 1.882813
| 2
|
api/data/models/concept_class.py
|
CO-CONNECT/mapping-pipeline
| 4
|
12774357
|
from django.db import models
class ConceptClass(models.Model):
concept_class_id = models.CharField(
primary_key=True, max_length=20
)
concept_class_name = models.CharField(
max_length=255
)
concept_class_concept_id = models.IntegerField(
)
class Meta:
managed = False
db_table = 'omop"."concept_class'
| 2.046875
| 2
|
test/test_bridge.py
|
jasonpjacobs/systemrdl-compiler
| 141
|
12774358
|
from unittest_utils import RDLSourceTestCase
class TestBridge(RDLSourceTestCase):
def test_bridge(self):
top = self.compile(
["rdl_src/bridge.rdl"],
"some_bridge"
)
self.assertEqual(
top.find_by_path("some_bridge.ahb.ahb_credits").absolute_address,
0x0
)
self.assertEqual(
top.find_by_path("some_bridge.ahb.ahb_stat").absolute_address,
0x20
)
self.assertEqual(
top.find_by_path("some_bridge.axi.axi_credits").absolute_address,
0x0
)
self.assertEqual(
top.find_by_path("some_bridge.axi.axi_stat").absolute_address,
0x40
)
def test_bridge_errors(self):
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"illegal_wrapper",
r"The 'bridge' property can only be applied to the root address map"
)
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"not_enough_addrmaps",
r"Addrmap 'not_enough_addrmaps' is a bridge and shall contain 2 or more sub-addrmaps"
)
self.assertRDLCompileError(
["rdl_err_src/err_bridge.rdl"],
"illegal_children",
r"Addrmap 'illegal_children' is a bridge which can only contain other addrmaps. Contains a child instance 'y' which is a reg"
)
| 2.734375
| 3
|
PYTHON/Fibanacci_numbers_by_replacing_prime_numbers_and_multiples_of_5_by_0.py
|
ayushyado/HACKTOBERFEST2021-2
| 125
|
12774359
|
<gh_stars>100-1000
def is_prime(n):
if n > 1:
for i in range(2, n // 2 + 1):
if (n % i) == 0:
return False
else:
return True
else:
return False
def fibonacci(n):
n1, n2 = 1, 1
count = 0
if n == 1:
print(n1)
else:
while count < n:
if not is_prime(n1) and n1 % 5 != 0:
print(n1, end=' ')
else:
print(0, end=' ')
n3 = n1 + n2
n1 = n2
n2 = n3
count += 1
n = int(input("Enter the number:"))
fibonacci(n)
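# Hand-checked example (not part of the original script): for n = 10 the program
# prints "1 1 0 0 0 8 0 21 34 0 " because the Fibonacci terms 2, 3, 5 and 13 are
# prime and 5 and 55 are multiples of 5, so those positions become 0.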
| 4.03125
| 4
|
hackerrank/Algorithms/Lego Blocks/solution.py
|
ATrain951/01.python-com_Qproject
| 4
|
12774360
|
<gh_stars>1-10
#!/bin/python3
import os
#
# Complete the legoBlocks function below.
#
def legoBlocks(n, m):
#
# Write your code here.
#
mod = 1000000007 # 10 ** 9 + 7
height = n % mod
width = m % mod
# The number of combinations to build a single row
row_combinations = [1, 1, 2, 4]
# Build row combinations up to this wall's width
while len(row_combinations) <= width:
row_combinations.append(sum(row_combinations[-4:]) % mod)
# Compute total combinations for constructing a wall of height N of varying widths
total = [pow(c, height, mod) for c in row_combinations]
# Find the number of unstable wall configurations for a wall of height N of varying widths
unstable = [0, 0]
for i in range(2, width + 1):
unstable.append(sum((total[j] - unstable[j]) * total[i - j] for j in range(1, i)) % mod)
# Return the number of stable wall combinations
return (total[width] - unstable[width]) % mod
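# Hand-checked example (not part of the original submission): legoBlocks(2, 2) == 3.
# Of the 4 possible 2x2 walls, only the one built from four 1x1 blocks has a
# full-height vertical seam, so 3 stable configurations remain.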
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
nm = input().split()
n = int(nm[0])
m = int(nm[1])
result = legoBlocks(n, m)
fptr.write(str(result) + '\n')
fptr.close()
| 3.546875
| 4
|
src/atcoder/abc014/d/sol_6.py
|
kagemeka/competitive-programming
| 1
|
12774361
|
import typing
import sys
import numpy as np
import numba as nb
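# Offline LCA via an Euler tour and union-find (Tarjan's method): for each query
# pair (a, b) the script prints depth[a] + depth[b] - 2 * depth[lca(a, b)] + 1,
# i.e. the number of vertices on the tree path between a and b.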
@nb.njit
def csgraph_to_directed(g: np.ndarray) -> np.ndarray:
m = len(g)
g = np.vstack((g, g))
g[m:, :2] = g[m:, 1::-1]
return g
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx, original_idx
@nb.njit
def euler_tour_edge(
g: np.ndarray,
edge_idx: np.ndarray,
root: int,
) -> typing.Tuple[(np.ndarray, ) * 3]:
n = g[:, :2].max() + 1
parent = np.full(n, -1, np.int64)
depth = np.zeros(n, np.int64)
tour = np.empty(n * 2, np.int64)
st = [root]
for i in range(2 * n):
u = st.pop()
tour[i] = u
if u < 0: continue
st.append(-u - 1)
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if v == parent[u]: continue
parent[v] = u
depth[v] = depth[u] + 1
st.append(v)
return tour, parent, depth
@nb.njit
def euler_tour_node(
g: np.ndarray,
edge_idx: np.ndarray,
root: int,
) -> typing.Tuple[(np.ndarray, ) * 4]:
tour, parent, depth = euler_tour_edge(g, edge_idx, root)
n = len(tour) >> 1
tour = tour[:-1]
first_idx = np.full(n, -1, np.int64)
for i in range(2 * n - 1):
u = tour[i]
if u < 0:
tour[i] = parent[~u]
continue
first_idx[u] = i
return tour, first_idx, parent, depth
@nb.njit
def uf_build(n: int) -> np.ndarray:
return np.full(n, -1, np.int64)
@nb.njit
def uf_find(uf: np.ndarray, u: int) -> int:
if uf[u] < 0: return u
uf[u] = uf_find(uf, uf[u])
return uf[u]
@nb.njit
def uf_unite(
uf: np.ndarray,
u: int,
v: int,
) -> typing.NoReturn:
u, v = uf_find(uf, u), uf_find(uf, v)
if u == v: return
if uf[u] > uf[v]: u, v = v, u
uf[u] += uf[v]
uf[v] = u
@nb.njit
def lca(
g: np.ndarray,
edge_idx: np.ndarray,
vu: np.ndarray,
) -> np.ndarray:
m = len(vu)
tour, parent, _ = euler_tour_edge(g, edge_idx, 0)
n = len(tour) >> 1
first_idx = np.full(n, -1, np.int64)
for i in range(len(tour)):
u = tour[i]
if u < 0: continue
first_idx[u] = i
for i in range(m):
v, u = vu[i]
if first_idx[v] < first_idx[u]: vu[i] = vu[i, ::-1]
vu, query_idx, original_idx = sort_csgraph(n, vu)
_lca = np.empty(m, np.int64)
uf = uf_build(n)
ancestor = np.arange(n)
for v in tour[:-1]:
if v >= 0: continue
v = ~v
for j in range(query_idx[v], query_idx[v + 1]):
u = vu[j, 1]
_lca[original_idx[j]] = ancestor[uf_find(uf, u)]
p = parent[v]
uf_unite(uf, v, p)
ancestor[uf_find(uf, p)] = p
return _lca
@nb.njit((nb.i8[:, :], nb.i8[:, :]), cache=True)
def solve(xy: np.ndarray, ab: np.ndarray) -> typing.NoReturn:
n = len(xy) + 1
g = csgraph_to_directed(xy)
g, edge_idx, _ = sort_csgraph(n, g)
_, _, depth = euler_tour_edge(g, edge_idx, 0)
_lca = lca(g, edge_idx, ab)
for i in range(len(ab)):
u, v = ab[i]
l = _lca[i]
d = depth[u] + depth[v] - 2 * depth[l] + 1
print(d)
def main() -> typing.NoReturn:
n = int(input())
I = np.array(
sys.stdin.read().split(),
dtype=np.int64,
)
xy = I[:2 * (n - 1)].reshape(n - 1, 2) - 1
ab = I[2 * n - 1:].reshape(-1, 2) - 1
solve(xy, ab)
main()
| 2.609375
| 3
|
cross3d/motionbuilder/motionbuilderscene.py
|
vedantirb/cross3d
| 129
|
12774362
|
##
# \namespace cross3d.softimage.motionbuilderscene
#
# \remarks The MotionBuilderScene class will define all the operations for Motion Builder scene interaction.
#
# \author douglas
# \author Blur Studio
# \date 06/21/12
#
import pyfbsdk as mob
from cross3d.abstract.abstractscene import AbstractScene
from PyQt4.QtGui import QFileDialog
#------------------------------------------------------------------------------------------------------------------------
class MotionBuilderScene( AbstractScene ):
def __init__( self ):
self._fbapp = mob.FBApplication()
AbstractScene.__init__( self )
def saveFileAs(self, filename=''):
"""
        Implements AbstractScene.saveFileAs to save the current scene to the given filename. If no name is supplied,
        the user is prompted to pick one.
:param filename: Name of the file to save
:return : Success, Bool
"""
if not filename:
filename = unicode(QFileDialog.getSaveFileName(None, 'Save Motion Builder File', '', 'FBX (*.fbx);;All files (*.*)'))
print 'filename', filename, self._fbapp
if filename:
return self._fbapp.FileSave(unicode(filename).encode('utf8'))
return False
def retarget(self, inputRigPath, inputAnimationPath, outputRigPath, outputAnimationPath):
return False
# register the symbol
import cross3d
cross3d.registerSymbol('Scene', MotionBuilderScene)
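# Hypothetical usage sketch (not part of the original module): inside a running
# MotionBuilder session the wrapper could be driven roughly like this; the output
# path is an illustrative assumption.
#   scene = MotionBuilderScene()
#   scene.saveFileAs(r'C:/temp/export.fbx')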
| 2.359375
| 2
|
src/secimtools/visualManager/module_scatter.py
|
GalaxyDream/SECIMTools
| 0
|
12774363
|
<reponame>GalaxyDream/SECIMTools<filename>src/secimtools/visualManager/module_scatter.py<gh_stars>0
######################################################################################
# Date: 2016/July/11
#
# Module: module_2DScatter.py
#
# VERSION: 0.9
#
# AUTHOR: <NAME> (<EMAIL>);
# edited by <NAME>(<EMAIL>)
#
# DESCRIPTION: This module contains a primary method (quickPlot)
# and two deprecated methods which are not to be used
#
# makeScatter is to be called from other scripts which require a graph
#######################################################################################
def scatter2D(ax,x,y,colorList,ec='black'):
"""
This function is to be called by makeScatter2D, creates a 2D scatter plot on a given axis with
colors determined by the given colorHandler or an optional override
:Arguments:
:type ax: matplotlib Axis2D
:param ax: Axis on which scatter plot will be drawn
:type x: list of floats
:param x: list of x values to be plotted
:type y: list of floats
:param y: list of y values to be plotted
:type colorList: list
:param colorList: list of colors to be used for plotting
:type ec: str
:param ec: Edge color for markers
:Return:
:type ax: Matplotlib Axis
:param ax: axis with scatter plotted onto it
"""
ax.scatter(x,y,color=colorList,marker='o',s=50,edgecolors=ec)
return ax
def scatter3D(ax,x,y,z,colorList):
"""
This function is to be called by makeScatter3D, creates a 3D scatter plot on a given axis with
colors determined by the given colorHandler.
:Arguments:
:type ax: matplotlib Axis3D
:param ax: Axis on which scatter plot will be drawn
:type x: list of floats
:param x: list of x values to be plotted
:type y: list of floats
:param y: list of y values to be plotted
:type z: list of floats
:param z: list of z values to be plotted
:type colorList: list
:param colorList: list of colors to be used for plotting
:Return:
:type ax: Matplotlib Axis
:param ax: axis with scatter plotted onto it
"""
ax.scatter(xs=x,ys=y,zs=z,c=colorList,marker='o',s=50,depthshade=False)
return ax
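# A minimal demo sketch (not part of the original module) showing how scatter2D can
# be called with a plain matplotlib axis; the data points and colors are assumptions.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    demo_fig, demo_ax = plt.subplots()
    scatter2D(demo_ax, [1, 2, 3], [4, 5, 6], ['red', 'green', 'blue'])
    plt.show()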
| 2.5625
| 3
|
stix_shifter_modules/csa/stix_translation/data_mapper.py
|
remkohdev/stix-shifter
| 0
|
12774364
|
from os import path
import json
import re
from stix_shifter_utils.stix_translation.src.utils.exceptions import DataMappingException
from stix_shifter_utils.modules.base.stix_translation.base_data_mapper import BaseDataMapper
def _fetch_mapping(dialect=''):
try:
if dialect != '':
dialect = dialect + '_'
basepath = path.dirname(__file__)
filepath = path.abspath(
path.join(basepath, "json", dialect + "from_stix_map.json"))
map_file = open(filepath).read()
map_data = json.loads(map_file)
return map_data
except Exception as ex:
        print('exception in _fetch_mapping():', ex)
return {}
class DataMapper(BaseDataMapper):
def map_object(self, stix_object_name):
self.map_data = _fetch_mapping(self.dialect)
if stix_object_name in self.map_data and self.map_data[stix_object_name] != None:
return self.map_data[stix_object_name]
else:
raise DataMappingException(
"Unable to map object `{}` into SQL".format(stix_object_name))
def map_field(self, stix_object_name, stix_property_name):
self.map_data = _fetch_mapping(self.dialect)
if stix_object_name in self.map_data and stix_property_name in self.map_data[stix_object_name]["fields"]:
return self.map_data[stix_object_name]["fields"][stix_property_name]
else:
return []
def map_selections(self):
try:
filepath = path.abspath(
path.join(self.basepath, "json", self.dialect + "_event_fields.json"))
sql_fields_file = open(filepath).read()
sql_fields_json = json.loads(sql_fields_file)
# Temporary default selections, this will change based on upcoming config override and the STIX pattern that is getting converted to SQL.
field_list = sql_fields_json['default']
sql_select = ", ".join(field_list)
return sql_select
except Exception as ex:
print('Exception while reading sql fields file:', ex)
return {}
| 2.25
| 2
|
models/RSA.py
|
judithfan/graphcomm
| 2
|
12774365
|
<reponame>judithfan/graphcomm
from __future__ import division
import os
import thread
import subprocess
import numpy as np
import sys
analysis_path = '../analysis'
if analysis_path not in sys.path:
sys.path.append(analysis_path)
import analysis_helpers as h
### python RSA.py --wppl BDA --perception human multimodal_fc6 multimodal_conv42 multimodal_pool1 --pragmatics combined S0 --production cost nocost --split_type balancedavg1 balancedavg2 balancedavg3 balancedavg4 balancedavg5
### python RSA.py --wppl flatten
### python RSA.py --wppl evaluate --perception human --pragmatics combined S0 --production cost nocost --split_type balancedavg1 balancedavg2 balancedavg3 balancedavg4 balancedavg5
### python RSA.py --wppl BDA-enumerate --sim_scaling_lb 1 --sim_scaling_ub 200 --step_size 2 --split_type balancedavg1
### python RSA.py --wppl AIS --perception multimodal_fc6 --production cost --pragmatics combined --num_ais_samples 2 --split_type balancedavg1
def run_bda(perception, pragmatics, production, split_type):
if not os.path.exists('./bdaOutput'):
os.makedirs('./bdaOutput')
if not os.path.exists('./bdaOutput/{}_{}'.format(perception,split_type)):
os.makedirs('./bdaOutput/{}_{}'.format(perception,split_type))
os.makedirs('./bdaOutput/{}_{}/raw'.format(perception,split_type))
# check to make sure we do not already have output
if not os.path.exists('./bdaOutput/{}_{}/raw/{}_{}_{}_{}Params.csv'.format(perception,\
split_type,\
perception,\
pragmatics,\
production,\
split_type)):
#sample: models/bdaOutput/human_balancedavg1/raw/human_combined_cost_balancedavg1Params.csv
cmd_string = 'webppl BDA.wppl --require ./refModule/ -- --perception {} --pragmatics {} --production {} --splitType {}'.format(perception, pragmatics, production, split_type)
print 'Running: {}'.format(cmd_string)
thread.start_new_thread(os.system,(cmd_string,))
else:
print 'Already have BDA output for model {} {} {} {}. Not proceeding unless files moved/renamed.'.format(perception,pragmatics,production,split_type)
def flatten_bda_output(adaptor_types = ['multimodal_pool1','multimodal_conv42','multimodal_fc6', 'human'], verbosity=1):
h.flatten_param_posterior(adaptor_types = adaptor_types,verbosity=verbosity)
def run_bda_enumerate(simScaling, split_type):
if not os.path.exists('./enumerateOutput'):
os.makedirs('./enumerateOutput')
if not os.path.exists(os.path.join('./enumerateOutput',split_type)):
os.makedirs(os.path.join('./enumerateOutput',split_type))
cmd_string = 'webppl BDA-enumerate.wppl --require ./refModule/ -- --simScaling {} --splitType {}'.format(simScaling, split_type)
print 'Running: {}'.format(cmd_string)
thread.start_new_thread(os.system,(cmd_string,))
def run_evaluate(perception, pragmatics, production, split_type):
if not os.path.exists('./evaluateOutput'):
os.makedirs('./evaluateOutput')
out_dir = './evaluateOutput/{}_{}_{}_{}'.format(perception,\
pragmatics,\
production,\
split_type)
if not os.path.exists(out_dir):
# os.makedirs(out_dir)
cmd_string = 'webppl evaluate.wppl --require ./refModule/ -- --paramSetting {}_{}_{} --adaptorType {} --splitType {}'.format(perception, pragmatics, production, perception, split_type)
print 'Running: {}'.format(cmd_string)
thread.start_new_thread(os.system,(cmd_string,))
else:
print 'Already have evaluation output for model {} {} {} {}. Not proceeding unless files moved/renamed.'.format(perception,pragmatics,production,split_type)
def run_ais(perception, pragmatics, production, split_type, num_samp):
if not os.path.exists('./aisOutput'):
os.makedirs('./aisOutput')
cmd_string = 'webppl BF.wppl --require ./refModule/ -- --perception {} --pragmatics {} --production {} --splitType {}'.format(perception, pragmatics, production, split_type)
print '{} | Running: {}'.format(num_samp,cmd_string)
thread.start_new_thread(os.system,(cmd_string,))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--wppl', type=str, help='options: BDA | BDA-enumerate | evaluate | AIS', default='BDA')
parser.add_argument('--perception', nargs='+', type=str, \
                        help='options: human | multimodal_conv42 | multimodal_pool1 | multimodal_fc6',\
default = 'multimodal_conv42')
parser.add_argument('--pragmatics', nargs='+', type=str, \
help='option: combined | S1 | S0',\
default = 'combined')
parser.add_argument('--production', nargs='+', type=str, \
help='option: cost | nocost',\
default = 'cost')
parser.add_argument('--split_type', nargs='+', type=str, \
help='option: splitbyobject | alldata | balancedavg',\
default = 'balancedavg')
parser.add_argument('--sim_scaling_lb', type=float, \
help='for BDA-enumerate only: this is the LOWER bound for the simScaling param. \
We will sweep through values from (sim_scaling_lb, sim_scaling_ub) in step_size sized steps',\
default = 1.0)
parser.add_argument('--sim_scaling_ub', type=float, \
help='for BDA-enumerate only: this is the UPPER bound for the simScaling param. \
We will sweep through values from (sim_scaling_lb, sim_scaling_ub) in step_size sized steps',\
default = 200.0)
parser.add_argument('--step_size', type=float, \
help='for BDA-enumerate only: this is the step size we will use to march through \
the simScaling range',\
default = 2.0)
parser.add_argument('--num_ais_samples', type=int, \
help='how many AIS samples do you want to take in parallel?',
default = 1)
args = parser.parse_args()
print args.split_type
perception = args.perception
production = args.production
pragmatics = args.pragmatics
split_type = args.split_type
lb = args.sim_scaling_lb
ub = args.sim_scaling_ub
if lb==ub: ## to get last value in range, make sure that np.arange has an interval of non-zero length to work with
ub = ub + 2
step_size = args.step_size
assert args.wppl in ['BDA','evaluate', 'BDA-enumerate', 'AIS', 'flatten']
## first run BDA-enumerate.wppl
if 'BDA-enumerate' in args.wppl:
ss_range = np.arange(lb,ub,step_size)
for i,ss in enumerate(ss_range):
for split in split_type:
run_bda_enumerate(ss,split)
## first run BDA.wppl
elif 'BDA' in args.wppl:
for perc in perception:
for prag in pragmatics:
for prod in production:
for split in split_type:
run_bda(perc,prag,prod,split)
elif 'evaluate' in args.wppl:
## then on output, run evaluate.wppl
for perc in perception:
print perc
for prag in pragmatics:
for prod in production:
for split in split_type:
run_evaluate(perc,prag,prod,split)
elif 'AIS' in args.wppl:
for perc in perception:
for prag in pragmatics:
for prod in production:
for split in split_type:
for num_samp in np.arange(args.num_ais_samples):
run_ais(perc,prag,prod,split,num_samp)
elif 'flatten' in args.wppl:
flatten_bda_output(adaptor_types = ['multimodal_pool1','multimodal_conv42','multimodal_fc6', 'human'], verbosity=1)
else:
print '{} wppl command not recognized'.format(args.wppl)
| 2.046875
| 2
|
flask_secret_generator.py
|
amahlaka/SpotifyPythonControl
| 1
|
12774366
|
<filename>flask_secret_generator.py
import random
import string
def generate_activation_token():
secret = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(64))
print(secret)
generate_activation_token()
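# A hedged alternative sketch (not in the original script): Python 3.6+ ships the
# `secrets` module, which is designed for exactly this kind of token generation.
#   import secrets
#   secret = secrets.token_urlsafe(48)  # 64 URL-safe characters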
| 2.9375
| 3
|
categories/urls.py
|
aykutgk/GoNaturalistic
| 0
|
12774367
|
from django.conf.urls import patterns, url
from categories import views
urlpatterns = patterns('',
# /categories/
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<slug>[-_\w]+)/$', views.CategoryPageView.as_view(), name='categoryPage'),
)
| 1.90625
| 2
|
LeetCode/algorithms/best-time-to-buy-and-sell-stock-iii/solution.py
|
cuihaoleo/exercises
| 1
|
12774368
|
<filename>LeetCode/algorithms/best-time-to-buy-and-sell-stock-iii/solution.py
from typing import List


class Solution:
def maxProfit(self, prices: List[int]) -> int:
running_min = prices[0]
best_trans1 = [0]
for p in prices[1:]:
if p < running_min:
running_min = p
best_trans1.append(max(p - running_min, best_trans1[-1]))
running_max = prices[-1]
best = best_trans1.pop()
best_trans2 = 0
for p in prices[:0:-1]:
if p > running_max:
running_max = p
if running_max - p > best_trans2:
best_trans2 = running_max - p
trans1 = best_trans1.pop()
if best_trans2 + trans1 > best:
best = best_trans2 + trans1
return best
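# Hand-checked example (not part of the original solution): for prices
# [3, 3, 5, 0, 0, 3, 1, 4] the method returns 6 (buy at 0 / sell at 3, then
# buy at 1 / sell at 4).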
| 3.53125
| 4
|
tests/test_views.py
|
art1415926535/django-rest-framework-include-mixin
| 0
|
12774369
|
<gh_stars>0
# Third Party
from model_mommy import mommy
# Django
from django.contrib.auth.models import Group, User
from django.test import TransactionTestCase
# Rest Framework
from rest_framework import status
from rest_framework.test import APIClient
# Project apps
from tests.models import UserProfile
class TestViews(TransactionTestCase):
@classmethod
def setUp(cls):
cls.client = APIClient()
super(TestViews, cls).setUpClass()
cls.number = 3
mommy.make(UserProfile, _quantity=cls.number)
mommy.make(Group, _quantity=cls.number)
for user in User.objects.all():
for group in Group.objects.all():
user.groups.add(group)
def test_select(self):
with self.assertNumQueries(1):
response = self.client.get(
'/users/',
data=[
('include[]', 'profile'),
],
)
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
)
self.assertEqual(
len(response.data),
self.number,
)
self.assertSetEqual(
set(response.data[0].keys()),
{'id', 'first_name', 'last_name', 'username', 'profile'},
)
def test_prefetch(self):
with self.assertNumQueries(3):
response = self.client.get(
'/users/',
data=[
('include[]', 'groups'),
('include[]', 'may_know_users'),
],
)
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
)
self.assertEqual(
len(response.data),
self.number,
)
self.assertSetEqual(
set(response.data[0].keys()),
{'id', 'first_name', 'last_name', 'username',
'groups', 'may_know_users'},
)
def test_without_select_related_and_prefetch_related(self):
response = self.client.get('/groups/')
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
)
| 2.140625
| 2
|
craftassist/agent/voxel_models/detection-transformer/datasets/voc2012.py
|
kandluis/droidlet
| 669
|
12774370
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from .voc import VOCDetection
from typing import Iterable
import to_coco_api
VOC_PATH = "/datasets01/VOC/060817/"
class VOCDetection2012(VOCDetection):
def __init__(self, image_set: str = "train", transforms: Iterable = None):
super(VOCDetection, self).__init__(
VOC_PATH, image_set=image_set, year="2012", download=False
)
self.prepare = to_coco_api.PrepareInstance()
self._transforms = transforms
from .voc import make_voc_transforms
def build(image_set, args):
# if we only use voc2012, then we need to adapt trainval and test to
# VOC2012 constraints
if image_set == "test":
image_set = "val"
if image_set == "trainval":
image_set = "train"
return VOCDetection2012(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
| 2.28125
| 2
|
make_splits.py
|
yandex-research/crosslingual_winograd
| 6
|
12774371
|
<filename>make_splits.py
import argparse
import random
import numpy as np
from sklearn.model_selection import train_test_split
def set_seed(s):
random.seed(s)
np.random.seed(s)
def get_examples(filename, use_lang, split=0., seed=1):
texts = []
with open(filename, encoding='utf-8') as ifh:
for line in ifh:
chunks = line.strip().split('\t')
lang = chunks[0]
if use_lang != lang:
continue
texts.append(line.strip())
if split == 0.:
return texts, []
X_train, X_test = train_test_split(texts, random_state=seed, train_size=split)
return X_train, X_test
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", default='dataset.tsv', type=str,
help="The input .tsv file.")
parser.add_argument("--output_dir", default='splits', type=str,
help="The output folder.")
parser.add_argument("--folds", default=5, type=int,
help="The number of folds.")
args = parser.parse_args()
my_seed = 0
set_seed(my_seed)
for _l in ('en', 'ru', 'pt', 'jp', 'zh', 'fr'):
base_examples, test_examples = get_examples(args.input_file, _l, split=0.9, seed=my_seed)
print(f'LANG {_l}')
print(f'base {len(base_examples)}')
print(f'test {len(test_examples)}')
with open(f'{args.output_dir}/lang_{_l}_test.tsv', 'w', encoding='utf-8') as fh:
print('\n'.join(test_examples), file=fh)
for subseed in range(args.folds):
train, dev = train_test_split(base_examples, random_state=subseed, test_size=len(test_examples))
print(f'train{subseed} {len(train)} dev{subseed} {len(dev)}')
with open(f'{args.output_dir}/lang_{_l}_fold{subseed}_train.tsv', 'w', encoding='utf-8') as fh:
print('\n'.join(train), file=fh)
with open(f'{args.output_dir}/lang_{_l}_fold{subseed}_dev.tsv', 'w', encoding='utf-8') as fh:
print('\n'.join(dev), file=fh)
if __name__ == "__main__":
main()
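# Example invocation (values mirror the argparse defaults above):
#   python make_splits.py --input_file dataset.tsv --output_dir splits --folds 5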
| 2.8125
| 3
|
templates/document-a4/etc/settings.py
|
mayersre/sphinx-gitlab-quickstart
| 0
|
12774372
|
<reponame>mayersre/sphinx-gitlab-quickstart
'''
General settings for the Sphinx-Gitlab-Quickstart (SGQ) project
(c) <NAME>, 2020
MIT License
The Variables are used in the User interfaces. They should reflect your standard setup
GIT_ROOT in this place the new Project will be created
LIB_DIR place things here that all projects need
LOGO_DIR here the build will look for the Logos on the Documents
COMMON_DIR maybe mount a common git repository here, currently not used
You need to set in the Environment or in the Gitlab preferences :
SGQ_VERSION
SGQ_MESSAGE
'''
import os, sys
import git
# I usually clone this repo and run the gui from here, so git root is below ...
GIT_ROOT = os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) )
LIB_DIR = os.path.join(GIT_ROOT, 'library')
LOGO_DIR = os.path.join(LIB_DIR, 'logos')
COMMON_DIR = os.path.join(LIB_DIR, 'common')
SOURCE_DIR = os.path.join(GIT_ROOT, 'source')
#
# Create Version and Revision
#
repo = git.Repo(GIT_ROOT)
#
try :
GIT_VERSION=repo.tags[-1].name
GIT_MESSAGE=repo.git.tag(n=True).split('\n')[-1]
except IndexError :
GIT_VERSION = os.environ.get('SGQ_VERSION', 'Initial Version not defined yet')
GIT_MESSAGE = os.environ.get('SGQ_MESSAGE', 'Initial Message not defined yet')
#
GIT_REVISION=repo.commit().hexsha
| 1.765625
| 2
|
reles/modificators.py
|
GermerCarsten/RelES
| 0
|
12774373
|
from __future__ import absolute_import
from collections import namedtuple
from time import time
from flask import g
from jsonschema import ValidationError
from reles.references import resolve_field_reference
ProcessingContext = namedtuple(
'ProcessingContext',
('datastore', 'doc_id', 'full_entity', 'full_schema')
)
def _log_access(entity, schema, parent, context):
if entity:
raise ValidationError("'log_access' entity cannot be overridden manually")
if not context.doc_id:
log = {schema['created']: int(time())}
else:
log = {schema['updated']: int(time())}
# This dependency means that the modificator will only work in a request context.
log[schema['user']] = g.user['email']
log[schema['customer']] = g.customer['name']
return log
def _fill_from_fkey(entity, schema, parent, context):
    # type: (Any, dict, Any, ProcessingContext) -> Union[dict, Sequence, str]
"""
    The *fill from foreign key* modifier pulls a related document - or one of its attributes -
    into the document being processed. This is useful for enriching the indexes of the document
    being processed with data from the related document, essentially denormalizing their
    relationship. This can make certain kinds of queries easier, or possible at all.
"""
def denormalize(id):
source_document = context.datastore.get_document(_index, _doc_type, id)
if _field:
docs = resolve_field_reference(_field, None, source_document)
return docs[0] if docs else None
else:
return source_document
if entity:
raise ValidationError("'fill_from_fkey' entity can not be set to anything!")
_index = schema['source']['index']
_doc_type = schema['source']['doc_type']
_field = schema['source'].get('field', '')
_fkey_field = schema['fkey_field']
_fkey_values = resolve_field_reference(_fkey_field, parent, context.full_entity)
if _fkey_values:
# The fkey(s) pointing at the data to be denormalized are set
denormalized = []
for _fkey_data in _fkey_values:
if isinstance(_fkey_data, list):
denormalized.extend([denormalize(_id) for _id in _fkey_data])
else:
denormalized.append(denormalize(_fkey_data))
return denormalized
else:
return None
def _include_parents(entity, schema, parent, context):
# type: (list, dict, Any, ProcessingContext) -> Sequence
def _get_parents(child_id):
while child_id is not None:
yield child_id
child_id = context.datastore.get_document(
index,
doc_type,
child_id
).get(parent_field)
if not entity:
# Nothing to expand
return entity
index = schema['index']
doc_type = schema['doc_type']
parent_field = schema['parent_field']
parents = set()
for child_id in entity:
parents.update([id for id in _get_parents(child_id)])
return list(parents)
class Processor(object):
_processors = {
'x-log-access': _log_access,
'x-fill-from-fkey': _fill_from_fkey,
'x-include-parents': _include_parents,
}
def __init__(self, schema, datastore, processors=None):
# type: (dict, DataStore, dict) -> None
super(Processor, self).__init__()
self._datastore = datastore
self.schema = schema
if processors is not None:
self._processors = processors
def _process(self, key, schema, entity, parent, context):
# type: (str, dict, Union[dict, Sequence, str], Union[dict, Sequence, str], ProcessingContext) -> Union[dict, Sequence, str]
"""
Recursive helper function for process(). Applies processors in a *depth first* manner, if
no processors are applicable it ends up copying the entity.
"""
# apply any applicable processors on this entity...
for processor_name, processor in self._processors.items():
if processor_name in schema:
entity = processor(entity, schema[processor_name], parent, context)
# ...then recurse deeper into the schema/entity
if schema['type'] == 'object' and isinstance(entity, dict):
for _key, _schema in schema.get('properties', {}).items():
# If the field has not been sent, `None` is passed down as the entity.
# Modifiers will be applied, but recursion stops due to type checks.
processed = self._process(_key, _schema, entity.get(_key), entity, context)
if processed is not None:
entity[_key] = processed
return entity
elif schema['type'] == 'array' and isinstance(entity, list):
return [self._process(key, schema['items'], _entity, entity, context) for _entity in entity]
else:
return entity
def process(self, entity, id=None):
# type: (Union[dict, Sequence, str]) -> Union[dict, Sequence, str]
"""
        Applies any configured processors to the given entity (document) and returns the
        processed entity. Top-level fields may be updated in place (see the TODO below).
"""
context = ProcessingContext(
datastore=self._datastore,
doc_id=id,
full_entity=entity,
full_schema=self.schema,
)
for key, sub_schema in self.schema['properties'].items():
processed = self._process(key, sub_schema, entity.get(key, None), entity, context)
if processed is not None:
entity[key] = processed
# TODO: eliminate `return` statement, use `entity` as in-out-parameter
return entity
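# Hypothetical usage sketch (not part of the original module): `datastore` is
# assumed to be a DataStore instance and `schema` a JSON schema dict as used above.
#   processor = Processor(schema, datastore)
#   document = processor.process(document, id=doc_id)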
| 2.34375
| 2
|
examples/tmva/plot_multiclass.py
|
douglasdavis/root_numpy
| 83
|
12774374
|
"""
=============================================
Multiclass Classification with NumPy and TMVA
=============================================
"""
from array import array
import numpy as np
from numpy.random import RandomState
from root_numpy.tmva import add_classification_events, evaluate_reader
from root_numpy import ROOT_VERSION
import matplotlib.pyplot as plt
from ROOT import TMVA, TFile, TCut
plt.style.use('ggplot')
RNG = RandomState(42)
# Construct an example multiclass dataset
n_events = 1000
class_0 = RNG.multivariate_normal(
[-2, -2], np.diag([1, 1]), n_events)
class_1 = RNG.multivariate_normal(
[0, 2], np.diag([1, 1]), n_events)
class_2 = RNG.multivariate_normal(
[2, -2], np.diag([1, 1]), n_events)
X = np.concatenate([class_0, class_1, class_2])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 3)
y[:class_0.shape[0]] *= 0
y[-class_2.shape[0]:] *= 2
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Split into training and test datasets
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
output = TFile('tmva_output.root', 'recreate')
factory = TMVA.Factory('classifier', output,
'AnalysisType=Multiclass:'
'!V:Silent:!DrawProgressBar')
if ROOT_VERSION >= '6.07/04':
data = TMVA.DataLoader('.')
else:
data = factory
for n in range(2):
data.AddVariable('f{0}'.format(n), 'F')
# Call root_numpy's utility functions to add events from the arrays
add_classification_events(data, X_train, y_train, weights=w_train)
add_classification_events(data, X_test, y_test, weights=w_test, test=True)
# The following line is necessary if events have been added individually:
data.PrepareTrainingAndTestTree(TCut('1'), 'NormMode=EqualNumEvents')
# Train an MLP
if ROOT_VERSION >= '6.07/04':
BookMethod = factory.BookMethod
else:
BookMethod = TMVA.Factory.BookMethod
BookMethod(data, 'MLP', 'MLP',
'NeuronType=tanh:NCycles=200:HiddenLayers=N+2,2:'
'TestRate=5:EstimatorType=MSE')
factory.TrainAllMethods()
# Classify the test dataset with the BDT
reader = TMVA.Reader()
for n in range(2):
reader.AddVariable('f{0}'.format(n), array('f', [0.]))
reader.BookMVA('MLP', 'weights/classifier_MLP.weights.xml')
class_proba = evaluate_reader(reader, 'MLP', X_test)
# Plot the decision boundaries
plot_colors = "rgb"
plot_step = 0.02
class_names = "ABC"
cmap = plt.get_cmap('Paired')
fig = plt.figure(figsize=(5, 5))
fig.patch.set_alpha(0)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = evaluate_reader(reader, 'MLP', np.c_[xx.ravel(), yy.ravel()])
Z = np.argmax(Z, axis=1) - 1
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.5)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(3), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=cmap,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
plt.tight_layout()
plt.show()
| 2.96875
| 3
|
contact/models.py
|
deepindo/DoPython
| 4
|
12774375
|
<reponame>deepindo/DoPython
from django.db import models
from django.utils import timezone
from datetime import datetime
class JD(models.Model):
"""企业招聘信息"""
title = models.CharField(max_length=50, verbose_name='招聘岗位')
description = models.TextField(verbose_name='招聘要求')
publish_date = models.DateTimeField(default=timezone.now, verbose_name='发布时间')
def __str__(self):
return self.title
class Meta:
verbose_name = verbose_name_plural = '招聘信息'
ordering = ['-publish_date']
db_table = 'dy_JD'
class Resume(models.Model):
"""简历信息"""
    # gender choices
GenderType = (
('男', '男'),
('女', '女'),
)
    # approval status choices
ApproveType = (
(1, '待审批'),
(2, '审批通过'),
(3, '审批拒绝'),
)
name = models.CharField(max_length=50, verbose_name='姓名')
ID_number = models.CharField(max_length=30, verbose_name='身份证号')
gender = models.CharField(max_length=5, choices=GenderType, default='男', verbose_name='性别')
email = models.EmailField(max_length=50, verbose_name='邮箱')
birthday = models.DateField(default=datetime.strftime(datetime.now(), '%Y-%m-%d'), verbose_name='出生日期')
education_background = models.CharField(max_length=50, default='本科', verbose_name='学历')
graduate_institution = models.CharField(max_length=50, verbose_name='毕业院校')
major = models.CharField(max_length=50, verbose_name='主修专业')
apply_position = models.CharField(max_length=40, verbose_name='申请职位')
experience = models.TextField(blank=True, null=True, verbose_name='项目经验')
person_photo = models.ImageField(upload_to='contact/recruit/%Y-%m-%d', verbose_name='个人照片')
approve_status = models.IntegerField(choices=ApproveType, default=1, verbose_name='审批状态')
submit_date = models.DateTimeField(default=timezone.now, verbose_name='提交时间')
def __str__(self):
return self.name
class Meta:
verbose_name = verbose_name_plural = '简历信息'
ordering = ('-submit_date', '-approve_status')
db_table = 'dy_resume'
class Contact(models.Model):
    # honors / awards
description = models.TextField(max_length=500, verbose_name='图片描述', blank=True, null=True)
    # image path
photo = models.ImageField(upload_to='award/', verbose_name='图片地址', blank=True)
class Meta:
        verbose_name = verbose_name_plural = '联系我们'  # name shown in the admin site
        db_table = 'dy_contact'  # table name in the database
| 2.484375
| 2
|
drl_negotiation/scenarios/scml.py
|
YueNing/tn_source_code
| 0
|
12774376
|
from drl_negotiation.scenario import BaseScenario
from drl_negotiation.core import TrainWorld, MySCML2020Agent
from drl_negotiation.myagent import MyComponentsBasedAgent
from drl_negotiation.hyperparameters import *
from negmas.helpers import get_class
from scml.scml2020 import (
DecentralizingAgent,
BuyCheapSellExpensiveAgent,
SCML2020World,
is_system_agent,
)
from typing import Union
import numpy as np
class Scenario(BaseScenario):
def make_world(self, config=None) -> TrainWorld:
# configuration, for Scenario scml
if config is None:
agent_types = [get_class(agent_type, ) for agent_type in TRAINING_AGENT_TYPES]
n_steps = N_STEPS
world_configuration = SCML2020World.generate(
agent_types=agent_types,
n_steps=n_steps
)
else:
world_configuration = SCML2020World.generate(
agent_types=config['agent_types'],
agent_params=config['agent_params'][:-2],
n_steps=config['n_steps']
)
world = TrainWorld(configuration=world_configuration)
if config is None:
self.reset_world(world)
return world
def reset_world(self, world):
# callback, reset
# reset world, agents, factories
# fixed position
agent_types = world.configuration['agent_types']
agent_params = world.configuration['agent_params'][:-2]
n_steps = world.configuration['n_steps']
reset_configuration = SCML2020World.generate(
#TODO: [Future work Improvement] could be reset
agent_types=agent_types,
agent_params=agent_params,
n_steps=n_steps
)
world.__init__(configuration=reset_configuration)
def benchmark_data(self, agent, world, seller=True):
        # TODO: data for benchmarking purposes, info_callback,
# will be rendered when display is true
# how to compare different companies, Ratio Analysis
# https://www.investopedia.com/ask/answers/032315/how-does-ratio-analysis-make-it-easier-compare-different-companies.asp
# price-to-earnings ratio and net profit margin
# Margin Ratios and Return Ratios
# https://corporatefinanceinstitute.com/resources/knowledge/finance/profitability-ratios/
profitability = []
initial_balances = []
factories = [_ for _ in world.factories if not is_system_agent(_.agent_id)]
for i, factory in enumerate(factories):
initial_balances.append(factory.initial_balance)
normalize = all(_ != 0 for _ in initial_balances)
for _ in world.agents:
if world.agents[_].action_callback == "system": continue
if world.agents[_] in world.heuristic_agents:
if normalize:
profitability.append(
(agent.state.f[2] - agent.state.f[0]) / agent.state.f[0] -
([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]) /
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]
)
else:
profitability.append(
(agent.state.f[2] - agent.state.f[0]) -
([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0])
)
return {"profitability": profitability}
def good_agents(self, world):
return [agent for agent in world.agents if not agent.adversary]
def adversaries(self, world):
return [agent for agent in world.agents if agent.adversary]
def reward(self, agent, world, seller=True):
# callback, reward
# Delayed reward problem????
# Keep this in mind when writing reward functions: You get what you incentivize, not what you intend.
# idea 1: external rewards, e.g. balance - initial balance for agent, -(balance - initial balance) for adversary agent
# idea 2: Intrinsic motivation rewards.
# On Learning Intrinsic Rewards for Policy Gradient Methods, https://arxiv.org/abs/1804.06459
return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
def agent_reward(self, agent, world):
# 1. Difference of balance with the end of last step, begin of current step
# 2. Difference of balance with the other agents
rew = 0
        # the agent started a negotiation in this world step (not counting the initial state)
if agent.state.o_negotiation_step == agent.awi.current_step:
rew = (agent.state.f[2]- agent.state.f[1]) / (agent.state.f[0]) * REW_FACTOR
gap = []
for entity in world.entities:
if entity is agent: continue
if entity.action_callback == "system": continue
if entity.action_callback is None: continue
initial_balance = [_.initial_balance for _ in world.factories if _.agent_id == entity.id][0]
current_balance = [_.current_balance for _ in world.factories if _.agent_id == entity.id][0]
gap.append((current_balance - initial_balance) / initial_balance)
rew -= np.mean(np.array(gap))
return rew
def adversary_reward(self, agent, world):
        # TODO: keep the good agents near the initial funds
# neg reward
# pos reward
# agent.init_f - agent.f
rew = 0
return rew
def observation(self, agent: Union[MyComponentsBasedAgent, MySCML2020Agent], world: Union[TrainWorld], seller=True):
        # get all observations
        # callback: observation
_obs = agent._get_obs(seller=seller)
#2. Economic gap with others, extra information
# economic_gaps = []
#
# for entity in world.entities:
# if entity is agent: continue
# economic_gaps.append(entity.state.f - agent.state.f)
#
# economic_gaps = np.array(economic_gaps)
#return np.concatenate(economic_gaps + o_m.flatten() + o_a + o_u_c + o_u_e + o_u_t + o_q_n.flatten() + o_t_c)
# return np.concatenate((economic_gaps.flatten(), _obs))
return _obs
def done(self, agent, world, seller=True):
# callback of done
# simulation is end
if world.world_done:
return True
        # agent is bankrupt
return [_.is_bankrupt for _ in world.factories if _.agent_id == agent.id][0]
| 2.15625
| 2
|
python/smurff/cmdline.py
|
ExaScience/smurff
| 65
|
12774377
|
<reponame>ExaScience/smurff
#!/usr/bin/env python
import argparse
import smurff.matrix_io as mio
import smurff
def read_list(cfg, prefix):
return [ cfg[d] for d in cfg.keys() if d.startswith(prefix) ]
def read_data(cfg, section):
pos = cfg.get(section, "pos", fallback = None)
if pos is not None:
pos = map(int, pos.split(","))
data = mio.read_matrix(cfg.get(section, "file"))
matrix_type = cfg.get(section, "type", fallback = None)
noise_model = cfg.get(section, "noise_model", fallback=None)
if noise_model is not None:
precision = cfg.getfloat(section, "precision")
sn_init = cfg.getfloat(section, "sn_init")
sn_max = cfg.getfloat(section, "sn_max")
threshold = cfg.getfloat(section, "noise_threshold")
noise = smurff.wrapper.NoiseConfig(noise_model, precision, sn_init, sn_max, threshold)
else:
noise = None
direct = cfg.getboolean(section, "direct", fallback=None)
tol = cfg.getfloat(section, "tol", fallback=None)
return data, matrix_type, noise, pos, direct, tol
def read_ini(fname):
from configparser import ConfigParser
cfg = ConfigParser()
cfg.read(fname)
priors = read_list(cfg["global"], "prior_")
seed = cfg.getint("global", "random_seed") if cfg.getboolean("global", "random_seed_set") else None
threshold = cfg.getfloat("global", "threshold") if cfg.getboolean("global", "classify") else None
session = smurff.TrainSession(
priors,
cfg.getint("global", "num_latent"),
cfg.getint("global", "num_threads", fallback=None),
cfg.getint("global", "burnin"),
cfg.getint("global", "nsamples"),
seed,
threshold,
cfg.getint("global", "verbose"),
cfg.get ("global", "save_name", fallback=smurff.temp_savename()),
cfg.getint("global", "save_freq", fallback=None),
cfg.getint("global", "checkpoint_freq", fallback=None),
)
data, matrix_type, noise, *_ = read_data(cfg, "train")
session.setTrain(data, noise, matrix_type == "scarce")
data, *_ = read_data(cfg, "test")
session.setTest(data)
for mode in range(len(priors)):
section = "side_info_%d" % mode
if section in cfg.keys():
data, matrix_type, noise, pos, direct, tol = read_data(cfg, section)
session.addSideInfo(mode, data, noise, direct)
return session
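# A sketch of the .ini layout that read_ini() above expects (option names follow the
# cfg.get*/read_list calls; the file names and values here are only illustrative):
#   [global]
#   prior_0 = macau
#   prior_1 = normal
#   num_latent = 16
#   burnin = 200
#   nsamples = 800
#   verbose = 1
#   classify = False
#   random_seed_set = False
#   save_name = output.h5
#   [train]
#   file = train.mtx
#   [test]
#   file = test.mtx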
def main():
parser = argparse.ArgumentParser(description='pySMURFF - command line utility to the SMURFF Python module')
parser.add_argument("command", help="Do full 'run' or only 'save' to .h5", choices=['run', 'save'])
group = parser.add_argument_group("General parameters")
group.add_argument("--version", action="store_true", help="print version info (and exit)")
group.add_argument("--verbose", metavar= "NUM", type=int, default=1, help="verbose output (default = 1}")
group.add_argument("--ini", metavar="FILE", type=str, help="read options from this .ini file")
group.add_argument("--num-threads", metavar= "NUM", type=int, help="number of threads (0 = default by OpenMP")
group.add_argument("--seed", metavar= "NUM", type=int, help="random number generator seed")
group = parser.add_argument_group("Used during training")
group.add_argument("--train", metavar="FILE", type=str, help="train data file")
group.add_argument("--test", metavar="FILE", type=str, help="test data")
group.add_argument("--row-features", metavar="FILE", type=str, help="sparse/dense row features")
group.add_argument("--col-features", metavar="FILE", type=str, help="sparse/dense column features")
group.add_argument("--prior", metavar="NAME", nargs=2, type=str, help="provide a prior-type for each dimension of train; prior-types: <normal|normalone|spikeandslab|macau|macauone>")
group.add_argument("--burnin", metavar="NUM", type=int, help="number of samples to discard")
group.add_argument("--nsamples", metavar="NUM", type=int, help="number of samples to collect")
group.add_argument("--num-latent", metavar="NUM", type=int, help="number of latent dimensions")
group.add_argument("--threshold", metavar="NUM", type=float, help="threshold for binary classification and AUC calculation")
group = parser.add_argument_group("Storing models and predictions")
group.add_argument("--restore-from", metavar="FILE", type=str, help="restore trainSession from a saved .h5 file")
group.add_argument("--save-name", metavar="FILE", type=str, help="save model and/or predictions to this .h5 file")
group.add_argument("--save-freq", metavar="NUM", type=int, help="save every n iterations (0 == never, -1 == final model)")
group.add_argument("--checkpoint-freq", metavar="NUM", type=int, help="save state every n seconds, only one checkpointing state is kept")
args = parser.parse_args()
print(args)
if args.version:
print("SMURFF %s" % smurff.version)
        exit()
session = smurff.TrainSession()
if args.ini is not None:
session = read_ini(args.ini)
file_options = {
"train" : session.setTrain,
"test" : session.setTest,
"row_features" : lambda x: session.addSideInfo(0, x),
"col_features" : lambda x: session.addSideInfo(1, x),
}
for opt, func in file_options.items():
if opt in vars(args) and vars(args)[opt] is not None:
fname = vars(args)[opt]
data = mio.read_matrix(fname)
func(data)
    other_options = {
        "verbose" : session.setVerbose,
        "num_threads" : session.setNumThreads,
        "seed" : session.setRandomSeed,
        "prior" : session.setPriorTypes,
        "burnin" : session.setBurnin,
        "nsamples" : session.setNSamples,
        "num_latent" : session.setNumLatent,
        "threshold" : session.setThreshold,
        "restore_from" : session.setRestoreName,
        "save_name" : session.setSaveName,
        "save_freq" : session.setSaveFreq,
        "checkpoint_freq" : session.setCheckpointFreq,  # must match the argparse dest (underscores, not dashes)
}
print(vars(args))
for opt, func in other_options.items():
if opt in vars(args) and vars(args)[opt] is not None:
value = vars(args)[opt]
print("processing opt:", opt, "with value", value)
func(value)
if args.command == "run":
session.run()
else:
session.init() # init will validate and save
if __name__ == "__main__":
main()
| 2.234375
| 2
|
setup.py
|
erickvneri/st-schema-python
| 0
|
12774378
|
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import setuptools
import os
basedir = os.path.abspath(os.path.dirname(__file__))
with open(basedir + "/README.md", "r") as ld:
long_description = ld.read()
setuptools.setup(
name="st-schema-python",
version="2.0.0",
author="erickvneri",
description="SmartThings Schema Connector Python SDK",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/erickvneri/st-schema-python/",
packages=setuptools.find_packages(),
install_requires=["marshmallow"],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Home Automation",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 1.296875
| 1
|
month_time_day.py
|
camrit/SupplyChainAnalytics
| 0
|
12774379
|
<gh_stars>0
import datetime
list=["209223000","211409100","211822000","211881000","212048000","215058000","218019000","218779000","219008000","219010252","219082000",
"220018000","220289000","220552000","220609000","220614000","228330600","236216000","240244000","240891000","244013009","244267000",
"244678000","244790522","246397000","246694000","247604000","248095000","248295000","248602000","255623000","255806146","256122000",
"256757000","257519000","257742000","258764000","259360000","259888000","259890000","259936000","265581970","265724260","265734000",
"265882000","266209000","266212000","266225000","266235000","273337270","311601000","311814000","419001223","636016188"]
temp="# Timestamp, MMSI, season, day_time, Latitude, Longitude, rain, htsgwsfc_2, perpwsfc_2, precsno_3, qlml_3, speedmax_3, taugwx_3, taugwy_3, tlml_3, ulml_3, vlml_3, dirpwsfc_4, tcc_5, u_6, v_6, hic_7, ui_7, vi_7, rain-month_8, wsp_9, wind_stress_9, wsp_err_9, albedo_10, albvisdf_10, Width, Length, Gross Tonnage, Summer Deadweight, Draught, Navigational status, ROT, SOG, COG, Heading, Ship type, di, Actual time"
def season(dat):
s = dat
date=s.split(" ")
month=date[0].split("/")[1]
hour=date[1].split(":")[0]
if month in ['03','04','05']:
add=1
elif month in ['06','07','08']:
add=2
elif month in ['09','10','11']:
add=3
elif month in ['12','01','02']:
add=4
if hour>='00' and hour<='06':
time=1
elif hour>'06' and hour<='12':
time=2
elif hour>'12' and hour<='18':
time=3
elif hour>'18' and hour<='24':
time=4
return add, time
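# Example (a sketch, assuming timestamps look like "DD/MM/YYYY HH:MM:SS" as parsed above):
#   season("14/03/2020 07:15:00") -> (1, 2)   # March -> season bucket 1, hour 07 -> time bucket 2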
def printme( mmis):
with open("C:/data set/danmark/14.csv") as f2:
for line3 in f2:
if mmis in line3:
fields = line3.split(",")
line1 = temp.split(",")
line1[0] = fields[0]
line1[1] = fields[1]
x,y= season(fields[0])
line1[2] = x
line1[3] = y
                # columns 4..42 are copied through unchanged
                for col in range(4, 43):
                    line1[col] = fields[col]
with open("C:/data set/danmark/asad1.txt", "a") as file_object:
mak = ','.join(map(str, line1))
file_object.write(mak)
    return
for mmis in list:
printme(mmis)
print(mmis)
| 1.617188
| 2
|
repair/featurize/occurefeatfusion.py
|
HoloClean/RecordFusion
| 2
|
12774380
|
from .featurizer import Featurizer
from dataset import AuxTables
from dataset.dataset import dictify
import pandas as pd
import itertools
import torch
from tqdm import tqdm
#GM
class OccurFeaturizerfusion(Featurizer):
def specific_setup(self):
self.name = "OccurFeaturizerfusion"
if not self.setup_done:
raise Exception('Featurizer %s is not properly setup.'%self.name)
if self.tensor is None:
self.raw_data_dict = self.ds.raw_data.df.set_index(self.ds.key).to_dict('index')
self.all_attrs = self.ds.get_attributes()
self.all_attrs.remove(self.ds.src)
self.all_attrs.remove(self.ds.key)
self.create_cooccur_stats_dictionary()
# self.all_attrs = self.ds.get_attributes()
self.attrs_number = len(self.ds.attr_to_idx)
def create_tensor(self):
# Iterate over tuples in domain
tensors = []
# Set tuple_id index on raw_data
t = self.ds.aux_table[AuxTables.cell_domain]
self.create_cooccur_dictionary()
sorted_domain = t.df.reset_index().sort_values(by=['_vid_'])[['_tid_','object', 'attribute','_vid_','domain']]
records = sorted_domain.to_records()
for row in tqdm(list(records)):
#Get tuple from raw_dataset
feat_tensor = self.gen_feat_tensor(row)
tensors.append(feat_tensor)
self.tensor = torch.cat(tensors)
return self.tensor
def gen_feat_tensor(self, row):
tensor = torch.zeros(1, self.classes, self.attrs_number) # dimension is 1 X (max domain size) X (number of attributes)
rv_attr = row['attribute']
object = row['object']
domain = row['domain'].split('|||')
rv_domain_idx = {val: idx for idx, val in enumerate(domain)}
# set the index corresponding to this value to the cooccurence with another value
for attr in self.all_attrs:
if attr != rv_attr and attr != self.ds.key :
attr_idx = self.ds.attr_to_idx[attr]
co_value = self.dictionary_cooccur[object][attr]
count1 = self.domain_stats[object][attr][co_value]
for rv_val in rv_domain_idx:
count = self.cooccur_pair_stats[object][attr][rv_attr].get((co_value, rv_val), 0)
prob = float(count) / count1
tensor[0][rv_domain_idx[rv_val]][attr_idx] = prob
return tensor
def create_cooccur_dictionary(self):
"""
create cooccur dictionary from the Current_init
Dictionary of current inferred values of dataset's cells
"""
self.dictionary_cooccur = {}
current_init_dict = self.ds.aux_table[AuxTables.current_init].df.to_dict('index')
for object_key in current_init_dict :
self.dictionary_cooccur[object_key] = {}
for attr in self.all_attrs:
if attr != self.ds.src and attr != self.ds.key:
self.dictionary_cooccur[object_key][attr] = current_init_dict [object_key][attr]
return
def create_cooccur_stats_dictionary(self):
"""
Creates the cooccurrence for value per objects
"""
# counts frequency of two observed values occurring together for two of an entity's attributes
# e.g. self.cooccur_pair_stats[entity][attr1][attr2][(val1, val2)] = n
# where n is the number of times attr1=val1 and attr2=val2 for that entity
self.cooccur_pair_stats = {}
# counts frequency of observed values for a particular entity's attributes
# e.g. self.domain_stats[entity][attr][val] = n
# where n is the number of times attr=val for that entity
self.domain_stats = {}
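        # Toy illustration (hypothetical data): if entity "e1" is observed twice, as
        # {"city": "NYC", "zip": "10001"} and {"city": "NYC", "zip": "10002"}, then after
        # this method runs: domain_stats["e1"]["city"]["NYC"] == 2.0 and
        # cooccur_pair_stats["e1"]["city"]["zip"][("NYC", "10001")] == 1.0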
# iterate through provided dataset
for row in self.ds.raw_data.df.to_dict('records'):
# if an entity is not in domain_stats object,
# initialize dictionaries
if row[self.ds.key] not in self.domain_stats:
self.cooccur_pair_stats[row[self.ds.key]] = {}
self.domain_stats[row[self.ds.key]] = {}
# create the domain_stats for each value
# iterate through attributes
for co_attribute in self.all_attrs:
if co_attribute != self.ds.key and co_attribute != "src":
# initialize dictionaries if attribute hasn't been initialized for this entity
if co_attribute not in self.domain_stats[row[self.ds.key]]:
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute] = {}
self.domain_stats[row[self.ds.key]][co_attribute] = {}
value = row[co_attribute]
if value not in self.domain_stats[row[self.ds.key]][
co_attribute]:
self.domain_stats[row[self.ds.key]][co_attribute][
value] = 0.0
self.domain_stats[row[self.ds.key]][co_attribute][
value] += 1.0
# create the cooccur_pair_stats
for co_attribute1 in self.all_attrs:
if co_attribute1 != self.ds.key and co_attribute1 != "src" and co_attribute1!= co_attribute:
if co_attribute1 not in \
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute]:
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute][co_attribute1] = {}
value2 = row[co_attribute1]
assgn_tuple = (value, value2)
if assgn_tuple not in \
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute][co_attribute1]:
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute][co_attribute1][
assgn_tuple] = 0.0
self.cooccur_pair_stats[row[self.ds.key]][
co_attribute][co_attribute1][
assgn_tuple] += 1.0
return
def feature_names(self):
return self.all_attrs
| 2.375
| 2
|
src/encoded/tests/test_upgrade_organism.py
|
procha2/encoded
| 102
|
12774381
|
<filename>src/encoded/tests/test_upgrade_organism.py
import pytest
def test_organism_upgrade(upgrader, organism_1_0):
value = upgrader.upgrade('organism', organism_1_0, target_version='2')
assert value['schema_version'] == '2'
assert value['status'] == 'current'
def test_organism_upgrade_4_5(upgrader, organism_4_0):
value = upgrader.upgrade('organism', organism_4_0, current_version='4', target_version='5')
assert value['schema_version'] == '5'
assert value['status'] == 'released'
organism_4_0['status'] = 'disabled'
organism_4_0['schema_version'] = '4'
value = upgrader.upgrade('organism', organism_4_0, current_version='4', target_version='5')
assert value['schema_version'] == '5'
assert value['status'] == 'deleted'
| 2.234375
| 2
|
scripts/caffe/convert_caffe_weights_to_npy.py
|
nnmhuy/flownet2-tf
| 438
|
12774382
|
<reponame>nnmhuy/flownet2-tf<gh_stars>100-1000
"""
Please read README.md for usage instructions.
Extracts Caffe parameters from a given caffemodel/prototxt to a dictionary of numpy arrays,
ready for conversion to TensorFlow variables. Writes the dictionary to a .npy file.
"""
import argparse
import caffe
import numpy as np
import os
import tempfile
FLAGS = None
ARCHS = {
'C': {
'CAFFEMODEL': '../models/FlowNet2-C/FlowNet2-C_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-C/FlowNet2-C_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetC/conv1',
'conv2': 'FlowNetC/conv2',
'conv3': 'FlowNetC/conv3',
'conv_redir': 'FlowNetC/conv_redir',
'conv3_1': 'FlowNetC/conv3_1',
'conv4': 'FlowNetC/conv4',
'conv4_1': 'FlowNetC/conv4_1',
'conv5': 'FlowNetC/conv5',
'conv5_1': 'FlowNetC/conv5_1',
'conv6': 'FlowNetC/conv6',
'conv6_1': 'FlowNetC/conv6_1',
'Convolution1': 'FlowNetC/predict_flow6',
'deconv5': 'FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetC/predict_flow5',
'deconv4': 'FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetC/predict_flow4',
'deconv3': 'FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetC/predict_flow3',
'deconv2': 'FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetC/predict_flow2',
}
},
'S': {
'CAFFEMODEL': '../models/FlowNet2-S/FlowNet2-S_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-S/FlowNet2-S_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetS/conv1',
'conv2': 'FlowNetS/conv2',
'conv3': 'FlowNetS/conv3',
'conv3_1': 'FlowNetS/conv3_1',
'conv4': 'FlowNetS/conv4',
'conv4_1': 'FlowNetS/conv4_1',
'conv5': 'FlowNetS/conv5',
'conv5_1': 'FlowNetS/conv5_1',
'conv6': 'FlowNetS/conv6',
'conv6_1': 'FlowNetS/conv6_1',
'Convolution1': 'FlowNetS/predict_flow6',
'deconv5': 'FlowNetS/deconv5',
'upsample_flow6to5': 'FlowNetS/upsample_flow6to5',
'Convolution2': 'FlowNetS/predict_flow5',
'deconv4': 'FlowNetS/deconv4',
'upsample_flow5to4': 'FlowNetS/upsample_flow5to4',
'Convolution3': 'FlowNetS/predict_flow4',
'deconv3': 'FlowNetS/deconv3',
'upsample_flow4to3': 'FlowNetS/upsample_flow4to3',
'Convolution4': 'FlowNetS/predict_flow3',
'deconv2': 'FlowNetS/deconv2',
'upsample_flow3to2': 'FlowNetS/upsample_flow3to2',
'Convolution5': 'FlowNetS/predict_flow2',
}
},
'CS': {
'CAFFEMODEL': '../models/FlowNet2-CS/FlowNet2-CS_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CS/FlowNet2-CS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCS/FlowNetC/predict_flow2',
# Net S
'net2_conv1': 'FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCS/FlowNetS/predict_flow2',
}
},
'CSS': {
'CAFFEMODEL': '../models/FlowNet2-CSS/FlowNet2-CSS_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS/FlowNet2-CSS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'CSS-ft-sd': {
'CAFFEMODEL': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'SD': {
'CAFFEMODEL': '../models/FlowNet2-SD/FlowNet2-SD_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-SD/FlowNet2-SD_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv0': 'FlowNetSD/conv0',
'conv1': 'FlowNetSD/conv1',
'conv1_1': 'FlowNetSD/conv1_1',
'conv2': 'FlowNetSD/conv2',
'conv2_1': 'FlowNetSD/conv2_1',
'conv3': 'FlowNetSD/conv3',
'conv3_1': 'FlowNetSD/conv3_1',
'conv4': 'FlowNetSD/conv4',
'conv4_1': 'FlowNetSD/conv4_1',
'conv5': 'FlowNetSD/conv5',
'conv5_1': 'FlowNetSD/conv5_1',
'conv6': 'FlowNetSD/conv6',
'conv6_1': 'FlowNetSD/conv6_1',
'Convolution1': 'FlowNetSD/predict_flow6',
'deconv5': 'FlowNetSD/deconv5',
'upsample_flow6to5': 'FlowNetSD/upsample_flow6to5',
'interconv5': 'FlowNetSD/interconv5',
'Convolution2': 'FlowNetSD/predict_flow5',
'deconv4': 'FlowNetSD/deconv4',
'upsample_flow5to4': 'FlowNetSD/upsample_flow5to4',
'interconv4': 'FlowNetSD/interconv4',
'Convolution3': 'FlowNetSD/predict_flow4',
'deconv3': 'FlowNetSD/deconv3',
'upsample_flow4to3': 'FlowNetSD/upsample_flow4to3',
'interconv3': 'FlowNetSD/interconv3',
'Convolution4': 'FlowNetSD/predict_flow3',
'deconv2': 'FlowNetSD/deconv2',
'upsample_flow3to2': 'FlowNetSD/upsample_flow3to2',
'interconv2': 'FlowNetSD/interconv2',
'Convolution5': 'FlowNetSD/predict_flow2',
},
},
'2': {
'CAFFEMODEL': '../models/FlowNet2/FlowNet2_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2/FlowNet2_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNet2/FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNet2/FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNet2/FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNet2/FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNet2/FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow2',
# Net SD
'netsd_conv0': 'FlowNet2/FlowNetSD/conv0',
'netsd_conv1': 'FlowNet2/FlowNetSD/conv1',
'netsd_conv1_1': 'FlowNet2/FlowNetSD/conv1_1',
'netsd_conv2': 'FlowNet2/FlowNetSD/conv2',
'netsd_conv2_1': 'FlowNet2/FlowNetSD/conv2_1',
'netsd_conv3': 'FlowNet2/FlowNetSD/conv3',
'netsd_conv3_1': 'FlowNet2/FlowNetSD/conv3_1',
'netsd_conv4': 'FlowNet2/FlowNetSD/conv4',
'netsd_conv4_1': 'FlowNet2/FlowNetSD/conv4_1',
'netsd_conv5': 'FlowNet2/FlowNetSD/conv5',
'netsd_conv5_1': 'FlowNet2/FlowNetSD/conv5_1',
'netsd_conv6': 'FlowNet2/FlowNetSD/conv6',
'netsd_conv6_1': 'FlowNet2/FlowNetSD/conv6_1',
'netsd_Convolution1': 'FlowNet2/FlowNetSD/predict_flow6',
'netsd_deconv5': 'FlowNet2/FlowNetSD/deconv5',
'netsd_upsample_flow6to5': 'FlowNet2/FlowNetSD/upsample_flow6to5',
'netsd_interconv5': 'FlowNet2/FlowNetSD/interconv5',
'netsd_Convolution2': 'FlowNet2/FlowNetSD/predict_flow5',
'netsd_deconv4': 'FlowNet2/FlowNetSD/deconv4',
'netsd_upsample_flow5to4': 'FlowNet2/FlowNetSD/upsample_flow5to4',
'netsd_interconv4': 'FlowNet2/FlowNetSD/interconv4',
'netsd_Convolution3': 'FlowNet2/FlowNetSD/predict_flow4',
'netsd_deconv3': 'FlowNet2/FlowNetSD/deconv3',
'netsd_upsample_flow4to3': 'FlowNet2/FlowNetSD/upsample_flow4to3',
'netsd_interconv3': 'FlowNet2/FlowNetSD/interconv3',
'netsd_Convolution4': 'FlowNet2/FlowNetSD/predict_flow3',
'netsd_deconv2': 'FlowNet2/FlowNetSD/deconv2',
'netsd_upsample_flow3to2': 'FlowNet2/FlowNetSD/upsample_flow3to2',
'netsd_interconv2': 'FlowNet2/FlowNetSD/interconv2',
'netsd_Convolution5': 'FlowNet2/FlowNetSD/predict_flow2',
# Fusion Net
'fuse_conv0': 'FlowNet2/fuse_conv0',
'fuse_conv1': 'FlowNet2/fuse_conv1',
'fuse_conv1_1': 'FlowNet2/fuse_conv1_1',
'fuse_conv2': 'FlowNet2/fuse_conv2',
'fuse_conv2_1': 'FlowNet2/fuse_conv2_1',
'fuse__Convolution5': 'FlowNet2/predict_flow2',
'fuse_deconv1': 'FlowNet2/fuse_deconv1',
'fuse_upsample_flow2to1': 'FlowNet2/fuse_upsample_flow2to1',
'fuse_interconv1': 'FlowNet2/fuse_interconv1',
'fuse__Convolution6': 'FlowNet2/predict_flow1',
'fuse_deconv0': 'FlowNet2/fuse_deconv0',
'fuse_upsample_flow1to0': 'FlowNet2/fuse_upsample_flow1to0',
'fuse_interconv0': 'FlowNet2/fuse_interconv0',
'fuse__Convolution7': 'FlowNet2/predict_flow0',
}
},
}
arch = None
# Setup variables to be injected into prototxt.template
# For now, use the dimensions of the Flying Chair Dataset
vars = {}
vars['TARGET_WIDTH'] = vars['ADAPTED_WIDTH'] = 512
vars['TARGET_HEIGHT'] = vars['ADAPTED_HEIGHT'] = 384
vars['SCALE_WIDTH'] = vars['SCALE_HEIGHT'] = 1.0
def main():
# Create tempfile to hold prototxt
tmp = tempfile.NamedTemporaryFile(mode='w', delete=True)
# Parse prototxt and inject `vars`
proto = open(arch['DEPLOY_PROTOTXT']).readlines()
for line in proto:
for key, value in vars.items():
tag = "$%s$" % key
line = line.replace(tag, str(value))
tmp.write(line)
tmp.flush()
# Instantiate Caffe Model
net = caffe.Net(tmp.name, arch['CAFFEMODEL'], caffe.TEST)
out = {}
for (caffe_param, tf_param) in arch['PARAMS'].items():
# Caffe stores weights as (channels_out, channels_in, h, w)
# but TF expects (h, w, channels_in, channels_out)
out[tf_param + '/weights'] = net.params[caffe_param][0].data.transpose((2, 3, 1, 0))
out[tf_param + '/biases'] = net.params[caffe_param][1].data
np.save(FLAGS.out, out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--out',
type=str,
required=True,
help='Output file path, eg /foo/bar.npy'
)
parser.add_argument(
'--arch',
type=str,
choices=['C', 'S', 'CS', 'CSS', 'CSS-ft-sd', 'SD', '2'],
required=True,
help='Name of the FlowNet arch: C, S, CS, CSS, CSS-ft-sd, SD or 2'
)
FLAGS = parser.parse_args()
arch = ARCHS[FLAGS.arch]
main()
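# A minimal sketch of consuming the resulting .npy on the TensorFlow side (the variable
# names follow the PARAMS mapping above; file name and graph wiring are hypothetical):
#   weights = np.load('flownet2-sd.npy', allow_pickle=True).item()
#   kernel = weights['FlowNetSD/conv0/weights']   # (h, w, channels_in, channels_out)
#   bias = weights['FlowNetSD/conv0/biases']
#   # e.g. tf.compat.v1.assign(var, kernel) for the matching TF variable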
| 2.484375
| 2
|
contests_atcoder/abc180/abc180f.py
|
takelifetime/competitive-programming
| 0
|
12774383
|
"""
dp[i][j][k] = number of ways to form a graph with i vertices and n edges whose maximum length is k
dp[i][j][k] = dp[i - k][n - usededge][_] for _ in range(k + 1)
(original note: not entirely sure about this)
"""
import math
from operator import mul
from functools import reduce
def comb(n, r):
r = min(r, n - r)
numer = reduce(mul, range(n, n - r, -1), 1)
denom = reduce(mul, range(1, r + 1), 1)
return numer // denom
MOD = 10 ** 9 + 7
n, m, l = map(int, input().split())
dp = [[[1] * (l + 1) for _ in range(m + 1)] for __ in range(n + 1)]
for i in range(n + 1):
for j in range(m + 1):
for k in range(1, l + 1):
if i - k >= 0 and j - k + 1 >= 0:
if k > 2:
                    # integer division keeps the modular arithmetic exact ((k - 1)! is even for k > 2)
                    dp[i][j][k] *= (k + 1) * math.factorial(k - 1) // 2 % MOD
for _ in range(1, k + 1):
dp[i][j][k] *= dp[i - k][j - k + 1][_]
dp[i][j][k] %= MOD
elif k == 1:
dp[i][j][k] *= 2
for _ in range(1, k + 1):
dp[i][j][k] *= dp[i - k][j - k + 1][_] + dp[i - k][j - k][_]
dp[i][j][k] %= MOD
dp[i][j][k] *= comb(i, k) % MOD
dp[i][j][k] %= MOD
print(dp[n][m][l])
| 2.53125
| 3
|
methodMap.py
|
DunderBird/dpylsp
| 2
|
12774384
|
'''
This file should not be exposed to the user
'''
from . import param as p
from enum import IntEnum
class WorkerType(IntEnum):
'''
select which worker to respond this event
'''
EDITOR = 1
NORMAL = 2
URGENT = 3 # events we need to respond immediately(shutdown...)
class MessageMap:
def __init__(self, rpctype, method=None, resultType=None, paramType=None, worker=WorkerType.NORMAL):
self.rpctype = rpctype
self.method = method
self.resultType = resultType
self.paramType = paramType
self.worker = worker
class N_Map(MessageMap):
'''
Notification map
'''
def __init__(self, method: str, paramType=None, worker=WorkerType.NORMAL):
super().__init__('Notification', method=method, paramType=paramType, worker=worker)
class Rq_Map(MessageMap):
'''
Request map
'''
def __init__(self, method: str, paramType=None, worker=WorkerType.NORMAL):
super().__init__('Request', method=method, paramType=paramType, worker=worker)
class Rp_Map(MessageMap):
'''
Response map
'''
def __init__(self, resultType, worker=WorkerType.NORMAL):
super().__init__('Response', resultType=resultType, worker=worker)
event_map = {
'initialize':
Rq_Map('onInitialize', p.InitializeParams, worker=WorkerType.URGENT),
'initialized':
N_Map('onInitialized', p.InitializedParams),
# shutdown shouldn't be dispatched to another thread because that thread may join itself
'shutdown':
Rq_Map('onShutdown', p.NullParams, worker=WorkerType.URGENT),
'exit':
N_Map('onExit', p.NullParams, worker=WorkerType.URGENT),
'textDocument/didOpen':
N_Map('onDidOpenTextDocument', p.DidOpenTextDocumentParams, worker=WorkerType.EDITOR),
'textDocument/didChange':
N_Map('onDidChangeTextDocument', p.DidChangeTextDocumentParams, worker=WorkerType.EDITOR),
'textDocument/didClose':
N_Map('onDidCloseTextDocument', p.DidCloseTextDocumentParams, worker=WorkerType.EDITOR),
'textDocument/didSave':
N_Map('onDidSaveTextDocument', p.DidSaveTextDocumentParams, worker=WorkerType.EDITOR),
'workspace/didChangeConfiguration':
N_Map('onDidChangeConfiguration', p.DidChangeConfigurationParams),
'workspace/didChangeWorkspaceFolders':
N_Map('onDidChangeWorkspaceFolders', p.DidChangeWorkspaceFoldersParams),
}
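# A minimal dispatch sketch (hypothetical caller; the real dispatcher lives elsewhere in dpylsp):
#   entry = event_map.get('textDocument/didOpen')
#   if entry is not None and entry.paramType is not None:
#       params = entry.paramType(...)            # build the params object per its own API
#       getattr(handler, entry.method)(params)   # e.g. handler.onDidOpenTextDocument(params)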
class CapabilityItem:
def __init__(self, client=None, server=None):
self.client = client
self.server = server
capability_map = {
'workspace/didChangeConfiguration': CapabilityItem(client='workspace.didChangeConfiguration'),
'workspace/configuration': CapabilityItem(client='workspace.configuration'),
'workspace/didChangeWatchedFiles': CapabilityItem(client='workspace.didChangeWatchedFiles'),
'workspace/symbol': CapabilityItem(client='workspace.symbol')
}
| 2.765625
| 3
|
python/app/plugins/http/Tomcat/ajpy.py
|
taomujian/linbing
| 351
|
12774385
|
<gh_stars>100-1000
#!/usr/bin/env python
import socket
import struct
from io import BytesIO  # needed by AjpForwardRequest.parse
def pack_string(s):
if s is None:
return struct.pack(">h", -1)
l = len(s)
return struct.pack(">H%dsb" % l, l, s.encode('utf8'), 0)
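# Examples of the AJP string layout produced above:
#   pack_string("abc") == b'\x00\x03abc\x00'   (big-endian 2-byte length, UTF-8 bytes, trailing NUL)
#   pack_string(None)  == b'\xff\xff'          (length -1 marks a null string)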
def unpack(stream, fmt):
size = struct.calcsize(fmt)
buf = stream.read(size)
return struct.unpack(fmt, buf)
def unpack_string(stream):
size, = unpack(stream, ">h")
if size == -1: # null string
return None
res, = unpack(stream, "%ds" % size)
stream.read(1) # \0
return res
class NotFoundException(Exception):
pass
class AjpBodyRequest(object):
# server == web server, container == servlet
SERVER_TO_CONTAINER, CONTAINER_TO_SERVER = range(2)
MAX_REQUEST_LENGTH = 8186
def __init__(self, data_stream, data_len, data_direction=None):
self.data_stream = data_stream
self.data_len = data_len
self.data_direction = data_direction
def serialize(self):
data = self.data_stream.read(AjpBodyRequest.MAX_REQUEST_LENGTH)
if len(data) == 0:
return struct.pack(">bbH", 0x12, 0x34, 0x00)
else:
res = struct.pack(">H", len(data))
res += data
if self.data_direction == AjpBodyRequest.SERVER_TO_CONTAINER:
header = struct.pack(">bbH", 0x12, 0x34, len(res))
else:
header = struct.pack(">bbH", 0x41, 0x42, len(res))
return header + res
def send_and_receive(self, socket, stream):
while True:
data = self.serialize()
socket.send(data)
r = AjpResponse.receive(stream)
while r.prefix_code != AjpResponse.GET_BODY_CHUNK and r.prefix_code != AjpResponse.SEND_HEADERS:
r = AjpResponse.receive(stream)
if r.prefix_code == AjpResponse.SEND_HEADERS or len(data) == 4:
break
class AjpForwardRequest(object):
_, OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, PROPFIND, PROPPATCH, MKCOL, COPY, MOVE, LOCK, UNLOCK, ACL, REPORT, VERSION_CONTROL, CHECKIN, CHECKOUT, UNCHECKOUT, SEARCH, MKWORKSPACE, UPDATE, LABEL, MERGE, BASELINE_CONTROL, MKACTIVITY = range(28)
REQUEST_METHODS = {'GET': GET, 'POST': POST, 'HEAD': HEAD, 'OPTIONS': OPTIONS, 'PUT': PUT, 'DELETE': DELETE, 'TRACE': TRACE}
# server == web server, container == servlet
SERVER_TO_CONTAINER, CONTAINER_TO_SERVER = range(2)
COMMON_HEADERS = ["SC_REQ_ACCEPT",
"SC_REQ_ACCEPT_CHARSET", "SC_REQ_ACCEPT_ENCODING", "SC_REQ_ACCEPT_LANGUAGE", "SC_REQ_AUTHORIZATION",
"SC_REQ_CONNECTION", "SC_REQ_CONTENT_TYPE", "SC_REQ_CONTENT_LENGTH", "SC_REQ_COOKIE", "SC_REQ_COOKIE2",
"SC_REQ_HOST", "SC_REQ_PRAGMA", "SC_REQ_REFERER", "SC_REQ_USER_AGENT"
]
ATTRIBUTES = ["context", "servlet_path", "remote_user", "auth_type", "query_string", "route", "ssl_cert", "ssl_cipher", "ssl_session", "req_attribute", "ssl_key_size", "secret", "stored_method"]
def __init__(self, data_direction=None):
self.prefix_code = 0x02
self.method = None
self.protocol = None
self.req_uri = None
self.remote_addr = None
self.remote_host = None
self.server_name = None
self.server_port = None
self.is_ssl = None
self.num_headers = None
self.request_headers = None
self.attributes = None
self.data_direction = data_direction
def pack_headers(self):
self.num_headers = len(self.request_headers)
        res = struct.pack(">h", self.num_headers)
for h_name in self.request_headers:
if h_name.startswith("SC_REQ"):
code = AjpForwardRequest.COMMON_HEADERS.index(h_name) + 1
res += struct.pack("BB", 0xA0, code)
else:
res += pack_string(h_name)
res += pack_string(self.request_headers[h_name])
return res
def pack_attributes(self):
res = b""
for attr in self.attributes:
a_name = attr['name']
code = AjpForwardRequest.ATTRIBUTES.index(a_name) + 1
res += struct.pack("b", code)
if a_name == "req_attribute":
aa_name, a_value = attr['value']
res += pack_string(aa_name)
res += pack_string(a_value)
else:
res += pack_string(attr['value'])
res += struct.pack("B", 0xFF)
return res
def serialize(self):
        res = struct.pack("bb", self.prefix_code, self.method)
res += pack_string(self.protocol)
res += pack_string(self.req_uri)
res += pack_string(self.remote_addr)
res += pack_string(self.remote_host)
res += pack_string(self.server_name)
res += struct.pack(">h", self.server_port)
res += struct.pack("?", self.is_ssl)
res += self.pack_headers()
res += self.pack_attributes()
if self.data_direction == AjpForwardRequest.SERVER_TO_CONTAINER:
header = struct.pack(">bbh", 0x12, 0x34, len(res))
else:
header = struct.pack(">bbh", 0x41, 0x42, len(res))
return header + res
def parse(self, raw_packet):
        stream = BytesIO(raw_packet)  # AJP packets are bytes, so BytesIO rather than StringIO
self.magic1, self.magic2, data_len = unpack(stream, "bbH")
self.prefix_code, self.method = unpack(stream, "bb")
self.protocol = unpack_string(stream)
self.req_uri = unpack_string(stream)
self.remote_addr = unpack_string(stream)
self.remote_host = unpack_string(stream)
self.server_name = unpack_string(stream)
self.server_port = unpack(stream, ">h")
self.is_ssl = unpack(stream, "?")
self.num_headers, = unpack(stream, ">H")
self.request_headers = {}
for i in range(self.num_headers):
code, = unpack(stream, ">H")
if code > 0xA000:
h_name = AjpForwardRequest.COMMON_HEADERS[code - 0xA001]
else:
h_name = unpack(stream, "%ds" % code)
stream.read(1) # \0
h_value = unpack_string(stream)
self.request_headers[h_name] = h_value
def send_and_receive(self, socket, stream, save_cookies=False):
res = []
i = socket.sendall(self.serialize())
if self.method == AjpForwardRequest.POST:
return res
r = AjpResponse.receive(stream)
assert r.prefix_code == AjpResponse.SEND_HEADERS
res.append(r)
        if save_cookies and 'Set-Cookie' in r.response_headers:
            self.request_headers['SC_REQ_COOKIE'] = r.response_headers['Set-Cookie']
# read body chunks and end response packets
while True:
r = AjpResponse.receive(stream)
res.append(r)
if r.prefix_code == AjpResponse.END_RESPONSE:
break
elif r.prefix_code == AjpResponse.SEND_BODY_CHUNK:
continue
            else:
                raise NotImplementedError
return res
class AjpResponse(object):
_,_,_,SEND_BODY_CHUNK, SEND_HEADERS, END_RESPONSE, GET_BODY_CHUNK = range(7)
COMMON_SEND_HEADERS = [
"Content-Type", "Content-Language", "Content-Length", "Date", "Last-Modified",
"Location", "Set-Cookie", "Set-Cookie2", "Servlet-Engine", "Status", "WWW-Authenticate"
]
def parse(self, stream):
# read headers
self.magic, self.data_length, self.prefix_code = unpack(stream, ">HHb")
if self.prefix_code == AjpResponse.SEND_HEADERS:
self.parse_send_headers(stream)
elif self.prefix_code == AjpResponse.SEND_BODY_CHUNK:
self.parse_send_body_chunk(stream)
elif self.prefix_code == AjpResponse.END_RESPONSE:
self.parse_end_response(stream)
elif self.prefix_code == AjpResponse.GET_BODY_CHUNK:
self.parse_get_body_chunk(stream)
else:
raise NotImplementedError
def parse_send_headers(self, stream):
self.http_status_code, = unpack(stream, ">H")
self.http_status_msg = unpack_string(stream)
self.num_headers, = unpack(stream, ">H")
self.response_headers = {}
for i in range(self.num_headers):
code, = unpack(stream, ">H")
if code <= 0xA000: # custom header
h_name, = unpack(stream, "%ds" % code)
stream.read(1) # \0
h_value = unpack_string(stream)
else:
h_name = AjpResponse.COMMON_SEND_HEADERS[code-0xA001]
h_value = unpack_string(stream)
self.response_headers[h_name] = h_value
def parse_send_body_chunk(self, stream):
self.data_length, = unpack(stream, ">H")
self.data = stream.read(self.data_length+1)
def parse_end_response(self, stream):
self.reuse, = unpack(stream, "b")
def parse_get_body_chunk(self, stream):
rlen, = unpack(stream, ">H")
return rlen
@staticmethod
def receive(stream):
r = AjpResponse()
r.parse(stream)
return r
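# A minimal usage sketch (hypothetical values; socket/stream plumbing omitted).
# serialize() reads exactly the attributes set below:
#   fr = AjpForwardRequest(AjpForwardRequest.SERVER_TO_CONTAINER)
#   fr.method = AjpForwardRequest.GET
#   fr.protocol = "HTTP/1.1"
#   fr.req_uri = "/index.jsp"
#   fr.remote_addr = "127.0.0.1"
#   fr.remote_host = None
#   fr.server_name = "localhost"
#   fr.server_port = 8009
#   fr.is_ssl = False
#   fr.request_headers = {"SC_REQ_HOST": "localhost"}
#   fr.attributes = []
#   payload = fr.serialize()  # AJP13 forward-request packet, ready to send to the AJP port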
| 2.640625
| 3
|
pypStag/stagError.py
|
AlexandrePFJanin/pypStag
| 1
|
12774386
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 16:46:12 2019
@author: <NAME>
"""
"""Exceptions raised by pypStag"""
class PypStagError(Exception):
    """Base class for all pypStag errors"""
pass
class PackageWarning(PypStagError):
    """Raised when a specific package is needed"""
def __init__(self,pack):
super().__init__('Error package import!\n'+\
'>> the following package is needed:'+pack)
class NoFileError(PypStagError):
"""Raised when stagData.import find no file during the importation"""
def __init__(self,directory,fname):
super().__init__('Error on the input data !\n'+\
'>> The expected following file does not exist !\n'+\
' | File requested: '+fname+'\n'+\
' | On directory : '+directory)
class StagTypeError(PypStagError):
"""Raised unexpected type"""
def __init__(self,givenType,expectedType):
super().__init__('Error on input type\n'+\
'Unexpected type given: '+str(givenType)+'\n'+\
'Expected type for input is: '+str(expectedType))
class InputGridGeometryError(PypStagError):
"""Raised when stagData.import have a wrong input geometry"""
def __init__(self,geom):
super().__init__('Error on the input geometry!\n'+\
"The proposed geometry '"+geom+"' is not contained in\n"+\
'the allowed geometries supported by stagData object.')
class CloudBuildIndexError(PypStagError):
"""Raised when stagCloudData have a wrong index input"""
def __init__(self,geom):
super().__init__('Error on the input index!\n'+\
"You have to set an 'indices' list or set a begining and end index and a file step.")
class GridGeometryInDevError(PypStagError):
"""Raised when stagData.import have an unconform input geometry"""
def __init__(self,geom):
super().__init__('Error on the input geometry !\n'+\
"The input geometry '"+geom+"' is not suported now\n"+\
'in the current version of pypStag... Be patient and take a coffee!')
class FieldTypeInDevError(PypStagError):
"""Raised when stagData.import have an unknown fieldType not yet supported"""
def __init__(self,fieldType):
super().__init__('Error on the input stagData.fieldType !\n'+\
"The input fieldType '"+fieldType+"' is not supported now\n"+\
'in the current versin of pypStag... Be patient and take a coffee !')
class GridGeometryError(PypStagError):
"""Raised when the geometry of a stagData object is not the expected
geometry"""
def __init__(self,INgeom,EXgeom):
super().__init__('Error on the input geometry !\n'+\
"The input geometry '"+INgeom+"' you chose during the construction\n"+\
'of the StagData object is not the one expected here!\n'+\
'Expected geometry corresponding to your input file: '+EXgeom)
class VisuGridGeometryError(PypStagError):
"""Raised when the geometry of a stagData object is not the expected
geometry for a visualization tool"""
def __init__(self,INgeom,EXgeom):
super().__init__('Error on the input geometry !\n'+\
"The input geometry '"+INgeom+"' of your StagData object is incoherent\n"+\
'with the function you are using or its input parameters!\n'+\
'Expected geometry: '+EXgeom)
class GridInterpolationError(PypStagError):
"""Raised when unknown input for interpolation grid"""
def __init__(self,interpGeom):
super().__init__('Error on the proposed interpolation grid!\n'+\
"The selected grid geometry '"+interpGeom+"' is not supported\n"+\
'for the moment or is wrong!')
class fieldTypeError(PypStagError):
"""Raised unexpected field type"""
def __init__(self,expectedFieldtype):
super().__init__('Error on the StagData Field Type\n'+\
            'Unexpected value of StagData.fieldType\n'+\
'StagData.fieldType must be here: '+expectedFieldtype)
class SliceAxisError(PypStagError):
"""Raised when unknown axis is set in input"""
def __init__(self,wrongaxis):
super().__init__('Error in input axis!\n'+\
'Unexpected value of axis: '+str(wrongaxis))
class IncoherentSliceAxisError(PypStagError):
"""Raised when incoherent axis is set in input, incoherent according
to the grid geometry."""
def __init__(self,wrongaxis):
super().__init__('Error in input axis!\n'+\
            'Incoherent value of axis: '+str(wrongaxis)+', with the grid geometry:')
class MetaFileInappropriateError(PypStagError):
"""Raised when the reader function of StagMetaData recieved an inappropriate
file."""
def __init__(self,ftype,allowedType):
super().__init__('Error on the input of the meta file reader!\n'+\
'Inappropriate meta file in input.\n'+\
'Type you entered: '+ftype+'\n'+\
'Type must be in: \n'+\
str(allowedType))
class MetaCheckFieldUnknownError(PypStagError):
"""Raised when the reader function of StagMetaData recieved an inappropriate
file."""
def __init__(self,field,allowedFields):
super().__init__('Error on the input field of the StagMetaData.check() function\n'+\
'Unknown field: '+field+'\n'+\
'The input field must be in: \n'+\
str(allowedFields))
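# A minimal handling sketch: every exception above derives from PypStagError, so callers can
# catch the whole family with a single except clause. The example values are illustrative only.
def _example_error_handling():
    try:
        raise StagTypeError(list, dict)
    except PypStagError as err:
        print(err)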
| 2.234375
| 2
|
context.py
|
E-tanok/NLTK_stackoverflow_recommender
| 1
|
12774387
|
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
if dir_path.split('\\')[0] == 'D:':
    datasources_path = dir_path + "\\datasources\\"
    enrichment_path = dir_path + "\\enrichment\\"
    pickles_path = dir_path + "\\pickles\\"
    learning_models_path = dir_path + "\\learning_models\\"
    temp_files_path = dir_path + "\\tmp\\"
else:
datasources_path = dir_path+"/datasources/"
enrichment_path = dir_path+"/enrichment/"
pickles_path = dir_path+"/pickles/"
learning_models_path = dir_path+"/learning_models/"
temp_files_path = dir_path+"/tmp/"
| 2.5
| 2
|
Question-05/main.py
|
gajrajgchouhan/alcyone
| 0
|
12774388
|
<reponame>gajrajgchouhan/alcyone<filename>Question-05/main.py<gh_stars>0
from astropy.constants import *
import astropy.units as u
from math import sqrt
time_correction_on_earth = 1 / ( 1 - (2 * G * M_earth / (c*c*R_earth)) )**0.5
for _ in range(int(input())):
distance = float(input())
distance = u.Quantity(distance, unit=u.m)
v_square = (G * M_earth) / distance
time_correction_on_satellite = 1 / ( 1 - (2 * G * M_earth / (c*c*distance)) )**0.5
relativity_factor = 1 / (1 - (v_square / (c**2)))**0.5
print("{:.5e}".format(((time_correction_on_satellite - time_correction_on_earth) + (relativity_factor - 1)) * 86400))
| 2.96875
| 3
|
pythonTFlite/tfliteclassify.py
|
lkk688/AndroidIntelligentApp
| 1
|
12774389
|
<reponame>lkk688/AndroidIntelligentApp<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import argparse
from PIL import Image
import time
import io
import tflite_runtime.interpreter as tflite
def load_labels(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f.readlines()]
# Load TFLite model and allocate tensors.
interpreter = tflite.Interpreter(model_path='mobilenet/mobilenet_v1_1.0_224_quant.tflite')
interpreter.allocate_tensors()
# Get input tensor details
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
# Check the type of the input tensor (float32 indicates a non-quantized model)
floating_model = input_details[0]['dtype'] == np.float32
# NxHxWxC, H:1, W:2
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
img = Image.open('data/00871.jpg').resize((width, height))
# add N dim
input_data = np.expand_dims(img, axis=0)
if floating_model:
input_data = (np.float32(input_data) - 127.5) / 127.5
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
results = np.squeeze(output_data)
print(results)
top_k = results.argsort()[-5:][::-1]
print(top_k)
labels = load_labels('mobilenet/labels_mobilenet_quant_v1_224.txt')
for i in top_k:
if floating_model:
print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
else:
print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))
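# A compact helper sketch wrapping the steps above into a reusable call. It reuses the
# interpreter, labels and tensor details already created; the image path passed in is an
# assumption of the caller, not something the script ships with.
def classify(image_path, k=5):
    img = Image.open(image_path).resize((width, height))
    data = np.expand_dims(img, axis=0)
    if floating_model:
        data = (np.float32(data) - 127.5) / 127.5
    interpreter.set_tensor(input_details[0]['index'], data)
    interpreter.invoke()
    scores = np.squeeze(interpreter.get_tensor(output_details[0]['index']))
    top = scores.argsort()[-k:][::-1]
    scale = 1.0 if floating_model else 1.0 / 255.0  # uint8 outputs are rescaled as above
    return [(labels[i], float(scores[i]) * scale) for i in top]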
| 2.171875
| 2
|
tests/integration/test_weights_loading.py
|
jina-ai/encoder-image-torch
| 4
|
12774390
|
<filename>tests/integration/test_weights_loading.py<gh_stars>1-10
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from executor.torch_encoder import ImageTorchEncoder
from pytest_mock import MockerFixture
from torch import hub
def test_load_from_url(tmpdir: str, mocker: MockerFixture) -> None:
os.environ['TORCH_HOME'] = str(tmpdir)
spy = mocker.spy(hub, 'urlopen')
_ = ImageTorchEncoder(model_name='mobilenet_v2')
assert os.path.isfile(
os.path.join(tmpdir, 'hub', 'checkpoints', 'mobilenet_v2-b0353104.pth')
)
assert spy.call_count == 1
| 1.851563
| 2
|
tests/test_coin.py
|
AustEcon/bitcoinX
| 0
|
12774391
|
import pytest
from bitcoinx import (
hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str,
)
from bitcoinx.coin import *
header_400k = (
b'\x04\x00\x00\x009\xfa\x82\x18Hx\x1f\x02z.m\xfa\xbb\xf6\xbd\xa9 \xd9'
b'\xaea\xb64\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\xec\xaeSj0@B\xe3'
b'\x15K\xe0\xe3\xe9\xa8"\x0eUh\xc3C:\x9a\xb4\x9a\xc4\xcb\xb7O\x8d\xf8'
b'\xe8\xb0\xcc*\xcfV\x9f\xb9\x06\x18\x06e,\''
)
@pytest.mark.parametrize("raw_header,header_hash,version,prev_hash,"
"merkle_root,timestamp,bits,nonce", (
(
Bitcoin.genesis_header,
'000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f',
1,
'0000000000000000000000000000000000000000000000000000000000000000',
'4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b',
1231006505,
486604799,
2083236893
),
(
header_400k,
'000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f',
4,
'0000000000000000030034b661aed920a9bdf6bbfa6d2e7a021f78481882fa39',
'b0e8f88d4fb7cbc49ab49a3a43c368550e22a8e9e3e04b15e34240306a53aeec',
1456417484,
403093919,
657220870
),
))
def test_Bitcoin(raw_header, header_hash, version, prev_hash, merkle_root,
timestamp, bits, nonce):
header_hash = hex_str_to_hash(header_hash)
prev_hash = hex_str_to_hash(prev_hash)
merkle_root = hex_str_to_hash(merkle_root)
assert Bitcoin.header_hash(raw_header) == header_hash
assert Bitcoin.header_prev_hash(raw_header) == prev_hash
assert Bitcoin.header_work(raw_header) == bits_to_work(bits)
assert Bitcoin.header_timestamp(raw_header) == timestamp
header = Bitcoin.deserialized_header(raw_header, 0)
assert header.version == version
assert header.prev_hash == prev_hash
assert header.merkle_root == merkle_root
assert header.timestamp == timestamp
assert header.bits == bits
assert header.nonce == nonce
assert header.raw == raw_header
assert header.hash == header_hash
assert header.height == 0
assert header.work() == Bitcoin.header_work(raw_header)
assert header.target() == bits_to_target(bits)
assert header.hash_value() == hash_to_value(header_hash)
assert header.hex_str() == hash_to_hex_str(header_hash)
assert 'height=0' in str(header)
def test_from_WIF_byte():
for coin in all_coins:
if coin is BitcoinScalingTestnet:
# Testnet has the same identifiers as scaling testnet, as the latter is dumbed down.
assert Coin.from_WIF_byte(coin.WIF_byte) is BitcoinTestnet
else:
assert Coin.from_WIF_byte(coin.WIF_byte) is coin
with pytest.raises(ValueError):
Coin.from_WIF_byte(0x01)
def test_lookup_xver_bytes():
for coin in all_coins:
if coin is BitcoinScalingTestnet:
# Testnet has the same identifiers as scaling testnet, as the latter is dumbed down.
assert Coin.lookup_xver_bytes(coin.xpub_verbytes) == (BitcoinTestnet, True)
assert Coin.lookup_xver_bytes(coin.xprv_verbytes) == (BitcoinTestnet, False)
else:
assert Coin.lookup_xver_bytes(coin.xpub_verbytes) == (coin, True)
assert Coin.lookup_xver_bytes(coin.xprv_verbytes) == (coin, False)
with pytest.raises(ValueError):
Coin.lookup_xver_bytes(bytes.fromhex("043587ff"))
def test_P2SH_verbyte():
assert Bitcoin.P2SH_verbyte == 0x05
assert BitcoinTestnet.P2SH_verbyte == BitcoinScalingTestnet.P2SH_verbyte == 0xc4
| 2.09375
| 2
|
tensorflow_lattice/python/estimators/calibrated_etl_test.py
|
agiledots/study-lattice
| 0
|
12774392
|
# Copyright 2017 The TensorFlow Lattice Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CalibratedEtl tests."""
# Dependency imports
import numpy as np
from tensorflow_lattice.python.estimators import calibrated_etl
from tensorflow_lattice.python.estimators import hparams as tfl_hparams
from tensorflow_lattice.python.lib import keypoints_initialization
from tensorflow_lattice.python.lib import test_data
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.platform import test
_NUM_KEYPOINTS = 50
class CalibratedEtlHParamsTest(test.TestCase):
def testEmptyMonotonicLatticeRankExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('monotonic_num_lattices', 2)
hparams.set_param('monotonic_lattice_size', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyMonotonicLatticeSizeExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('monotonic_num_lattices', 2)
hparams.set_param('monotonic_lattice_rank', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyNonMonotonicLatticeRankExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_size', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyNonMonotonicLatticeSizeExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_rank', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testWrongLatticeRegularization(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_size', 2)
    hparams.set_param('non_monotonic_lattice_rank', 2)
hparams.set_feature_param('x', 'lattice_l1_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l2_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
class CalibratedEtlTest(test.TestCase):
def setUp(self):
super(CalibratedEtlTest, self).setUp()
self._test_data = test_data.TestData()
def _CalibratedEtlRegressor(self, feature_names, feature_columns,
**hparams_args):
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
_NUM_KEYPOINTS, -1., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
feature_names,
num_keypoints=_NUM_KEYPOINTS,
monotonic_num_lattices=1,
monotonic_lattice_rank=1,
monotonic_lattice_size=2,
non_monotonic_num_lattices=1,
non_monotonic_lattice_rank=1,
non_monotonic_lattice_size=2,
**hparams_args)
# Turn off monotonic calibrator.
hparams.set_param('calibration_monotonic', None)
hparams.set_param('learning_rate', 0.1)
return calibrated_etl.calibrated_etl_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
def _CalibratedEtlClassifier(self, feature_columns, **hparams_args):
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
_NUM_KEYPOINTS, -1., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
num_keypoints=_NUM_KEYPOINTS,
monotonic_num_lattices=1,
monotonic_lattice_rank=1,
monotonic_lattice_size=2,
non_monotonic_num_lattices=1,
non_monotonic_lattice_rank=1,
non_monotonic_lattice_size=2,
**hparams_args)
# Turn off monotonic calibrator.
hparams.set_param('calibration_monotonic', None)
hparams.set_param('learning_rate', 0.1)
return calibrated_etl.calibrated_etl_classifier(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
def testCalibratedEtlRegressorTraining1D(self):
feature_columns = [
feature_column_lib.numeric_column('x'),
]
estimator = self._CalibratedEtlRegressor(
['x'], feature_columns, interpolation_type='simplex')
estimator.train(input_fn=self._test_data.oned_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.oned_input_fn())
def testCalibratedEtlRegressorTraining2D(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'], feature_columns, interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
  def testCalibratedEtlRegressorTraining2DWithCalibrationRegularization(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'],
feature_columns,
interpolation_type='simplex',
calibration_l1_reg=1e-2,
calibration_l2_reg=1e-2,
calibration_l1_laplacian_reg=0.05,
calibration_l2_laplacian_reg=0.01)
estimator.train(input_fn=self._test_data.twod_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
def testCalibratedEtlRegressorTraining2DWithLatticeRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'],
feature_columns,
interpolation_type='simplex',
lattice_l1_reg=1.0,
lattice_l2_reg=1.0,
lattice_l1_torsion_reg=1.0,
lattice_l2_torsion_reg=1.0,
lattice_l1_laplacian_reg=1.0,
lattice_l2_laplacian_reg=1.0)
estimator.train(input_fn=self._test_data.twod_input_fn())
results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
    # We expect a worse result due to the lattice regularization.
self.assertGreater(results['average_loss'], 3e-3)
self.assertLess(results['average_loss'], 4e-2)
def testCalibratedEtlRegressorTrainingMultiDimensionalFeature(self):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(2,)),
]
estimator = self._CalibratedEtlRegressor(['x'], feature_columns)
estimator.train(input_fn=self._test_data.multid_feature_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.multid_feature_input_fn())
self.assertLess(results['average_loss'], 1e-2)
# Turn-off calibration for feature 'x', it should turn off for both
# dimensions, and the results should get much worse.
estimator = self._CalibratedEtlRegressor(
['x'], feature_columns, feature__x__num_keypoints=0)
estimator.train(input_fn=self._test_data.multid_feature_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.multid_feature_input_fn())
self.assertGreater(results['average_loss'], 1e-2)
def testCalibratedEtlClassifierTraining(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(feature_columns)
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
self.assertGreater(results['accuracy'], 0.97)
def testCalibratedEtlClassifierTrainingWithCalibrationRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(
feature_columns,
calibration_l1_reg=1e-2,
calibration_l2_reg=1e-2,
calibration_l1_laplacian_reg=1e-1,
calibration_l2_laplacian_reg=1e-1,
interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
def testCalibratedEtlClassifierTrainingWithLatticeRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(
feature_columns,
lattice_l1_reg=1.0,
lattice_l2_reg=1.0,
lattice_l1_torsion_reg=1.0,
lattice_l2_torsion_reg=1.0,
lattice_l1_laplacian_reg=1.0,
lattice_l2_laplacian_reg=1.0,
interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
    # Due to the regularizer, we expect worse performance.
self.assertLess(results['accuracy'], 0.97)
self.assertGreater(results['accuracy'], 0.8)
def testCalibratedEtlMonotonicClassifierTraining(self):
# Construct the following training pair.
#
# Training: (x, y)
# ([0., 0.], 0.0)
# ([0., 1.], 1.0)
# ([1., 0.], 1.0)
# ([1., 1.], 0.0)
#
    # which is not a monotonic function. Then check whether forcing
    # monotonicity yields the following relations:
# f(0, 0) <= f(0, 1), f(0, 0) <= f(1, 0), f(0, 1) <= f(1, 1),
# f(1, 0) < = f(1, 1).
x0 = np.array([0.0, 0.0, 1.0, 1.0])
x1 = np.array([0.0, 1.0, 0.0, 1.0])
x_samples = {'x0': x0, 'x1': x1}
training_y = np.array([[False], [True], [True], [False]])
train_input_fn = numpy_io.numpy_input_fn(
x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False)
test_input_fn = numpy_io.numpy_input_fn(x=x_samples, y=None, shuffle=False)
# Define monotonic lattice classifier.
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
2, 0., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
num_keypoints=2,
monotonic_num_lattices=2,
monotonic_lattice_rank=2,
monotonic_lattice_size=2)
hparams.set_param('calibration_monotonic', +1)
hparams.set_param('lattice_monotonic', True)
hparams.set_param('learning_rate', 0.1)
estimator = calibrated_etl.calibrated_etl_classifier(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
estimator.train(input_fn=train_input_fn)
predictions = [
results['logits'][0]
for results in estimator.predict(input_fn=test_input_fn)
]
self.assertEqual(len(predictions), 4)
# Check monotonicity. Note that projection has its own precision, so we
# add a small number.
self.assertLess(predictions[0], predictions[1] + 1e-6)
self.assertLess(predictions[0], predictions[2] + 1e-6)
self.assertLess(predictions[1], predictions[3] + 1e-6)
self.assertLess(predictions[2], predictions[3] + 1e-6)
def testCalibratedEtlWithMissingTraining(self):
    # x0 is missing with its own vertex, so it can take very different values,
    # while x1 is missing and calibrated, in this case to the middle of the
    # lattice.
x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
x_samples = {'x0': x0, 'x1': x1}
train_input_fn = numpy_io.numpy_input_fn(
x=x_samples,
y=training_y,
batch_size=x0.shape[0],
num_epochs=2000,
shuffle=False)
test_input_fn = numpy_io.numpy_input_fn(
x=x_samples, y=training_y, shuffle=False)
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
2, 0., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
['x0', 'x1'],
num_keypoints=2,
non_monotonic_num_lattices=5,
non_monotonic_lattice_rank=2,
non_monotonic_lattice_size=2,
learning_rate=0.1,
missing_input_value=-1.)
estimator = calibrated_etl.calibrated_etl_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
self.assertLess(results['average_loss'], 0.1)
if __name__ == '__main__':
test.main()
| 2
| 2
|
mjolnir/utilities/training_pipeline.py
|
kdhingra307/ncm
| 0
|
12774393
|
"""
Example script demonstrating the training portion of the MLR pipeline.
It is mostly meant to show how everything ties together.
To run:
PYSPARK_PYTHON=venv/bin/python spark-submit \
--jars /path/to/mjolnir-with-dependencies.jar \
--artifacts 'mjolnir_venv.zip#venv' \
path/to/training_pipeline.py
"""
from __future__ import absolute_import
import argparse
import datetime
import glob
import json
import logging
import mjolnir.feature_engineering
import mjolnir.training.xgboost
from mjolnir.utils import hdfs_open_read
import os
import pickle
from pyspark import SparkContext
from pyspark.sql import HiveContext
import sys
def run_pipeline(
sc, sqlContext, input_dir, output_dir, wikis, initial_num_trees,
final_num_trees, num_cv_jobs, iterations
):
with hdfs_open_read(os.path.join(input_dir, 'stats.json')) as f:
stats = json.loads(f.read())
wikis_available = set(stats['wikis'].keys())
if wikis:
missing = set(wikis).difference(wikis_available)
if missing:
raise Exception("Wikis not available: " + ", ".join(missing))
wikis = wikis_available.intersection(wikis)
else:
wikis = stats['wikis'].keys()
if not wikis:
raise Exception("No wikis provided")
for wiki in wikis:
config = stats['wikis'][wiki]
print('Training wiki: %s' % (wiki))
num_folds = config['num_folds']
if num_cv_jobs is None:
num_cv_jobs = num_folds
# Add extension matching training type
extension = ".xgb"
# Add file extensions to all the folds
folds = config['folds']
for fold in folds:
for partition in fold:
for name, path in partition.items():
partition[name] = path + extension
# "all" data with no splits
all_paths = config['all']
for partition in all_paths:
for name, path in partition.items():
partition[name] = path + extension
tune_results = mjolnir.training.xgboost.tune(
folds, config['stats'],
num_cv_jobs=num_cv_jobs,
train_matrix="train",
initial_num_trees=initial_num_trees,
final_num_trees=final_num_trees,
iterations=iterations)
print('CV test-ndcg@10: %.4f' % (tune_results['metrics']['cv-test']))
print('CV train-ndcg@10: %.4f' % (tune_results['metrics']['cv-train']))
tune_results['metadata'] = {
'wiki': wiki,
'input_dir': input_dir,
'training_datetime': datetime.datetime.now().isoformat(),
'dataset': config['stats'],
}
# Train a model over all data with best params.
best_params = tune_results['params'].copy()
print('Best parameters:')
for param, value in best_params.items():
print('\t%20s: %s' % (param, value))
model = mjolnir.training.xgboost.train(
all_paths, best_params, train_matrix="all")
tune_results['metrics'] = {
'train': model.summary.train()
}
print('train-ndcg@10: %.5f' % (tune_results['metrics']['train'][-1]))
# Save the tune results somewhere for later analysis. Use pickle
# to maintain the hyperopt.Trials objects as is. It might be nice
# to write out a json version, but the Trials objects require
# some more work before they can be json encoded.
tune_output_pickle = os.path.join(output_dir, 'tune_%s.pickle' % (wiki))
with open(tune_output_pickle, 'wb') as f:
# TODO: This includes special hyperopt and mjolnir objects, it would
# be nice if those could be converted to something simple like dicts
# and output json instead of pickle. This would greatly simplify
# post-processing.
f.write(pickle.dumps(tune_results))
print('Wrote tuning results to %s' % (tune_output_pickle))
# Generate a feature map so xgboost can include feature names in the dump.
# The final `q` indicates all features are quantitative values (floats).
if 'wiki_features' in config['stats']:
features = config['stats']['wiki_features'][wiki]
else:
features = config['stats']['features']
json_model_output = os.path.join(output_dir, 'model_%s.json' % (wiki))
with open(json_model_output, 'w') as f:
# The 'unused' first feature is because DataWriter creates datafiles
# that start at index 1 to support xgboost and lightgbm from the same
# file.
f.write(model.dump(['unused'] + features))
print('Wrote xgboost json model to %s' % (json_model_output))
# Write out the xgboost binary format as well, so it can be re-loaded
# and evaluated
model_output = os.path.join(output_dir, 'model_%s.xgb' % (wiki))
model.saveModelAsLocalFile(model_output)
print('Wrote xgboost binary model to %s' % (model_output))
print('')
def arg_parser():
parser = argparse.ArgumentParser(description='Train XGBoost ranking models')
parser.add_argument(
'-i', '--input', dest='input_dir', type=str, required=True,
help='Input path, prefixed with hdfs://, to dataframe with labels and features')
parser.add_argument(
'-o', '--output', dest='output_dir', type=str, required=True,
help='Path, on local filesystem, to directory to store the results of '
'model training to.')
parser.add_argument(
'-c', '--cv-jobs', dest='num_cv_jobs', default=None, type=int,
help='Number of cross validation folds to perform in parallel. Defaults to number '
+ 'of folds, to run all in parallel. If this is a multiple of the number '
+ 'of folds multiple cross validations will run in parallel.')
parser.add_argument(
'--initial-trees', dest='initial_num_trees', default=100, type=int,
help='Number of trees to perform hyperparamter tuning with. (Default: 100)')
parser.add_argument(
'--final-trees', dest='final_num_trees', default=None, type=int,
help='Number of trees in the final ensemble. If not provided the value from '
+ '--initial-trees will be used. (Default: None)')
parser.add_argument(
'-t', '--iterations', dest='iterations', default=150, type=int,
help='The number of hyperparameter tuning iterations to perform')
parser.add_argument(
'wikis', metavar='wiki', type=str, nargs='*',
help='A wiki to perform model training for.')
return parser
def main(**kwargs):
# TODO: Set spark configuration? Some can't actually be set here though, so best might be to set all of it
# on the command line for consistency.
app_name = "MLR: training pipeline xgboost"
if kwargs['wikis']:
app_name += ': ' + ', '.join(kwargs['wikis'])
sc = SparkContext(appName=app_name)
sc.setLogLevel('WARN')
sqlContext = HiveContext(sc)
output_dir = kwargs['output_dir']
if os.path.exists(output_dir):
logging.error('Output directory (%s) already exists' % (output_dir))
sys.exit(1)
# Maybe this is a bit early to create the path ... but should be fine.
# The annoyance might be that an error in training requires deleting
# this directory to try again.
os.mkdir(output_dir)
try:
run_pipeline(sc, sqlContext, **kwargs)
except: # noqa: E722
# If the directory we created is still empty delete it
# so it doesn't need to be manually re-created
if not len(glob.glob(os.path.join(output_dir, '*'))):
os.rmdir(output_dir)
raise
if __name__ == "__main__":
logging.basicConfig()
kwargs = dict(vars(arg_parser().parse_args()))
main(**kwargs)
| 2.625
| 3
|
accounts/forms.py
|
dertrockx/social-media-rant
| 1
|
12774394
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from . import models
class UserCreateForm(UserCreationForm):
class Meta:
fields = ('username', 'email', '<PASSWORD>', '<PASSWORD>')
model = get_user_model()
class UserProfileForm(forms.ModelForm):
class Meta:
fields = ('profile_picture',)
model = models.UserProfile
| 2.296875
| 2
|
base/media.py
|
danielecook/upvote.pub
| 1
|
12774395
|
# -*- coding: utf-8 -*-
"""
All code for scraping images and videos from posted
links go in this file.
"""
#import BeautifulSoup
import requests
from urllib.parse import urlparse, urlunparse, urljoin
img_extensions = ['jpg', 'jpeg', 'gif', 'png', 'bmp']
def make_abs(url, img_src):
domain = urlparse(url).netloc
scheme = urlparse(url).scheme
baseurl = scheme + '://' + domain
return urljoin(baseurl, img_src)
def clean_url(url):
frag = urlparse(url)
frag = frag._replace(query='', fragment='')
return urlunparse(frag)
def get_top_img(url, timeout=4):
"""
    Nothing fancy here; we merely check whether the page author
    set a designated image or whether the url itself is an image.
    This method could be much better, but we favor ease of
    installation and speed.
"""
if not url:
return None
url = clean_url(url)
# if the url is referencing an img itself, return it
if url.split('.')[-1].lower() in img_extensions:
return url
try:
pass
except Exception as e:
print('FAILED WHILE EXTRACTING THREAD IMG', str(e))
return None
return None
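# A hedged sketch of one possible scraping step for the empty try-block above: fetch the page
# and pull an Open Graph image tag with a regular expression. Relying on og:image is an
# assumption about the pages being linked, not something the original code promises.
import re
def get_og_image(url, timeout=4):
    try:
        resp = requests.get(url, timeout=timeout)
        match = re.search(
            r'<meta[^>]+property=["\']og:image["\'][^>]+content=["\']([^"\']+)',
            resp.text)
        if match:
            return make_abs(url, match.group(1))
    except Exception as e:
        print('FAILED WHILE EXTRACTING THREAD IMG', str(e))
    return None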
| 3.03125
| 3
|
keyword_annotator.py
|
ecohealthalliance/grits-api
| 3
|
12774396
|
<reponame>ecohealthalliance/grits-api<filename>keyword_annotator.py
#!/usr/bin/env python
"""Keyword Annotator"""
from __future__ import absolute_import
from collections import defaultdict
from epitator.annotator import Annotator, AnnoTier, AnnoSpan
from epitator.ngram_annotator import NgramAnnotator
import os
import pickle
import six
class KeywordAnnotator(Annotator):
keyword_types = ['diseases', 'hosts', 'modes', 'pathogens', 'symptoms']
keyword_type_map = {
'doid/diseases': 'diseases',
'eha/disease': 'diseases',
'pm/disease': 'diseases',
'hm/disease': 'diseases',
'biocaster/diseases': 'diseases',
'eha/symptom': 'symptoms',
'biocaster/symptoms': 'symptoms',
'doid/has_symptom': 'symptoms',
'pm/symptom': 'symptoms',
'symp/symptoms': 'symptoms',
'wordnet/hosts': 'hosts',
'eha/vector': 'hosts',
'wordnet/pathogens': 'pathogens',
'biocaster/pathogens': 'pathogens',
'pm/mode of transmission': 'modes',
'doid/transmitted_by': 'modes',
'eha/mode of transmission': 'modes'
}
def __init__(self, db=None):
with open(os.environ.get('KEYWORD_PICKLE_PATH') or 'current_classifier/keyword_array.p', 'rb') as f:
args = dict()
if six.PY3:
args = dict(fix_imports=True)
keyword_array = pickle.load(f, **args)
self.keywords = defaultdict(dict)
for keyword in keyword_array:
if keyword['category'] in self.keyword_type_map:
keyword_type = self.keyword_type_map[keyword['category']]
self.keywords[keyword_type][keyword['keyword'].lower()] = [
keyword['keyword'], keyword['case_sensitive']]
def annotate(self, doc):
if 'ngrams' not in doc.tiers:
doc.add_tiers(NgramAnnotator())
ngram_spans_by_lowercase = defaultdict(list)
for ngram_span in doc.tiers['ngrams'].spans:
ngram_spans_by_lowercase[ngram_span.text.lower()].append(
ngram_span)
ngrams = list(ngram_spans_by_lowercase.keys())
for keyword_type, keywords in self.keywords.items():
keyword_spans = []
for keyword in set(keywords.keys()).intersection(ngrams):
true_case = keywords[keyword][0]
case_sensitive = keywords[keyword][1]
for ngram_span in ngram_spans_by_lowercase[keyword]:
if not case_sensitive or ngram_span.text == true_case:
if case_sensitive:
label = true_case
else:
label = keyword
keyword_spans.append(
AnnoSpan(ngram_span.start, ngram_span.end, doc, label=label))
doc.tiers['keyword.' + keyword_type] = AnnoTier(keyword_spans).optimal_span_set()
return doc
| 2.265625
| 2
|
FaceSwap/Code/traditional/main.py
|
akathpal/ComputerVision-CMSC733
| 1
|
12774397
|
<filename>FaceSwap/Code/traditional/main.py
from tps import thinPlateSpline
from triangulation import triangulation
from facial_landmarks import *
import numpy as np
import cv2
def blending(img1Warped,hull2,img2):
# Calculate Mask
hull8U = []
for i in xrange(0, len(hull2)):
hull8U.append((hull2[i][0], hull2[i][1]))
mask = np.zeros(img2.shape, dtype = img2.dtype)
cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
r = cv2.boundingRect(np.float32([hull2]))
center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
# Clone seamlessly.
output = cv2.seamlessClone(np.uint8(img1Warped), img2, mask, center, cv2.NORMAL_CLONE)
return output
def traditional(img1,img2,points1,points2,method):
img1Warped = np.copy(img2);
# Find convex hull
hull1 = []
hull2 = []
hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
# print(len(hullIndex))
for i in xrange(0, len(hullIndex)):
hull1.append(points1[int(hullIndex[i])])
hull2.append(points2[int(hullIndex[i])])
if(method=="tps"):
img1Warped = thinPlateSpline(img1,img1Warped,points1,points2,hull2)
# cv2.imshow("Face Warped", img1warped)
# cv2.waitKey(2000)
# cv2.destroyAllWindows()
elif(method=="affine" or method=="tri"):
img1Warped = triangulation(img1,img2,img1Warped,hull1,hull2,method)
output = blending(img1Warped,hull2,img2)
return output
| 2.40625
| 2
|
tasks.py
|
JJ/More-mangas
| 0
|
12774398
|
from invoke import task, run
@task
def check(c):
    '''
    Check the project's syntax
    '''
    print("Checking syntax...")
    run("pyflakes code")  # The directory containing all of the application code
@task
def test(c):
    '''
    Run the project's tests
    '''
    print("Running tests...")
run("pytest")
| 1.945313
| 2
|
metadata/resources/ibis.py
|
bmampaey/SDA
| 0
|
12774399
|
<reponame>bmampaey/SDA
# Generated by command write_metadata_files version 1
from metadata.models import Ibis
from .base_metadata import BaseMetadataResource
__all__ = ['IbisResource']
class IbisResource(BaseMetadataResource):
'''RESTful resource for model Ibis'''
class Meta(BaseMetadataResource.Meta):
abstract = False
queryset = Ibis.objects.all()
resource_name = 'metadata_ibis'
| 1.726563
| 2
|
binstar_client/tests/runner.py
|
rpk101/anaconda-client
| 98
|
12774400
|
<gh_stars>10-100
'''
Created on Sep 23, 2013
@author: sean
'''
from unittest.runner import TextTestRunner, TextTestResult
from unittest.signals import registerResult
import time
import sys
WARNING = '\033[33m'
OKBLUE = '\033[34m'
OKGREEN = '\033[32m'
FAIL = '\033[31m'
ENDC = '\033[0m'
BOLD = "\033[1m"
def green(text):
return BOLD + OKGREEN + text + ENDC
def red(text):
return BOLD + FAIL + text + ENDC
def orange(text):
return WARNING + text + ENDC
def blue(text):
return OKBLUE + text + ENDC
class ColorTextTestResult(TextTestResult):
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln(green("ok"))
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln(red("ERROR"))
elif self.dots:
self.stream.write(red('E'))
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln(red("FAIL"))
elif self.dots:
self.stream.write(red('F'))
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln(blue("skipped {0!r}".format(reason)))
elif self.dots:
self.stream.write(blue("s"))
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln(blue("expected failure"))
elif self.dots:
self.stream.write(blue("x"))
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln(blue("unexpected success"))
elif self.dots:
self.stream.write(blue("u"))
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList(red('ERROR'), self.errors)
self.printErrorList(red('FAIL'), self.failures)
class ColorTextTestRunner(TextTestRunner):
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=ColorTextTestResult):
TextTestRunner.__init__(self, stream=stream, descriptions=descriptions,
verbosity=verbosity, failfast=failfast, buffer=buffer,
resultclass=resultclass)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
return result
def write_end(self, result, coverage):
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write('Tests: ' + red("FAILED"))
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write('Tests: ' + green("OK"))
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.write(" (%s)" % (", ".join(infos),))
perc = coverage.pc_covered
color = green
if perc < 80:
color = blue
if perc < 50:
color = red
cv = color("%i%%" % (int(coverage.pc_covered)))
self.stream.write(", Coverage: %s \n" % (cv))
| 2.5625
| 3
|
products/tests/factories.py
|
kevin-ci/janeric2
| 1
|
12774401
|
# From hacksoft.io/improve-your-tests-django-fakes-and-factories/
import factory
from faker import Faker
from factory import lazy_attribute
fake = Faker()
class CategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'products.Category'
id = factory.Sequence(lambda n: n)
name = factory.Sequence(lambda n: 'Category{0}' .format(n))
division = factory.Faker('word')
class Product_FamilyFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'products.Product_Family'
id = factory.Sequence(lambda n: n)
name = factory.Sequence(lambda n: 'Product_Family{0}' .format(n))
brand_name = factory.Faker('word')
class ProductFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'products.Product'
id = factory.Sequence(lambda n: n)
category = factory.SubFactory(CategoryFactory)
product_family = factory.SubFactory(Product_FamilyFactory)
name = factory.Sequence(lambda n: 'Test Product {0}' .format(n))
price = factory.Faker("random_int", min=2, max=1500)
active = factory.Faker("boolean", chance_of_getting_true=90)
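# A minimal usage sketch, assuming a configured Django test database: DjangoModelFactory
# persists instances on call, and any declared attribute can be overridden per call.
def _example_usage():
    product = ProductFactory()       # Product with generated category, family and price
    cheap = ProductFactory(price=5)  # override just the price
    return product, cheap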
| 2.484375
| 2
|
tifa/apps/admin/app.py
|
twocucao/tifa
| 71
|
12774402
|
from fastapi_utils.api_model import APIModel
from tifa.apps.admin.local import g
from tifa.apps.admin.router import bp
from tifa.models.app import App
class TApp(APIModel):
id: str
name: str
@bp.list("/apps", out=TApp, summary="App", tags=["App"])
def apps_list():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.item("/app", out=TApp, summary="App", tags=["App"])
def app_item():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.item("/app/fetch_manifest", out=TApp, summary="App", tags=["App"])
def app_fetch_manifest():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/create", out=TApp, summary="App", tags=["App"])
def app_create():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/update", out=TApp, summary="App", tags=["App"])
def app_update():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/activate", out=TApp, summary="App", tags=["App"])
def app_activate():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/deactivate", out=TApp, summary="App", tags=["App"])
def app_deactivate():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/delete", out=TApp, summary="App", tags=["App"])
def app_delete():
ins = g.adal.first_or_404(App)
return {"items": ins}
class TAppInstallation(APIModel):
id: str
name: str
@bp.list(
"/app/installations", out=TAppInstallation, summary="AppInstallation", tags=["App"]
)
def app_installations():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.item(
"/app/installation", out=TAppInstallation, summary="AppInstallation", tags=["App"]
)
def app_installation():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.item(
"/app/delete_failed_installation",
out=TAppInstallation,
summary="AppInstallation",
tags=["App"],
)
def app_delete_failed_installation():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/install", out=TApp, summary="App", tags=["App"])
def app_install():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/retry_install", out=TApp, summary="App", tags=["App"])
def app_retry_install():
ins = g.adal.first_or_404(App)
return {"items": ins}
class TAppToken(APIModel):
id: str
name: str
@bp.op("/app/token_create", out=TAppToken, summary="AppInstallation", tags=["App"])
def app_token_create():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/token_update", out=TAppToken, summary="AppInstallation", tags=["App"])
def app_token_update():
ins = g.adal.first_or_404(App)
return {"items": ins}
@bp.op("/app/token_verify", out=TAppToken, summary="AppInstallation", tags=["App"])
def app_token_verify():
ins = g.adal.first_or_404(App)
return {"items": ins}
| 2.1875
| 2
|
cadnano/views/pathview/__init__.py
|
sherwoodyao/cadnano2.5
| 69
|
12774403
|
# -*- coding: utf-8 -*-
from cadnano.util import to_dot_path
pp = to_dot_path(__file__)
PathNucleicAcidPartItemT = pp + '.nucleicacidpartitem.PathNucleicAcidPartItem'
PathVirtualHelixItemT = pp + 'virtualhelixitem.PathVirtualHelixItem'
PathStrandItemT = pp + 'strand.stranditem.StrandItem'
PathEndpointItemT = pp + 'strand.endpointitem.EndpointItem'
PathXoverItemT = pp + 'strand.xoveritem.XoverItem'
PathRootItemT = pp + 'pathrootitem.PathRootItem'
PathToolManagerT = pp + 'tools.pathtoolmanager.PathToolManager'
AbstractPathToolT = pp + 'tools.abstractpathtool.AbstractPathTool'
PreXoverManagerT = pp + '.prexovermanager.PreXoverManager'
| 1.335938
| 1
|
app/bda_core/entities/file/json_handler.py
|
bda-19fs/bda-chatbot
| 1
|
12774404
|
<filename>app/bda_core/entities/file/json_handler.py
import json
def from_str_to_json(line):
'''
Converts the given json string to a python dict.
:param line: The input json string
:return: A python dict
'''
    return json.loads(line)  # the json module decodes UTF-8 input itself
def dump_json(json_doc):
'''
Converts the given json document (dict) to a json string.
    ensure_ascii is set to False so that the json module
    does not escape non-ASCII characters in the output.
:param json_doc: The input dict
:return: A json string
'''
return json.dumps(json_doc, ensure_ascii=False)
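# A small round-trip sketch of the two helpers; the sample payload is purely illustrative.
def _example_round_trip():
    doc = from_str_to_json('{"name": "Zürich"}')
    return dump_json(doc)  # '{"name": "Zürich"}' -- the non-ASCII character is kept as-is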
| 3.234375
| 3
|
discordbot/utils/torrent.py
|
he305/discordbot
| 0
|
12774405
|
import asyncio
import aiohttp
from urllib.parse import urlparse
import shutil
from discordbot.hidden_data import USER_TORRENT, PASSWORD_TORRENT
import transmissionrpc
from discordbot.utils.proxy import Proxy
import logging
log = logging.getLogger(__name__)
class Torrent:
def __init__(self):
try:
self.tc = transmissionrpc.Client('localhost', port=9091, user=USER_TORRENT, password=<PASSWORD>)
except Exception:
print("Can't connect to transmission, torrent task won't work")
log.warning("Can't connect to transmission, torrent task won't work")
self.tc = None
return
self.proxy = Proxy()
print("Torrent loaded")
log.info("Torrent loaded")
async def add_torrent(self, url):
if self.tc is None:
return False
total, used, free = shutil.disk_usage("/")
if free // (2**30) < 2:
print("No free space left, can't add torrent")
log.warning("No free space left, can't add torrent")
return False
await self.proxy.get_new()
parsed_url = urlparse(url)
i = 0
temp_file = None
async with aiohttp.ClientSession() as session:
while temp_file is None:
try:
async with session.get(url, timeout=5, proxy=self.proxy.current) as resp:
if (resp.status != 200):
print("Nyaa.si is probably down, failed to load torrent")
log.warning("Nyaa.si is probably down, failed to load torrent")
return False
else:
url_name = parsed_url.path.split('/')[-1]
with open('tmp/' + url_name, 'wb') as temp_file:
temp_file.write(await resp.read())
except Exception as e:
if i > 5:
await self.proxy.get_new()
i = 0
print("Failed to load torrent: {}".format(repr(e)))
log.warning("Failed to torrent: {}".format(repr(e)))
self.proxy.changeCurrent()
i += 1
await asyncio.sleep(5)
try:
self.tc.add_torrent('home/pi/git/discordbot/' + temp_file.name)
except Exception as e:
print("Error while adding torrent: {}".format(e))
log.warning("Error while adding torrent: {}".format(e))
return False
print("Successfully added torrent: {}".format(url))
log.info("Successfully added torrent: {}".format(url))
# os.remove('home/pi/git/discordbot/' + temp_file.name)
return True
| 2.71875
| 3
|
day060/server.py
|
avholloway/100DaysOfCode
| 0
|
12774406
|
<reponame>avholloway/100DaysOfCode<filename>day060/server.py
import json
import requests
from flask import Flask, request, render_template
app = Flask(__name__)
def fetch_posts():
resp = requests.get("https://api.npoint.io/43644ec4f0013682fc0d")
resp.raise_for_status()
return resp.json()
@app.route("/")
def page_home():
return render_template("index.html", posts=fetch_posts())
@app.route("/about")
def page_about():
return render_template("about.html")
@app.route("/contact", methods=["POST", "GET"])
def page_contact():
if request.method == 'POST':
name = request.form['name']
message = request.form['message']
print(f"{name=}, {message=}")
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
elif request.method == "GET":
return render_template("contact.html")
@app.route("/post/<int:id>")
def page_post(id):
return render_template("post.html", post=[post for post in fetch_posts() if post['id'] == id][0])
if __name__ == "__main__":
app.run(debug=True)
| 2.9375
| 3
|
.vim/.ycm_extra_conf.py
|
artemMur/dotfiles
| 0
|
12774407
|
<filename>.vim/.ycm_extra_conf.py
from distutils.sysconfig import get_python_inc
import platform
import os
import subprocess
import ycm_core
DIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wpedantic',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-x', 'c', '-std=c11',
#'-x', 'c++', '-std=c++11',
]
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def FindCorrespondingSourceFile( filename ):
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
return replacement_file
return filename
def Settings( **kwargs ):
if kwargs[ 'language' ] == 'cfamily':
# If the file is a header, try to find the corresponding source file and
# retrieve its flags from the compilation database if using one. This is
# necessary since compilation databases don't have entries for header files.
# In addition, use this source file as the translation unit. This makes it
# possible to jump from a declaration in the header file to its definition
# in the corresponding source file.
filename = FindCorrespondingSourceFile( kwargs[ 'filename' ] )
if not database:
return {
'flags': flags,
'include_paths_relative_to_dir': DIR_OF_THIS_SCRIPT,
'override_filename': filename
}
compilation_info = database.GetCompilationInfoForFile( filename )
if not compilation_info.compiler_flags_:
return {}
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object.
final_flags = list( compilation_info.compiler_flags_ )
return {
'flags': final_flags,
'include_paths_relative_to_dir': compilation_info.compiler_working_dir_,
'override_filename': filename
}
return {}
def GetStandardLibraryIndexInSysPath( sys_path ):
for path in sys_path:
if os.path.isfile( os.path.join( path, 'os.py' ) ):
return sys_path.index( path )
raise RuntimeError( 'Could not find standard library path in Python path.' )
| 2.40625
| 2
|
zeropassweb/passes/forms.py
|
andreatulimiero/netsec-hs18
| 0
|
12774408
|
<reponame>andreatulimiero/netsec-hs18<gh_stars>0
from django import forms
class UsernamePasswordForm(forms.Form):
username = forms.CharField(label='Username')
password = forms.CharField(label='Password', widget=forms.PasswordInput())
class TransactionForm(forms.Form):
from_user = forms.CharField(label='Sending user')
from_user_pwd = forms.CharField(label='Password of sending user')
to_user = forms.IntegerField(label='Receiving user account id')
amount = forms.IntegerField(label='Amount of the transfer')
class SearchAccountForm(forms.Form):
username = forms.CharField(label='Search via username')
| 2.375
| 2
|
readability/syllables.py
|
jdkato/readability
| 0
|
12774409
|
<reponame>jdkato/readability
import re
from typing import Tuple, List
def is_complex(word: str, syllables: int) -> bool:
for suffix in ["es", "ed", "ing"]:
if word.endswith(suffix):
syllables -= 1
return syllables > 2
def count_syllables(word: str) -> int:
"""Return the number of syllables in the given word."""
word = word.lower()
size = len(word)
if size < 1:
return 0
elif size < 3:
return 1
case = cornercases.get(word)
if case:
return case
text, count = clean(word)
count += len(re_vowels.findall(text))
count -= len(re_monosyllabic_one.findall(text))
count -= len(re_monosyllabic_two.findall(text))
count += len(re_double_syllabic_one.findall(text))
count += len(re_double_syllabic_two.findall(text))
count += len(re_double_syllabic_three.findall(text))
count += len(re_double_syllabic_four.findall(text))
return 1 if count < 1 else count
def clean(word: str) -> Tuple[str, int]:
word, prefix = clear_part(word, increment_to_prefix, trim_any_prefixes)
word, suffix = clear_part(word, increment_to_suffix, trim_any_suffix)
return word, prefix + suffix
def clear_part(word: str, options: List[List[str]], f) -> Tuple[str, int]:
old = word
pos = len(options)
for i, trim in enumerate(options):
word = f(word, trim)
if word != old:
return word, pos - i
return word, 0
def trim_any_suffix(word: str, suffixes: List[str]) -> str:
"""Remove the provided suffixes from the given word."""
for suffix in suffixes:
if word.endswith(suffix):
return word.removesuffix(suffix)
return word
def trim_any_prefixes(word: str, prefixes: List[str]) -> str:
"""Remove the provided prefixes from the given word."""
for prefix in prefixes:
if word.startswith(prefix):
return word.removeprefix(prefix)
return word
cornercases = {
"abalone": 4,
"abare": 3,
"abed": 2,
"abruzzese": 4,
"abbruzzese": 4,
"aborigine": 5,
"aborigines": 5,
"acreage": 3,
"adame": 3,
"adieu": 2,
"adobe": 3,
"anemone": 4,
"apache": 3,
"aphrodite": 4,
"apostrophe": 4,
"ariadne": 4,
"cafe": 2,
"cafes": 2,
"calliope": 4,
"catastrophe": 4,
"chile": 2,
"chloe": 2,
"circe": 2,
"coyote": 3,
"epitome": 4,
"facsimile": 4,
"forever": 3,
"gethsemane": 4,
"guacamole": 4,
"hyperbole": 4,
"jesse": 2,
"jukebox": 2,
"karate": 3,
"machete": 3,
"maybe": 2,
"people": 2,
"recipe": 3,
"sesame": 3,
"shoreline": 2,
"simile": 3,
"syncope": 3,
"tamale": 3,
"yosemite": 4,
"daphne": 2,
"eurydice": 4,
"euterpe": 3,
"hermione": 4,
"penelope": 4,
"persephone": 4,
"phoebe": 2,
"zoe": 2,
}
re_monosyllabic_one = re.compile(
"cia(l|$)|"
+ "tia|"
+ "cius|"
+ "cious|"
+ "[^aeiou]giu|"
+ "[aeiouy][^aeiouy]ion|"
+ "iou|"
+ "sia$|"
+ "eous$|"
+ "[oa]gue$|"
+ ".[^aeiuoycgltdb]{2,}ed$|"
+ ".ely$|"
+ "^jua|"
+ "uai|"
+ "eau|"
+ "^busi$|"
+ "("
+ "[aeiouy]"
+ "("
+ "b|"
+ "c|"
+ "ch|"
+ "dg|"
+ "f|"
+ "g|"
+ "gh|"
+ "gn|"
+ "k|"
+ "l|"
+ "lch|"
+ "ll|"
+ "lv|"
+ "m|"
+ "mm|"
+ "n|"
+ "nc|"
+ "ng|"
+ "nch|"
+ "nn|"
+ "p|"
+ "r|"
+ "rc|"
+ "rn|"
+ "rs|"
+ "rv|"
+ "s|"
+ "sc|"
+ "sk|"
+ "sl|"
+ "squ|"
+ "ss|"
+ "th|"
+ "v|"
+ "y|"
+ "z"
+ ")"
+ "ed$"
+ ")|"
+ "("
+ "[aeiouy]"
+ "("
+ "b|"
+ "ch|"
+ "d|"
+ "f|"
+ "gh|"
+ "gn|"
+ "k|"
+ "l|"
+ "lch|"
+ "ll|"
+ "lv|"
+ "m|"
+ "mm|"
+ "n|"
+ "nch|"
+ "nn|"
+ "p|"
+ "r|"
+ "rn|"
+ "rs|"
+ "rv|"
+ "s|"
+ "sc|"
+ "sk|"
+ "sl|"
+ "squ|"
+ "ss|"
+ "st|"
+ "t|"
+ "th|"
+ "v|"
+ "y"
+ ")"
+ "es$"
+ ")"
)
re_monosyllabic_two = re.compile(
"[aeiouy]"
+ "("
+ "b|"
+ "c|"
+ "ch|"
+ "d|"
+ "dg|"
+ "f|"
+ "g|"
+ "gh|"
+ "gn|"
+ "k|"
+ "l|"
+ "ll|"
+ "lv|"
+ "m|"
+ "mm|"
+ "n|"
+ "nc|"
+ "ng|"
+ "nn|"
+ "p|"
+ "r|"
+ "rc|"
+ "rn|"
+ "rs|"
+ "rv|"
+ "s|"
+ "sc|"
+ "sk|"
+ "sl|"
+ "squ|"
+ "ss|"
+ "st|"
+ "t|"
+ "th|"
+ "v|"
+ "y|"
+ "z"
+ ")"
+ "e$",
)
re_double_syllabic_one = re.compile(
"(?:"
+ "[^aeiouy]ie"
+ "("
+ "r|"
+ "st|"
+ "t"
+ ")|"
+ "[aeiouym]bl|"
+ "eo|"
+ "ism|"
+ "asm|"
+ "thm|"
+ "dnt|"
+ "uity|"
+ "dea|"
+ "gean|"
+ "oa|"
+ "ua|"
+ "eings?|"
+ "[aeiouy]sh?e[rsd]"
+ ")$"
)
re_double_syllabic_two = re.compile("[^gq]ua[^auieo]|[aeiou]{3}|^(ia|mc|coa[dglx].)")
re_double_syllabic_three = re.compile(
"[^aeiou]y[ae]|"
+ "[^l]lien|"
+ "riet|"
+ "dien|"
+ "iu|"
+ "io|"
+ "ii|"
+ "uen|"
+ "real|"
+ "iell|"
+ "eo[^aeiou]|"
+ "[aeiou]y[aeiou]",
)
re_double_syllabic_four = re.compile("[^s]ia")
re_vowels = re.compile("[aeiouy]+")
increment_to_prefix = [
[
"above",
"anti",
"ante",
"counter",
"hyper",
"afore",
"agri",
"infra",
"intra",
"inter",
"over",
"semi",
"ultra",
"under",
"extra",
"dia",
"micro",
"mega",
"kilo",
"pico",
"nano",
"macro",
],
[
"un",
"fore",
"ware",
"none",
"non",
"out",
"post",
"sub",
"pre",
"pro",
"dis",
"side",
],
]
increment_to_suffix = [
["ology", "ologist", "onomy", "onomist"],
["fully", "berry", "woman", "women"],
[
"ly",
"less",
"some",
"ful",
"er",
"ers",
"ness",
"cian",
"cians",
"ment",
"ments",
"ette",
"ettes",
"ville",
"villes",
"ships",
"ship",
"side",
"sides",
"port",
"ports",
"shire",
"shires",
"tion",
"tioned",
],
]
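# --- Illustrative usage sketch (added for clarity, not part of the original
# module): exercises count_syllables() and is_complex() on a corner case, a
# plain word and a suffixed word; the sample words are arbitrary.
if __name__ == "__main__":
    for sample_word in ("adobe", "cat", "running"):
        n_syllables = count_syllables(sample_word)
        print(sample_word, n_syllables, is_complex(sample_word, n_syllables))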
| 3.890625
| 4
|
translator/conf/__init__.py
|
mail2nsrajesh/heat-translator
| 76
|
12774410
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Initialize the global configuration for the translator '''
import os
from translator.conf.config import ConfigProvider
CONF_FILENAME = 'translator.conf'
def init_global_conf():
'''Initialize the configuration provider.
Allows the configuration to be shared throughout the translator code.
The file used is translator.conf, and is within the conf/ directory. It
    is a standard ini format, and is processed using the ConfigParser module.
'''
conf_path = os.path.dirname(os.path.abspath(__file__))
conf_file = os.path.join(conf_path, CONF_FILENAME)
ConfigProvider._load_config(conf_file)
init_global_conf()
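# --- Hedged illustration (not part of the upstream package): since the
# docstring above describes translator.conf as a standard ini file, the same
# file can also be inspected with the stdlib configparser. The helper name
# below is hypothetical and it is intentionally not called at import time.
def _debug_dump_conf():
    import configparser
    parser = configparser.ConfigParser()
    parser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             CONF_FILENAME))
    for section in parser.sections():
        print(section, dict(parser[section]))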
| 2.25
| 2
|
core.py
|
jonnelafin/JFUtils-python
| 0
|
12774411
|
version = "dev 0.0"
running = False
def init():
global running
if not running:
print("JFUtils-python \"" + version + "\" by jonnelafin")
running = True
| 1.65625
| 2
|
dueros/directive/AppLauncher/LaunchApp.py
|
ayxue/BaiduSaxoOpenAPI
| 0
|
12774412
|
#!/usr/bin/env python2
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/1/2
from dueros.directive.BaseDirective import BaseDirective
import logging
class LaunchApp(BaseDirective):
"""
    Directive class used to launch an app.
"""
def __init__(self, app_name='', package_name='', deep_link=''):
"""
        At least one of the three arguments must be provided.
        :param app_name: name of the application
        :param package_name: package name of the application
        :param deep_link: deep link that opens a specific feature of the application
"""
super(LaunchApp, self).__init__('AppLauncher.LaunchApp')
if not app_name and not package_name and not deep_link:
            print('One of app_name, package_name or deep_link must be provided')
else:
self.data = dict({
'appName': app_name,
'packageName': package_name,
'deepLink': deep_link,
'token': self.gen_token()
}, **self.data)
def set_app_name(self, app_name):
if app_name:
self.data['appName'] = app_name
return self
def set_package_name(self, package_name):
if package_name:
self.data['packageName'] = package_name
return self
def set_deep_link(self, deep_link):
if deep_link:
self.data['deepLink'] = deep_link
return self
if __name__ == '__main__':
launchApp = LaunchApp('', '', '2')
launchApp.set_deep_link('dd')
print(launchApp.data)
| 2.328125
| 2
|
specification/scripts/spec_tools/util.py
|
SteveSmithEpic/OpenXR-SDK-Source
| 0
|
12774413
|
<filename>specification/scripts/spec_tools/util.py
"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def _conditional_string_strip_append(my_list, optional_str):
if not optional_str:
return
stripped = optional_str.strip()
if not stripped:
return
my_list.append(stripped)
def getParamOrMemberFullType(elem, default=None):
"""Get the full type associated with a member or param element.
This includes the text preceding, within, and following the 'type' tag."""
parts = []
for node in elem.iter():
_conditional_string_strip_append(parts, node.text)
_conditional_string_strip_append(parts, node.tail)
if node.tag == 'type':
return ' '.join(parts)
# Fallback if there is no child with a "type" tag
return default
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
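# --- Hypothetical usage sketch (not part of spec_tools): builds a tiny
# registry-like element with xml.etree.ElementTree and runs the helpers above
# over it. The XML snippet is illustrative only.
if __name__ == "__main__":
    from xml.etree import ElementTree as etree

    member = etree.fromstring(
        "<member>const <type>char</type>* <name>deviceName</name></member>")
    print(getElemName(member))               # deviceName
    print(getElemType(member))               # char
    print(getParamOrMemberFullType(member))  # const char *
    print(findNamedElem([member], "deviceName") is member)  # True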
| 1.921875
| 2
|
utils/build_server.py
|
ZQPei/Huster
| 6
|
12774414
|
<reponame>ZQPei/Huster
#!/home/pzq/anaconda3/bin/python
import sys
from huster.server import build_server, parse_args
def huster():
args = parse_args()
build_server(port=args.port, base_dir=args.base_dir)
sys.exit()
if __name__ == "__main__":
huster()
| 1.804688
| 2
|
tests/convert/test_converters.py
|
arkataev/text_normalizer
| 0
|
12774415
|
import json
import os
from contextlib import contextmanager
import pytest
from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num
from text_normalizer.convert._convert import _numerics
from ..settings import TESTS_PATH
with open(os.path.join(TESTS_PATH, 'convert/data/numerics_ds.json'), encoding='utf-8') as f:
nums_dataset = json.load(f)
@contextmanager
def does_not_raise():
yield
@pytest.mark.parametrize('text, num', _numerics.items())
def test_text2int_single(text, num):
nums_list = text.split()
assert text2int(*nums_list) == num[0]
@pytest.mark.parametrize('num, text', nums_dataset.items())
def _test_text2int_dataset(text, num):
nums_list = text.split()
assert text2int(*nums_list) == int(num)
@pytest.mark.parametrize('text, num', [
("десяток", 10),
("два десяток", 20),
("пять десяток", 50),
("сотня", 100),
("две сотня", 200),
("", 0),
])
def test_text2int_custom(text, num):
nums_list = text.split()
assert text2int(*nums_list) == num
@pytest.mark.parametrize('text, num, expected', [
('двести', 200, does_not_raise()),
('пять десятков', 50, pytest.raises(ValueError)),
('сто один двадцать', 0, pytest.raises(ValueError)),
('десяток двоек', 0, pytest.raises(ValueError)),
('тысяча миллион', 0, pytest.raises(ValueError)),
('абвгд', 0, pytest.raises(ValueError)),
])
def test_text2int_raises(text, num, expected):
nums_list = text.split()
with expected:
assert text2int(*nums_list) == num
@pytest.mark.parametrize('inp, outp', [
('10-ый', True),
('10-го', True),
('10-му', True),
('10-го', True),
('10-ым', True),
('10-м', True),
('10-ом', True),
('10', False),
])
def test_isordfold(inp, outp):
assert is_ordfold(inp) == outp
@pytest.mark.parametrize('inp, outp, expected', [
('10-ый', '10', does_not_raise()),
('10-го', '10', does_not_raise()),
('10-му', '10', does_not_raise()),
('10-го', '10', does_not_raise()),
('10-ым', '10', does_not_raise()),
('10-м', '10', does_not_raise()),
('10-ом', '10', does_not_raise()),
('10ый', '10', does_not_raise()),
('10го', '10', does_not_raise()),
('10му', '10', does_not_raise()),
('10го', '10', does_not_raise()),
('10ым', '10', does_not_raise()),
('10м', '10', does_not_raise()),
('10ом', '10', does_not_raise()),
("", '', pytest.raises(ValueError)),
("asdc", '', pytest.raises(ValueError)),
])
def test_ord_unfold(inp, outp, expected):
with expected:
assert ord_unfold(inp) == outp
@pytest.mark.parametrize('inp, outp', zip(MONTHS, range(1, 13)), ids=MONTHS)
def test_month2num(inp, outp):
assert month2num(inp) == outp
| 2.65625
| 3
|
doctor_tests/consumer/__init__.py
|
hashnfv/hashnfv-doctor
| 0
|
12774416
|
##############################################################################
# Copyright (c) 2017 ZTE Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from oslo_config import cfg
from oslo_utils import importutils
OPTS = [
cfg.StrOpt('type',
default='sample',
choices=['sample'],
help='the component of doctor consumer',
required=True),
cfg.StrOpt('ip',
default='127.0.0.1',
help='the ip of consumer',
required=True),
cfg.IntOpt('port',
               default=12346,
help='the port of doctor consumer',
required=True),
]
_consumer_name_class_mapping = {
'sample': 'doctor_tests.consumer.sample.SampleConsumer'
}
def get_consumer(conf, log):
consumer_class = _consumer_name_class_mapping.get(conf.consumer.type)
return importutils.import_object(consumer_class, conf, log)
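# --- Hedged wiring sketch (not from the upstream tree): registers the options
# above under a 'consumer' group, which is what the conf.consumer.* access in
# get_consumer() implies. The group name is inferred here, not confirmed.
if __name__ == '__main__':
    conf = cfg.ConfigOpts()
    conf.register_opts(OPTS, group='consumer')
    conf([])
    print(conf.consumer.type, conf.consumer.ip, conf.consumer.port)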
| 2.046875
| 2
|
chapter-3/classifier_predict_v1.py
|
outerbounds/dsbook
| 27
|
12774417
|
from metaflow import FlowSpec, step, Flow, Parameter, JSONType
class ClassifierPredictFlow(FlowSpec):
vector = Parameter('vector', type=JSONType, required=True)
@step
def start(self):
run = Flow('ClassifierTrainFlow').latest_run
self.train_run_id = run.pathspec
self.model = run['end'].task.data.model
print("Input vector", self.vector)
self.next(self.end)
@step
def end(self):
print('Model', self.model)
if __name__ == '__main__':
ClassifierPredictFlow()
| 2.40625
| 2
|
src/unicef_snapshot/migrations/0001_initial.py
|
unicef/unicef-snapshot
| 0
|
12774418
|
<filename>src/unicef_snapshot/migrations/0001_initial.py
# Generated by Django 1.10.8 on 2018-03-26 16:05
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('target_object_id', models.CharField(db_index=True, max_length=255, verbose_name='Target Object ID')),
('action', models.CharField(choices=[('create', 'Create'),
('update', 'Update')], max_length=50, verbose_name='Action')),
('data', models.JSONField(verbose_name='Data')),
('change', models.JSONField(blank=True, verbose_name='Change')),
('by_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL, verbose_name='By User')),
('target_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='activity', to='contenttypes.ContentType', verbose_name='Content Type')),
],
options={
'ordering': ['-created'],
'verbose_name_plural': 'Activities',
},
),
]
| 1.726563
| 2
|
indad/train_patchcore.py
|
ChengIC/my_patchcore
| 2
|
12774419
|
from models import PatchCore
from save_utils import saveModelPath
import numpy
import torch
import warnings
from torch import tensor
from torchvision import transforms
import json
import numpy
from PIL import Image, ImageFilter
import os
from torch.utils.data import DataLoader,TensorDataset
warnings.filterwarnings("ignore")
class train_patchcore():
def __init__(self,configPath,train_imgs_folder,
resize=None,center_crop=None,
f_coreset=.20,backbone_name="wide_resnet50_2",
TimeStamp=None):
self.configPath=configPath
self.train_imgs_folder=train_imgs_folder
self.resize=resize
self.center_crop=center_crop
self.f_coreset=f_coreset
self.backbone_name=backbone_name
self.TimeStamp=TimeStamp
with open(configPath) as json_file:
self.data = json.load(json_file)
self.model=PatchCore(
f_coreset=f_coreset,
backbone_name=backbone_name,
)
self.train_tar,self.train_path,self.model_path=saveModelPath(self.configPath,self.TimeStamp)
IMAGENET_MEAN = tensor([.485, .456, .406])
IMAGENET_STD = tensor([.229, .224, .225])
transfoms_paras = [
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
]
if resize!=None:
transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC))
if center_crop!=None:
transfoms_paras.append(transforms.CenterCrop(center_crop))
if self.data!=None:
self.scaling_factor=self.data['scaling_factor']
self.median_blur_size=self.data['smooth']
if self.scaling_factor!=1:
width = int(self.data['original_imgsz'][0]*self.scaling_factor)
height = int(self.data['original_imgsz'][1]*self.scaling_factor)
self.resize=[height,width]
transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC))
self.loader=transforms.Compose(transfoms_paras)
def genTrainDS(self):
train_ims = []
train_labels = []
for img_id in self.data['train_ids']:
img_path = os.path.join(self.train_imgs_folder, img_id)
train_im = Image.open(img_path).convert('RGB')
if self.median_blur_size!=0:
train_im = train_im.filter(ImageFilter.MedianFilter(size=self.median_blur_size))
print ('Applying median filter on training image with degree of '+ str(self.median_blur_size))
train_im = self.loader(train_im)
train_label = tensor([0])
train_ims.append(train_im.numpy())
train_labels.append(train_label.numpy())
train_ims = numpy.array(train_ims)
train_labels = numpy.array(train_labels)
print ('Training Tensor Shape is' + str(train_ims.shape))
train_ims = torch.from_numpy(train_ims)
train_labels = torch.from_numpy(train_labels)
train_data = TensorDataset(train_ims,train_labels)
train_ds = DataLoader(train_data)
return train_ds
def saveTrainConfig(self):
self.data['configPath'] = self.configPath
self.data['imgsz'] = self.resize
self.data['center_crop'] = self.center_crop
self.data['scaling_factor'] = self.scaling_factor
self.data['train_imgs_folder'] = self.train_imgs_folder
self.data['backbone_name'] = self.backbone_name
self.data['TimeStamp'] = self.TimeStamp
json_string = json.dumps(self.data)
json_filePath = os.path.join(self.model_path,'training_config.json')
with open(json_filePath, 'w') as outfile:
outfile.write(json_string)
def run(self):
train_ds = self.genTrainDS()
tobesaved = self.model.fit(train_ds)
torch.save(tobesaved, self.train_tar)
torch.save(self.model.state_dict(), self.train_path)
self.saveTrainConfig()
| 2.140625
| 2
|
user/migrations/0011_alter_post_interacted_plusone.py
|
64Digits/SixtyFour
| 1
|
12774420
|
# Generated by Django 4.0.1 on 2022-01-13 19:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import user.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user', '0010_post_interacted'),
]
operations = [
migrations.AlterField(
model_name='post',
name='interacted',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.CreateModel(
name='PlusOne',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('targetType', models.IntegerField(choices=[(1, 'Comment'), (2, 'Post')])),
('targetComment', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='user.comment')),
('targetPost', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='user.post')),
('user', models.ForeignKey(on_delete=models.SET(user.models.get_sentinel_user), to=settings.AUTH_USER_MODEL)),
],
managers=[
('plus_ones', django.db.models.manager.Manager()),
],
),
]
| 1.71875
| 2
|
dnsaas/api/v2/serializers.py
|
mike-johnson-jr/django-powerdns-dnssec
| 32
|
12774421
|
"""Serializer classes for DNSaaS API"""
import ipaddress
from django.conf import settings
from django.contrib.auth import get_user_model
from powerdns.utils import find_domain_for_record
from powerdns.models import (
RECORD_A_TYPES,
CryptoKey,
Domain,
DomainMetadata,
DomainTemplate,
Record,
RecordRequest,
RecordTemplate,
RequestStates,
Service,
SuperMaster,
TsigKey,
)
from rest_framework import serializers
from rest_framework.serializers import (
PrimaryKeyRelatedField,
ReadOnlyField,
ModelSerializer,
SlugRelatedField,
)
class OwnerSerializer(ModelSerializer):
owner = SlugRelatedField(
slug_field='username',
queryset=get_user_model().objects.all(),
allow_null=True,
required=False,
)
class DomainSerializer(OwnerSerializer):
id = ReadOnlyField()
service_name = serializers.SerializerMethodField()
def get_service_name(self, obj):
return obj.service.name if obj.service else ''
class Meta:
model = Domain
read_only_fields = ('notified_serial',)
class ServiceSerializer(ModelSerializer):
class Meta:
model = Service
class RecordRequestSerializer(OwnerSerializer):
last_change = serializers.SerializerMethodField()
target_owner = SlugRelatedField(
slug_field='username',
queryset=get_user_model().objects.all(),
allow_null=True,
required=False,
)
created = serializers.DateTimeField(
format='%Y-%m-%d %H:%M:%S', read_only=True
)
modified = serializers.DateTimeField(
format='%Y-%m-%d %H:%M:%S', read_only=True
)
class Meta:
model = RecordRequest
def get_last_change(self, obj):
if obj.state == RequestStates.OPEN:
return obj._get_json_history(obj.get_object())
else:
return obj.last_change_json
def _trim_whitespace(data_dict, trim_fields):
for field_name in trim_fields:
if field_name not in data_dict:
continue
data_dict[field_name] = data_dict[field_name].strip()
return data_dict
class RecordSerializer(OwnerSerializer):
class Meta:
model = Record
read_only_fields = ('change_date', 'ordername',)
domain = PrimaryKeyRelatedField(
queryset=Domain.objects.all(),
required=False,
allow_null=True,
)
service = PrimaryKeyRelatedField(
queryset=Service.objects.all(),
required=False,
allow_null=True,
# required by setting REQUIRED_SERVICE_FIELD
)
service_uid = SlugRelatedField(
slug_field='uid',
source='service',
queryset=Service.objects.all(),
allow_null=True,
required=False,
many=False,
read_only=False,
# required by setting REQUIRED_SERVICE_FIELD
)
service_name = serializers.SerializerMethodField()
modified = serializers.DateTimeField(
format='%Y-%m-%d %H:%M:%S', read_only=True
)
change_request = serializers.SerializerMethodField(
'get_change_record_request'
)
delete_request = serializers.SerializerMethodField(
'get_delete_record_request'
)
unrestricted_domain = serializers.BooleanField(
source='domain.unrestricted', read_only=True
)
def is_valid(self, raise_exception=False):
if (
'service_uid' in self.initial_data and
not self.initial_data['service_uid']
):
del self.initial_data['service_uid']
return super(RecordSerializer, self).is_valid(raise_exception)
def get_service_name(self, obj):
return obj.service.name if obj.service else ''
def get_change_record_request(self, record):
record_request = record.requests.all()
if record_request:
return record_request[0].key
return None
def get_delete_record_request(self, record):
delete_request = record.delete_request.all()
if delete_request:
return delete_request[0].key
return None
def _clean_txt_content(self, record_type, attrs):
"""
        Remove backslashes from `content` (from `attrs`) in place when
`type`=TXT
"""
        # DNS servers don't accept backslashes (\) in content, so neither do we
if record_type == 'TXT':
attrs['content'] = attrs['content'].replace('\\', '')
def _ensure_owner_is_set(self):
if self.instance and not self.instance.has_owner():
raise serializers.ValidationError({
'owner': [
'Record requires owner to be editable. Please contact DNS support.' # noqa
]
})
def _validate_service(self, attrs):
if not settings.REQUIRED_SERVICE_FIELD or self.instance:
return
if 'service' not in attrs:
raise serializers.ValidationError({
'service': [
'Service is required. Please provide DNSaaS internal '
'service ID in field `service` or global service UID in '
'field `service_uid`.'
]
})
def validate(self, attrs):
self._ensure_owner_is_set()
_trim_whitespace(attrs, ['name', 'content'])
domain, content, record_type = (
attrs.get('domain'), attrs.get('content'), attrs.get('type')
)
if record_type in RECORD_A_TYPES:
try:
ipaddress.ip_address(content)
except ValueError:
raise serializers.ValidationError({
'content': ['Content should be valid IP address']
})
self._clean_txt_content(record_type, attrs)
if (
domain and domain.template and
domain.template.is_public_domain and
content and record_type == 'A'
):
address = ipaddress.ip_address(content)
if address.is_private:
raise serializers.ValidationError(
{'content': ['IP address cannot be private.']}
)
if not self.instance:
# get domain from name only for creation
if not domain:
domain = find_domain_for_record(attrs['name'])
if not domain:
raise serializers.ValidationError({
'domain': [
'No domain found for name {}'.format(
attrs['name']
)
]
})
attrs['domain'] = domain
self._validate_service(attrs)
return attrs
class CryptoKeySerializer(ModelSerializer):
class Meta:
model = CryptoKey
class DomainMetadataSerializer(ModelSerializer):
class Meta:
model = DomainMetadata
class SuperMasterSerializer(ModelSerializer):
class Meta:
model = SuperMaster
class DomainTemplateSerializer(ModelSerializer):
class Meta:
model = DomainTemplate
class RecordTemplateSerializer(ModelSerializer):
class Meta:
model = RecordTemplate
class TsigKeysTemplateSerializer(ModelSerializer):
class Meta:
model = TsigKey
| 2.15625
| 2
|
lab4_2/helpers/FA.py
|
cinnamonbreakfast/flcd
| 0
|
12774422
|
<reponame>cinnamonbreakfast/flcd
class FiniteAutomataState:
def __init__(self, structure):
self.states = []
self.alphabet = []
self.initial = []
self.finals = []
self.transitions = {}
self._file = open(structure, "r")
self._load()
# print(self.validate())
def _load(self):
        reading = "none"
line = self._file.readline()
def classify(mode, probe):
if mode == "states":
spec = probe.split(', ')
self.states.extend(spec)
elif mode == "initial":
spec = probe.split(', ')
self.initial.extend(spec)
elif mode == "alpha":
spec = probe.split(', ')
self.alphabet.extend(spec)
elif mode == "trans":
values = probe.split(", ")
if (values[0], values[1]) in self.transitions.keys():
self.transitions[(values[0], values[1])].append(values[2])
else:
self.transitions[(values[0], values[1])] = [values[2]]
elif mode == "final":
tokens = probe.split(", ")
self.finals.extend(tokens)
while line:
if line.strip()[0] == '#':
reading = line.strip()[1:]
else:
classify(reading, line.strip())
line = self._file.readline()
def validate(self):
if self.initial[0] not in self.states:
return False
for final in self.finals:
if final not in self.states:
return False
for key in self.transitions.keys():
state = key[0]
symbol = key[1]
if state not in self.states or symbol not in self.alphabet:
return False
for dest in self.transitions[key]:
if dest not in self.states:
return False
return True
def dfa(self):
for key in self.transitions.keys():
if len(self.transitions[key]) > 1:
return False
return True
def accepted(self, sequence):
if self.dfa():
crt = self.initial[0]
for symbol in sequence:
if (crt, symbol) in self.transitions.keys():
crt = self.transitions[(crt, symbol)][0]
else:
return False
return crt in self.finals
return False
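# --- Hypothetical usage sketch (not part of the original lab code): writes a
# tiny automaton description in the same '#section' format the loader expects
# and checks a word against it. The file name and contents are made up.
if __name__ == "__main__":
    demo_lines = [
        "#states", "q0, q1",
        "#alpha", "a, b",
        "#initial", "q0",
        "#final", "q1",
        "#trans", "q0, a, q1", "q1, b, q1",
    ]
    with open("fa_demo.in", "w") as handle:
        handle.write("\n".join(demo_lines) + "\n")
    fa = FiniteAutomataState("fa_demo.in")
    print(fa.validate(), fa.dfa(), fa.accepted("abb"))  # True True True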
| 2.703125
| 3
|
send.py
|
MikeRyan56/Flask-RabbitMQ
| 0
|
12774423
|
<reponame>MikeRyan56/Flask-RabbitMQ<gh_stars>0
import pika
import json
import config as cfg
import silly
from random import randint, SystemRandom
import datetime
time_start = datetime.datetime.now()
count_id = 0
for x in range(0, 1000):
# credentials = pika.PlainCredentials(username=cfg.USER, password=cfg.USER)
# parameters = pika.ConnectionParameters(host=cfg.RABBIT_HOST, port=cfg.PORT, '/', credentials)
credentials = pika.PlainCredentials(username='guest', password='<PASSWORD>')
parameters = pika.ConnectionParameters("localhost",32783, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue=cfg.QUEUE_TOPIC)
count_id += 1
messType = ["100", "101", "102", "103", "200", "201", "202"]
sr = SystemRandom()
srType = sr.choice(messType)
name = silly.thing()
city = silly.city()
company = silly.company()
description = silly.sentence()
data = {
"id": count_id,
"name": name,
"city": city,
"company": company,
"description": description,
"type": srType
}
message = json.dumps(data)
channel.basic_publish(exchange='', routing_key=cfg.QUEUE_TOPIC, body=message)
print(message)
connection.close()
time_end = datetime.datetime.now()
print(time_end - time_start)
| 2.640625
| 3
|
cask/cask/schema.py
|
dcramer/cask-server
| 2
|
12774424
|
import graphene
from graphene_django.types import DjangoObjectType
from cask.accounts.models import Follower
from cask.utils import optimize_queryset
from .models import CheckIn
class CheckInScope(graphene.Enum):
class Meta:
name = "CheckInScope"
public = "public"
friends = "friends"
class CheckInNode(DjangoObjectType):
class Meta:
model = CheckIn
name = "CheckIn"
class Query(object):
checkins = graphene.List(
CheckInNode,
id=graphene.UUID(),
scope=graphene.Argument(CheckInScope),
created_by=graphene.UUID(),
)
def resolve_checkins(
self, info, id: str = None, scope: str = None, created_by: str = None
):
user = info.context.user
qs = CheckIn.objects.all()
if id:
qs = qs.filter(id=id)
if scope == "friends":
if not user.is_authenticated:
return qs.none()
qs = qs.filter(created_by__in=Follower.objects.filter(from_user=user.id))
        # there is no privacy scope yet
elif scope == "public":
pass
elif scope:
raise NotImplementedError
if created_by:
qs = qs.filter(created_by=created_by)
qs = qs.order_by("-created_at")
qs = optimize_queryset(qs, info, "checkins")
return qs
| 2.21875
| 2
|
pyScript/custom_src/Node.py
|
Shirazbello/Pyscriptining
| 0
|
12774425
|
<gh_stars>0
from PySide2.QtGui import QColor
class Node:
def __init__(self):
# general attributes
# static:
self.title = ''
self.type = '' # kind of extends the title with further information, f.ex.: 'function input node'
self.description = ''
        self.package = None  # 'built in' means the node is built in; any other value means the node came from an external package (important)
self.has_main_widget = False
self.main_widget_class = None
self.main_widget_pos = ''
self.design_style = 'extended' # default value just for testing
self.color = QColor(198, 154, 21) # QColor(59, 156, 217)
# dynamic: (get copied and then individually edited in NIs)
        self.code = ''  # only exists in pyScript for source code generation in static nodes (standard)!
self.inputs = []
self.outputs = []
# !!! inputs and outputs may be edited for input-and output nodes in VyFunction !!!
# class GetVariable_Node(Node):
# def __init__(self, parent_variable):
# super(GetVariable_Node, self).__init__()
#
# self.parent_variable = parent_variable
#
# self.title = parent_variable.vy_name
# self.type = 'get variable node'
# self.package = 'built in'
# self.description = 'returns variable'
# # TODO code of GetVariableNode
#
# output_port = NodePort()
# output_port.type = 'data'
# self.outputs.append(output_port)
#
#
class SetVariable_Node(Node):
def __init__(self):
super(SetVariable_Node, self).__init__()
self.title = 'set var'
self.type = 'set variable node'
self.package = 'built in'
self.description = 'sets the value of a variable'
exec_input_port = NodePort()
exec_input_port.type = 'exec'
self.inputs.append(exec_input_port)
var_name_data_input_port = NodePort()
var_name_data_input_port.type = 'data'
var_name_data_input_port.label = 'var'
var_name_data_input_port.widget_pos = 'besides'
self.inputs.append(var_name_data_input_port)
val_name_data_input_port = NodePort()
val_name_data_input_port.type = 'data'
val_name_data_input_port.label = 'val'
val_name_data_input_port.widget_pos = 'besides'
self.inputs.append(val_name_data_input_port)
exec_output_port = NodePort()
exec_output_port.type = 'exec'
self.outputs.append(exec_output_port)
val_output_port = NodePort()
val_output_port.type = 'data'
val_output_port.label = 'val'
self.outputs.append(val_output_port)
class GetVariable_Node(Node):
def __init__(self):
super(GetVariable_Node, self).__init__()
self.title = 'get var'
self.type = 'get variable node'
self.package = 'built in'
self.description = 'gets the value of a variable'
data_input_port = NodePort()
data_input_port.type = 'data'
data_input_port.widget_type = 'std line edit'
data_input_port.widget_pos = 'besides'
self.inputs.append(data_input_port)
data_output_port = NodePort()
data_output_port.type = 'data'
data_output_port.label = 'val'
self.outputs.append(data_output_port)
class NodePort:
# type = ''
# label = ''
def __init__(self):
# general attributes
self.type = '' # TODO: change type to _type (shadowing!)
self.label = ''
self.widget_type = 'std line edit' # only important for data inputs
self.widget_name = '' # only important for data inputs with custom programmed widgets
self.widget_pos = 'under' # " same
| 2.640625
| 3
|
pipeline/coann/brents_bpbio/blasttools/blast_misc/test_b.py
|
gturco/find_cns
| 4
|
12774426
|
import sys
sys.path.insert(0,".")
import blast_misc
import time
import operator
blast_file = 'data/t.blast'
b = blast_misc.blast_array(blast_file, best_hit=0, maxkeep=999999, dopickle=0)
print(b)
print(b.shape)
| 1.609375
| 2
|
sentiment_analysis/sentiment_analysis.py
|
mFarouki/tensorflow
| 0
|
12774427
|
import os
from download_and_view import download_and_unzip, check_directory_contents, read_random_review, \
remove_unneeded_directories
from load_raw_datasets import build_raw_datasets, view_dataset
from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset
from train_model import train_model
from evaluate_model import visualise_training
from export_model import export_model
imdb_url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
expected_top_level_contents = ['train', 'test', 'README', 'imdbEr.txt', 'imdb.vocab']
expected_train_contents = ['labeledBow.feat', 'urls_pos.txt', 'urls_unsup.txt', 'unsup', 'pos', 'unsupBow.feat',
'urls_neg.txt', 'neg']
def create_directory_structure():
dataset_dir = download_and_unzip("aclImdb_v1", imdb_url, 'aclImdb')
check_directory_contents(dataset_dir, expected_top_level_contents)
train_dir = os.path.join(dataset_dir, 'train')
read_random_review(train_dir)
remove_unneeded_directories(train_dir, 'unsup')
return dataset_dir
def main():
dataset_dir = create_directory_structure()
raw_train_dataset, raw_validation_dataset, raw_test_dataset = build_raw_datasets(dataset_dir)
view_dataset(raw_validation_dataset)
vectorised_layer = apply_vectorisation(raw_train_dataset)
max_features = len(vectorised_layer.get_vocabulary())
view_sample_vectorisation(raw_train_dataset, vectorised_layer)
train_dataset, validation_dataset, test_dataset = \
preprocess_dataset(raw_train_dataset, vectorised_layer), \
preprocess_dataset(raw_validation_dataset, vectorised_layer), \
preprocess_dataset(raw_test_dataset, vectorised_layer)
model, history = train_model(train_dataset, validation_dataset, max_features)
visualise_training(history, model, test_dataset)
model_to_export = export_model(vectorised_layer, model)
if __name__ == "__main__":
main()
| 2.28125
| 2
|
tests/__init__.py
|
jcs-lambda/lambdata-jcs-lambda
| 0
|
12774428
|
<reponame>jcs-lambda/lambdata-jcs-lambda<gh_stars>0
"""Unit tests for df_utils"""
| 1.046875
| 1
|
introduccion_algoritmos/1-introduccion/uddt.py
|
cristian-rincon/escuela-datascience
| 0
|
12774429
|
<gh_stars>0
"""
User Defined Data Type
"""
from dataclasses import dataclass
@dataclass
class Client:
name: str
id: str
credit: float
address: str
def main():
user1 = Client("Cristian", "00001", 4400000, "Cr 1 nro 1 01")
print(f'The client name is: {user1.name}')
print(f'The client id is: {user1.id}')
print(f'The client credit is: {user1.credit}')
print(f'The client address is: {user1.address}')
print(type(Client))
if __name__ == "__main__":
main()
| 3.546875
| 4
|
src/anmi/T3/__init__.py
|
alexmascension/ANMI
| 1
|
12774430
|
<reponame>alexmascension/ANMI
from sympy import symbols, poly, zeros, simplify
from anmi.T2 import factorizacion_QR
def matriz_krylov(A, x, n_iters=None):
"""Genera una matriz de krylov dada una matriz A y un vector x. Cada columna de la matriz es la iteración i de A^i*x.
Args:
A (matriz): Matriz de aplicación
x (vector): Vector base
n_iters (int, optional): Número de iteraciones. Por defecto es el número de filas de A + 1 (garantiza que la matriz de krylov tiene una combinación lineal).
Returns:
m_krylov: Matriz con las aplicaciones de krylov por columna.
"""
if n_iters is None:
n_iters = A.shape[0] + 1
m_krylov = zeros(A.shape[0], n_iters)
m_krylov[:, 0] = x
for i in range(1, n_iters):
m_krylov[:, i] = A * m_krylov[:, i - 1]
return simplify(m_krylov)
def sucesion_krylov(A, x):
"""
La sucesión de krylov viene dada como la expresión (ejemplo para 3x3)
p(l) = -l³ + a2 * l² + a1 * l + a0
donde a = [a0, a1, a2] y es la solución del sistema de ecuaciones
[x.T, Ax.T, Ax².T] * a.T = Ax³.T
Args:
A (matriz): Matriz para aplicar Krylov
x (vector): Vector base
Returns:
poli_krylov (poly): Polinomio para la matriz de Krylov.
a (vector): vector de coeficientes.
"""
m_krylov = matriz_krylov(A, x)
a = (m_krylov[:, : A.shape[0]] ** (-1)) * (
((-1) ** (A.shape[0] + 1)) * m_krylov[:, -1]
)
a = simplify(a)
lambda_ = symbols("lambda")
poli_krylov = poly(((-1) ** A.shape[0]) * (lambda_ ** A.shape[0]) + a[0])
for i in range(1, A.shape[0]):
poli_krylov += poly(a[i] * (lambda_ ** i))
return poli_krylov, a
def potencia_iterada(A, x, n_iters, devolver_ultimo=True):
"""Aplica el método de la potencia iterada para calcular el mayor autovalor de la matriz, usando el método de Krylov.
Args:
A (matriz): Matriz aplicación
x (vector): Vector base para el método. Si el vector es autovector dará fallo.
n_iters (int): Número de iteraciones
devolver_ultimo (bool, optional): Si True, devuelve el vector de la última iteración. Si False, devuelve todas las iteraciones.
Returns:
m_cocientes (matriz, vector): matriz/vector con el número de filas igual al de A, con los cocientes. Los números deberían tender al mayor autovalor de A.
"""
m_krylov = matriz_krylov(A, x, n_iters=n_iters)
if devolver_ultimo:
m_cocientes = zeros(m_krylov.shape[0], 1)
for row in range(m_cocientes.shape[0]):
m_cocientes[row, 0] = (
m_krylov[row, m_krylov.shape[1] - 1]
/ m_krylov[row, m_krylov.shape[1] - 2]
)
else:
m_cocientes = zeros(m_krylov.shape[0], m_krylov.shape[1] - 1)
for col in range(m_cocientes.shape[1]):
for row in range(
m_cocientes.shape[0]
            ):  # there is no element-wise division in sympy
m_cocientes[row, col] = m_krylov[row, col + 1] / m_krylov[row, col]
m_cocientes = simplify(m_cocientes)
return m_cocientes
def metodo_autovals_QR(A, n_iters=3, verbose=True):
"""Aplica el método QR para el cálculo de autovalores de una matriz.
Args:
A (matriz): Matriz para el metodo
n_iters (int, optional): Número de iteraciones. Defaults to 3.
verbose (bool, optional): Imprime información intermedia.
Returns:
dict: "A": lista de valores de A = R*Q en cada iteración, "R" Y "Q": matrices Q y R del método.
"""
list_A, list_Q, list_R = [], [], []
    # The list of A holds A^{(2)} through A^{(k+1)}, because A^{(1)} = A and is already known
for i in range(n_iters):
dict_QR = factorizacion_QR(A, verbose=verbose)
Q, R = simplify(dict_QR["Q"]), simplify(dict_QR["R"])
list_Q.append(Q)
list_R.append(R)
A = simplify(R * Q).copy()
list_A.append(A)
return {"A": list_A, "Q": list_Q, "R": list_R}
| 3.109375
| 3
|
Tests/test_KDTree.py
|
erpeg/biopython
| 1
|
12774431
|
<gh_stars>1-10
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for KDTree module."""
import unittest
try:
import numpy
del numpy
except ImportError:
from Bio import MissingExternalDependencyError
raise MissingExternalDependencyError("Install NumPy if you want to use Bio.KDTree.")
from numpy import sum, sqrt, array
from numpy import random
import warnings
from Bio import BiopythonDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonDeprecationWarning)
try:
from Bio.KDTree import _CKDTree
del _CKDTree
except ImportError:
from Bio import MissingExternalDependencyError
raise MissingExternalDependencyError("C module in Bio.KDTree not compiled")
from Bio.KDTree.KDTree import KDTree
nr_points = 5000
dim = 3
bucket_size = 5
radius = 0.01
query_radius = 10
def _dist(p, q):
diff = p - q
return sqrt(sum(diff * diff))
def neighbor_test(nr_points, dim, bucket_size, radius):
"""Test all fixed radius neighbor search.
Test all fixed radius neighbor search using the
KD tree C module.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- radius: radius of search (typically 0.05 or so)
Returns true if the test passes.
"""
# KD tree search
kdt = KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
kdt.kdt.set_data(coords)
neighbors = kdt.kdt.neighbor_search(radius)
r = [neighbor.radius for neighbor in neighbors]
if r is None:
l1 = 0
else:
l1 = len(r)
# now do a slow search to compare results
neighbors = kdt.kdt.neighbor_simple_search(radius)
r = [neighbor.radius for neighbor in neighbors]
if r is None:
l2 = 0
else:
l2 = len(r)
if l1 == l2:
# print("Passed.")
return True
else:
print("Not passed: %i != %i." % (l1, l2))
return False
def test(nr_points, dim, bucket_size, radius):
"""Test neighbor search.
Test neighbor search using the KD tree C module.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- radius: radius of search (typically 0.05 or so)
Returns true if the test passes.
"""
# kd tree search
kdt = KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
center = coords[0]
kdt.kdt.set_data(coords)
kdt.kdt.search_center_radius(center, radius)
r = kdt.get_indices()
if r is None:
l1 = 0
else:
l1 = len(r)
l2 = 0
# now do a manual search to compare results
for i in range(0, nr_points):
p = coords[i]
if _dist(p, center) <= radius:
l2 = l2 + 1
if l1 == l2:
# print("Passed.")
return True
else:
print("Not passed: %i != %i." % (l1, l2))
return False
def test_search(nr_points, dim, bucket_size, radius):
"""Test search all points within radius of center.
Search all point pairs that are within radius.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- radius: radius of search
Returns true if the test passes.
"""
kdt = KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
kdt.set_coords(coords)
kdt.search(coords[0], radius * 100)
radii = kdt.get_radii()
l1 = 0
for i in range(0, nr_points):
p = coords[i]
if _dist(p, coords[0]) <= radius * 100:
l1 = l1 + 1
if l1 == len(radii):
return True
else:
return False
def test_all_search(nr_points, dim, bucket_size, query_radius):
"""Test fixed neighbor search.
Search all point pairs that are within radius.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- query_radius: radius of search
Returns true if the test passes.
"""
kdt = KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
kdt.set_coords(coords)
kdt.all_search(query_radius)
indices = kdt.all_get_indices()
if indices is None:
l1 = 0
else:
l1 = len(indices)
radii = kdt.all_get_radii()
if radii is None:
l2 = 0
else:
l2 = len(radii)
if l1 == l2:
return True
else:
return False
class KDTreeTest(unittest.TestCase):
def test_KDTree_exceptions(self):
kdt = KDTree(dim, bucket_size)
with self.assertRaises(Exception) as context:
kdt.set_coords(random.random((nr_points, dim)) * 100000000000000)
self.assertTrue(
"Points should lie between -1e6 and 1e6" in str(context.exception)
)
with self.assertRaises(Exception) as context:
kdt.set_coords(random.random((nr_points, dim - 2)))
self.assertIn("Expected a Nx%i NumPy array" % dim, str(context.exception))
with self.assertRaises(Exception) as context:
kdt.search(array([0, 0, 0]), radius)
self.assertIn("No point set specified", str(context.exception))
def test_KDTree_neighbour(self):
for i in range(0, 10):
self.assertTrue(neighbor_test(nr_points, dim, bucket_size, radius))
def test_KDTree(self):
for i in range(0, 10):
self.assertTrue(test(nr_points, dim, bucket_size, radius))
def test_all_search(self):
for i in range(0, 5):
self.assertTrue(
test_all_search((nr_points // 10), dim, bucket_size, query_radius)
)
def test_search(self):
for i in range(0, 5):
self.assertTrue(test_search(nr_points, dim, bucket_size, radius))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 2.390625
| 2
|
tests/gbe/scheduling/test_rehearsal_wizard.py
|
bethlakshmi/gbe-divio-djangocms-python2.7
| 1
|
12774432
|
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from tests.factories.gbe_factories import (
ConferenceFactory,
GenericEventFactory,
PersonaFactory,
ProfileFactory,
)
from tests.contexts import (
StaffAreaContext,
VolunteerContext,
)
from scheduler.models import Event
from tests.functions.gbe_functions import (
assert_alert_exists,
grant_privilege,
login_as,
)
from settings import GBE_DATE_FORMAT
from tests.gbe.scheduling.test_scheduling import TestScheduling
class TestRehearsalWizard(TestScheduling):
'''Tests for the 2nd stage in the rehearsal wizard view'''
view_name = 'rehearsal_wizard'
def setUp(self):
self.show_volunteer = VolunteerContext()
self.current_conference = self.show_volunteer.conference
self.url = reverse(
self.view_name,
args=[self.current_conference.conference_slug],
urlconf='gbe.scheduling.urls')
self.client = Client()
self.privileged_user = ProfileFactory().user_object
grant_privilege(self.privileged_user, 'Scheduling Mavens')
def test_authorized_user_can_access(self):
login_as(self.privileged_user, self)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assert_event_was_picked_in_wizard(response, "rehearsal")
self.assertContains(response, str(self.show_volunteer.event.e_title))
self.assertContains(response,
"Make New Show")
def test_authorized_user_empty_conference(self):
other_conf = ConferenceFactory()
login_as(self.privileged_user, self)
self.url = reverse(
self.view_name,
args=[other_conf.conference_slug],
urlconf='gbe.scheduling.urls')
response = self.client.get(self.url)
self.assertNotContains(response,
str(self.show_volunteer.event.e_title))
self.assertContains(response,
"Make New Show")
def test_auth_user_can_pick_show(self):
login_as(self.privileged_user, self)
response = self.client.post(
self.url,
data={
'pick_show': True,
'show': self.show_volunteer.sched_event.pk},
follow=True)
self.assertRedirects(
response,
"%s?rehearsal_open=True" % reverse(
'edit_show',
urlconf='gbe.scheduling.urls',
args=[self.current_conference.conference_slug,
self.show_volunteer.sched_event.pk]))
def test_invalid_form(self):
login_as(self.privileged_user, self)
response = self.client.post(
self.url,
data={
'pick_show': True,
'show': "boo"})
self.assertContains(
response,
'Select a valid choice. boo is not one of the available choices.')
def test_auth_user_pick_new_show(self):
login_as(self.privileged_user, self)
response = self.client.post(
self.url,
data={
'pick_show': True,
'show': ""},
follow=True)
self.assertRedirects(
response,
reverse('create_ticketed_event_wizard',
urlconf='gbe.scheduling.urls',
args=[self.current_conference.conference_slug,
"show"])+"?")
| 1.984375
| 2
|
main.py
|
CornellDataScience/ssbm-gym
| 1
|
12774433
|
<reponame>CornellDataScience/ssbm-gym<gh_stars>1-10
import argparse
import torch
import torch.optim as optim
from StateNet import StateNet
from ppo_model import Actor
from envs import GoHighEnvVec
from ssbm_gym.ssbm_env import EnvVec, SSBMEnv, HierEnv
import train
import train_state
parser = argparse.ArgumentParser(description='A2C (Advantage Actor-Critic)')
parser.add_argument('--no-cuda', action='store_true', help='use to disable available CUDA')
parser.add_argument('--num-workers', type=int, default=4, help='number of parallel workers')
parser.add_argument('--rollout-steps', type=int, default=600, help='steps per rollout')
parser.add_argument('--total-steps', type=int, default=int(4e7), help='total number of steps to train for')
parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
parser.add_argument('--gamma', type=float, default=0.99, help='gamma parameter for GAE')
parser.add_argument('--lambd', type=float, default=1.00, help='lambda parameter for GAE')
parser.add_argument('--epsilon', type=float, default=0.10, help='epsilon parameter for PPO')
parser.add_argument('--value_coeff', type=float, default=0.5, help='value loss coefficient')
parser.add_argument('--entropy_coeff', type=float, default=0.01, help='entropy loss coefficient')
parser.add_argument('--grad_norm_limit', type=float, default=40., help='gradient norm clipping threshold')
parser.add_argument('--state_pred', type=bool, default=False, help='adds state prediction model')
parser.add_argument('--state_offset', type=int, default=7, help='offset for state prediction, default 7')
parser.add_argument('--hier', type=bool, default=False, help='include hierarchical options in state')
args = parser.parse_args()
options = dict(
render=False,
player1='ai',
player2='human',
char1='fox',
char2='fox',
cpu2=3,
stage='final_destination',
)
if __name__ == "__main__":
if args.hier:
pretrain_env = EnvVec(HierEnv, args.num_workers, args.total_steps, options)
else:
pretrain_env = EnvVec(SSBMEnv, args.num_workers, args.total_steps, options)
if args.state_pred:
#double the input size
net = Actor(pretrain_env.observation_space.n * 2, pretrain_env.action_space.n)
state_net = StateNet(pretrain_env.observation_space.n)
optimizer = optim.Adam(net.parameters(), lr=args.lr)
optimizer_state = optim.Adam(state_net.parameters(), lr=args.lr)
n_steps = train_state.pretrain(args, net, optimizer, pretrain_env, state_net, optimizer_state)
else:
net = Actor(pretrain_env.observation_space.n, pretrain_env.action_space.n)
optimizer = optim.Adam(net.parameters(), lr=args.lr)
n_steps = train.pretrain(args, net, optimizer, pretrain_env)
options['player2'] = 'cpu'
if args.hier:
train_env = EnvVec(HierEnv, args.num_workers, args.total_steps, options)
else:
train_env = EnvVec(SSBMEnv, args.num_workers, args.total_steps, options)
if args.state_pred:
train_state.train(args, net, optimizer, train_env, n_steps, state_net, optimizer_state)
else:
train.train(args, net, optimizer, train_env, n_steps)
| 1.921875
| 2
|
day3/day3a-dbg.py
|
fseraphine/aoc2020
| 0
|
12774434
|
<reponame>fseraphine/aoc2020
#!/usr/bin/python
compteur = 0
i,j = 3,1
terrain = []
fichier = open('day3_input.txt')
for l in fichier:
    terrain.append(l.strip('\n'))
nblig = len(terrain)
nbcol = len(terrain[0])
print('nblig : %s / nbcol : %s' % (nblig,nbcol))
for f in terrain:
print(f)
while j<nblig:
#print(i,j,terrain[j][i],compteur)
if terrain[j][i] == '#':
compteur = compteur +1
        print(terrain[j][:i] + 'X' + terrain[j][i+1:])
else:
        print(terrain[j][:i] + 'O' + terrain[j][i+1:])
i = (i+3)%nbcol
j = j+1
print(compteur)
| 3.140625
| 3
|
logsearch/zuul.py
|
gibizer/zuul-log-search
| 0
|
12774435
|
<reponame>gibizer/zuul-log-search
import logging
from typing import List, Dict, Optional, Set
import os
import requests
LOG = logging.getLogger(__name__)
class ZuulException(BaseException):
pass
class API:
def __init__(self, zuul_url: str) -> None:
self.zuul_url = zuul_url
def get_build(self, tenant: str, build_uuid: str) -> Dict:
try:
r = requests.get(
self.zuul_url + f"/tenant/{tenant}/builds",
params={"uuid": build_uuid},
)
r.raise_for_status()
builds = r.json()
if len(builds) == 0:
raise ZuulException(f"Build {build_uuid} not found")
if len(builds) > 1:
raise ZuulException(
f"More than one results for {build_uuid}: %s" % builds
)
return builds[0]
except requests.RequestException as e:
raise ZuulException("Cannot access Zuul") from e
def list_builds(
self,
tenant,
project: Optional[str],
pipeline: Optional[str],
jobs: Set[str],
branches: List[str],
result: Optional[str],
voting: Optional[bool],
limit: Optional[int],
change: Optional[int],
patchset: Optional[int],
) -> List[Dict]:
params: Dict = {}
if project is not None:
params["project"] = project
if pipeline is not None:
params["pipeline"] = pipeline
params["job_name"] = jobs
params["branch"] = branches
if result is not None:
params["result"] = result
if voting is not None:
params["voting"] = "1" if voting else "0"
if limit is not None:
params["limit"] = limit
if change is not None:
params["change"] = change
if patchset is not None:
params["patchset"] = patchset
try:
r = requests.get(
self.zuul_url + f"/tenant/{tenant}/builds", params=params
)
r.raise_for_status()
return r.json()
except requests.RequestException as e:
raise ZuulException("Cannot access Zuul") from e
@staticmethod
def fetch_log(build, log_file, local_path, progress_handler) -> None:
url = os.path.join(build["log_url"], log_file)
LOG.debug(f"Fetching {url}")
i = 0
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=10 * 1024):
f.write(chunk)
i += 1
progress_handler(i)
| 2.421875
| 2
|
nets/mish.py
|
HirataYurina/yolov4-tiny-keras
| 13
|
12774436
|
<filename>nets/mish.py
# -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:<EMAIL>
# datetime:1993/12/01
# filename:mish.py
# software: PyCharm
import keras.backend as K
import keras
class Mish(keras.layers.Layer):
"""mish activation
Mish: A Self Regularized Non-Monotonic Activation Function
https://arxiv.org/abs/1908.08681?context=stat
    ReLU, leaky ReLU and PReLU share the disadvantage of being hard, piecewise-linear activations.
    Mish is a soft activation that can carry more information through the network.
Mish(x) = x * tanh(log(1 + e ^ x))
"""
def __init__(self, **kwargs):
super(Mish, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
results = inputs * K.tanh(K.softplus(inputs))
return results
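# --- Minimal usage sketch (not part of the original file): applies the Mish
# layer to a small constant tensor; the input values are arbitrary.
if __name__ == '__main__':
    import numpy as np
    x = K.constant(np.array([[-1.0, 0.0, 1.0]]))
    y = Mish()(x)
    print(K.eval(y))  # approximately [[-0.303, 0.0, 0.865]]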
| 2.96875
| 3
|
budget_nanny/budgets.py
|
Pepedou/budget-nanny
| 0
|
12774437
|
import itertools
from budget_nanny.api_requests import APIRequester, BUDGETS_ENDPOINT, BUDGET_ENDPOINTS
DEFAULT_BUDGET = 'Personal'
class BudgetRequester:
def __init__(self, budget):
self.budget = budget
self.api_requester = APIRequester()
def create_transaction(self, transaction_data):
return self.api_requester.post(
BUDGET_ENDPOINTS['transactions'].replace('budget_id', self.budget['id']), {
'transaction': transaction_data
}
)
def create_transactions(self, transactions):
return self.api_requester.post(
BUDGET_ENDPOINTS['transactions'].replace('budget_id', self.budget['id']), {
'transactions': list(transactions)
}
)
def get_accounts(self):
return self._get_budget_collection('accounts')
def get_categories(self):
return itertools.chain.from_iterable([
x['categories'] for x in self.api_requester.get(
BUDGET_ENDPOINTS['categories'].replace('budget_id', self.budget['id'])
)['category_groups']
])
def get_payees(self):
return self._get_budget_collection('payees')
def get_transactions(self):
return self._get_budget_collection('transactions')
def _get_budget_collection(self, collection_key):
return self.api_requester.get(
BUDGET_ENDPOINTS[collection_key].replace('budget_id', self.budget['id'])
)[collection_key]
def get_budgets():
return APIRequester().get(BUDGETS_ENDPOINT)['budgets']
budgets = get_budgets()
default_budget = [x for x in budgets if x['name'] == DEFAULT_BUDGET][0]
default_budget_requester = BudgetRequester(default_budget)
| 2.296875
| 2
|
tools/arrays.py
|
fakufaku/doamm
| 7
|
12774438
|
<gh_stars>1-10
import numpy as np
from pyroomacoustics import MicrophoneArray
_pyramic_loc = np.array(
[
[-0.014433756729740652, -0.02500000000000001, -0.14288690166235207],
[-0.02598076211353317, -0.04500000000000001, -0.11022703842524302],
[-0.037527767497325684, -0.06500000000000002, -0.077567175188134],
[-0.04214656965084269, -0.07300000000000001, -0.06450322989329038],
[-0.04445597072760119, -0.07700000000000001, -0.057971257245868546],
[-0.049074772881118195, -0.085, -0.04490731195102493],
[-0.060621778264910706, -0.10500000000000001, -0.012247448713915893],
[-0.07216878364870323, -0.125, 0.020412414523193145],
[-0.08660254037844388, -0.1, 0.061237243569579464],
[-0.08660254037844388, -0.06, 0.061237243569579464],
[-0.08660254037844388, -0.02, 0.061237243569579464],
[-0.08660254037844388, -0.004, 0.061237243569579464],
[-0.08660254037844388, 0.004, 0.061237243569579464],
[-0.08660254037844388, 0.02, 0.061237243569579464],
[-0.08660254037844388, 0.06, 0.061237243569579464],
[-0.08660254037844388, 0.1, 0.061237243569579464],
[0.028867513459481305, 0.0, -0.14288690166235207],
[0.05196152422706635, 0.0, -0.11022703842524302],
[0.07505553499465137, 0.0, -0.077567175188134],
[0.08429313930168539, 0.0, -0.06450322989329038],
[0.0889119414552024, 0.0, -0.057971257245868546],
[0.09814954576223642, 0.0, -0.04490731195102493],
[0.12124355652982144, 0.0, -0.012247448713915893],
[0.1443375672974065, 0.0, 0.020412414523193145],
[0.1299038105676658, -0.025000000000000022, 0.061237243569579464],
[0.09526279441628827, -0.04500000000000002, 0.061237243569579464],
[0.060621778264910726, -0.06500000000000002, 0.061237243569579464],
[0.04676537180435971, -0.07300000000000001, 0.061237243569579464],
[0.039837168574084196, -0.07700000000000001, 0.061237243569579464],
[0.02598076211353318, -0.085, 0.061237243569579464],
[-0.008660254037844359, -0.10500000000000001, 0.061237243569579464],
[-0.04330127018922191, -0.125, 0.061237243569579464],
[-0.014433756729740652, 0.02500000000000001, -0.14288690166235207],
[-0.02598076211353317, 0.04500000000000001, -0.11022703842524302],
[-0.037527767497325684, 0.06500000000000002, -0.077567175188134],
[-0.04214656965084269, 0.07300000000000001, -0.06450322989329038],
[-0.04445597072760119, 0.07700000000000001, -0.057971257245868546],
[-0.049074772881118195, 0.085, -0.04490731195102493],
[-0.060621778264910706, 0.10500000000000001, -0.012247448713915893],
[-0.07216878364870323, 0.125, 0.020412414523193145],
[-0.04330127018922191, 0.125, 0.061237243569579464],
[-0.008660254037844359, 0.10500000000000001, 0.061237243569579464],
[0.02598076211353318, 0.085, 0.061237243569579464],
[0.039837168574084196, 0.07700000000000001, 0.061237243569579464],
[0.04676537180435971, 0.07300000000000001, 0.061237243569579464],
[0.060621778264910726, 0.06500000000000002, 0.061237243569579464],
[0.09526279441628827, 0.04500000000000002, 0.061237243569579464],
[0.1299038105676658, 0.025000000000000022, 0.061237243569579464],
]
)
def get_pyramic(center):
"""
    Provides a (3x48) array containing the locations of the microphones of the Pyramic array geometry in its columns
Parameters
----------
center: List or ndarray of length 3
The 3D coordinates of the center of the array
"""
center = np.array(center)
assert (
center.ndim == 1
), "Center should be provided as a 1D array with 3 entries containing the coordinates of the center of the array"
assert (
center.shape[0] == 3
), "Center should be provided as a 1D array with 3 entries containing the coordinates of the center of the array"
R = center[:, None] + (_pyramic_loc - np.mean(_pyramic_loc, axis=0)).T
return R
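# Hypothetical usage sketch (not part of the original module): the returned
# array has one microphone position per column, so for a Pyramic array
# centered 1 m above the origin one would expect
#   R = get_pyramic([0.0, 0.0, 1.0])
#   R.shape  # -> (3, 48)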
def get_circular(center, n_mics, radius, rot=0.0, center_mic=False):
"""
Create a circular array parallel to the x-y plane
"""
center = np.array(center)
assert (
center.ndim == 1
), "Center should be provided as a 1D array with 3 entries containing the coordinates of the center of the array"
assert (
center.shape[0] == 3
), "Center should be provided as a 1D array with 3 entries containing the coordinates of the center of the array"
R = np.zeros((3, n_mics), dtype=center.dtype)
if center_mic:
R[:, 0] = center
n_mics -= 1
phi = 2.0 * np.pi * np.arange(n_mics) / n_mics + rot
R[0, -n_mics:] = center[0] + radius * np.cos(phi)
R[1, -n_mics:] = center[1] + radius * np.sin(phi)
R[2, -n_mics:] = center[2]
return R
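# Hypothetical usage sketch (not part of the original module): a 7-element
# array with 6 microphones on a 5 cm circle around a center microphone,
# similar to what get_amazon_echo below builds with a 4 cm radius:
#   R = get_circular([0.0, 0.0, 0.0], n_mics=7, radius=0.05, center_mic=True)
#   R.shape  # -> (3, 7)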
def get_amazon_echo(center):
    """
    Returns the coordinates of an Amazon Echo-like microphone array
    (six microphones on a 4 cm circle around a center microphone)
    """
    R = get_circular(
        center=center, n_mics=7, radius=0.04, rot=0.0, center_mic=True
    )
    return R
_by_name = {
"amazon_echo": get_amazon_echo,
"pyramic": get_pyramic,
}
def get_by_name(name, center):
return _by_name[name](center)
| 1.796875
| 2
|
kalman/quadrotor.py
|
jlrandulfe/drone-swarm
| 8
|
12774439
|
import numpy as np
from scipy import linalg as la
class quadrotor:
def __init__(self, tag, m, l, J, CDl, CDr, kt, km, kw, att, \
pqr, xyz, v_ned, w):
# physical constants
self.tag = tag
self.m = m # [Kg]
self.l = l # [m]
self.J = J # Inertia matrix [Kg/m^2]
self.Jinv = la.inv(J)
self.CDl = CDl # Linear Drag coefficient
self.CDr = CDr # Angular Drag coefficient
self.kt = kt # Propeller thrust [N s^2]
self.km = km # Propeller moment [N m s^2]
self.kw = kw # Motor transient [1/s]
# Configuration of the propellers
self.w_to_Tlmn = np.array([[ -kt, -kt, -kt, -kt],\
[ 0,-l*kt, 0, l*kt],\
[ l*kt, 0,-l*kt, 0],\
[ -km, km, -km, km]])
self.Tlmn_to_w = la.inv(self.w_to_Tlmn)
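        # w_to_Tlmn maps the squared propeller speeds [w1^2 .. w4^2] to the
        # total thrust T and the body moments (l, m, n); its inverse
        # Tlmn_to_w is used by the controllers below to turn desired
        # thrust/moments back into desired (squared) propeller speeds.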
# Physical variables
self.att = att # Attitude [rad]
self.pqr = pqr # Body angular velocity [rad/sec]
self.xyz = xyz # Body position NED [m]
self.v_ned = v_ned # Body linear velocity NED [m/sec]
self.w = w # Actual angular velocity of the propellers [rad/sec]
self.Ft = np.array([0.0, 0.0, 0.0]) # Motor (Thrust) Force [N]
self.Fa = np.array([0.0, 0.0, 0.0]) # Aerodynamic Forces [N]
self.Mt = np.array([0.0, 0.0, 0.0]) # Motor Moment [N m]
self.Ma = np.array([0.0, 0.0, 0.0]) # Aerodynamic Moments [N m]
self.crashed = 0 # Ground hit?
## GNC variables
# Geometric (1) or std (0) attitude controller
self.att_con = 1
# Gains for the attitude controller
self.kp = 2
self.kq = 2
self.kr = 2
# Gains for classical controllers
self.k_pos = 2e-1
self.k_vel = 2e-1
# Gains for Lyapunov controllers
self.k_alt = 1e-2
self.k_vz = 1
self.k_xy = 1e-2
self.k_vxy = 1e-1
self.k_xi_g_v = 1e-1
self.k_xi_g_e_alt = 5e-3
self.k_xi_CD_e_v = 1e-3
self.e_alt = 0 # We need it for the estimator xi_g
self.e_v = np.array([0, 0]) # We need it for the estimator xi_Cd
# Gains for geometric att controller
self.k_eR = 5e-3
self.k_om = 1e-2
self.T_d = 0 # Desired thrust [N]
self.lmn_d = np.array([0.0, 0.0, 0.0]) # Desired angular momentum [N m]
self.w_d = w # Desired angular velocity for the propellers [rad/sec]
# Desired attitude roll, pitch [rads]
self.att_d = np.array([0.0, 0.0, 0.0])
        # Hovering desired 3D position NED [m]
self.xyz_d = np.array([0.0, 0.0, 0.0])
self.v_ned_d = np.array([0.0, 0.0, 0.0]) # Desired vel 3D NED
self.yaw_d = 0 # Desired yaw [rad]
# Estimators
self.xi_g = 9.8 # Initial guess of gravity
self.xi_CD = 0
### GNC Functions ###
def control_att(self):
# Attitude controller Lyapunov approach
ephi = self.att[0] - self.att_d[0]
ethe = self.att[1] - self.att_d[1]
epsi = self.att[2] - self.att_d[2]
# Desired moments
self.lmn_d[0] = -self.J[0, 0]*(ephi + self.kp*self.pqr[0]) \
-(self.J[1, 1]-self.J[2, 2])*self.pqr[1]*self.pqr[2]
self.lmn_d[1] = -self.J[1, 1]*(ethe + self.kq*self.pqr[1]) \
                -(self.J[2, 2]-self.J[0, 0])*self.pqr[2]*self.pqr[0]
self.lmn_d[2] = -self.J[2, 2]*(epsi + self.kr*self.pqr[2])
def control_att_geometric(self):
R = self.Rot_bn().transpose()
Rd = self.Rotd_bn(self.att_d[0], self.att_d[1], self.att_d[2]).transpose()
e_RM = 0.5*(Rd.transpose().dot(R) - R.transpose().dot(Rd))
e_R = self.build_vector_from_tensor(e_RM)
om = np.array([self.pqr[0], self.pqr[1], self.pqr[2]])
e_om = om
M = -self.k_eR*e_R -self.k_om*e_om + np.cross(om, self.J.dot(om))
self.lmn_d = M
## Lyapunov based on controllers
def set_xyz_ned_lya(self, xyz_d):
e_alt = self.xyz[2] - xyz_d[2]
self.e_alt = e_alt
e_xy = self.xyz[0:2] - xyz_d[0:2]
self.T_d = (-self.xi_g -self.k_alt*e_alt \
-self.k_vz*self.v_ned[2])*self.m
axy = -self.k_xy*e_xy -self.k_vxy*self.v_ned[0:2]
ax = axy[0]
ay = axy[1]
# Guidance attitude
phi_d = -self.m/self.T_d*(ay*np.cos(self.att[2])-ax*np.sin(self.att[2]))
the_d = self.m/self.T_d*(ax*np.cos(self.att[2])+ay*np.sin(self.att[2]))
# Control motors
self.att_d = np.array([phi_d, the_d, self.yaw_d])
if self.att_con == 0:
self.control_att()
elif self.att_con == 1:
self.control_att_geometric()
self.w_d = np.sqrt(self.Tlmn_to_w.dot(np.append(self.T_d, self.lmn_d)))
def set_a_2D_alt_lya(self, a_2d_d, altitude_d):
e_alt = self.xyz[2] - altitude_d
self.e_alt = e_alt
self.T_d = (-self.xi_g -self.k_alt*e_alt \
-self.k_vz*self.v_ned[2])*self.m
ax = a_2d_d[0]
ay = a_2d_d[1]
# Guidance attitude
phi_d = -self.m/self.T_d*(ay*np.cos(self.att[2])-ax*np.sin(self.att[2]))
the_d = self.m/self.T_d*(ax*np.cos(self.att[2])+ay*np.sin(self.att[2]))
# Control motors
self.att_d = np.array([phi_d, the_d, self.yaw_d])
if self.att_con == 0:
self.control_att()
elif self.att_con == 1:
self.control_att_geometric()
self.w_d = np.sqrt(self.Tlmn_to_w.dot(np.append(self.T_d, self.lmn_d)))
def step_estimator_xi_g(self, dt):
self.xi_g = self.xi_g + self.k_xi_g_v*self.v_ned[2]*dt \
+ self.k_xi_g_e_alt*self.e_alt*dt
def set_v_2D_alt_lya(self, vxy_d, alt_d):
e_alt = self.xyz[2] - alt_d
self.e_alt = e_alt
vxy = self.v_ned[0:2]
e_v = vxy - vxy_d
self.e_v = e_v
self.T_d = (-self.xi_g -self.k_alt*e_alt \
-self.k_vz*self.v_ned[2])*self.m
axy = self.xi_CD*la.norm(vxy)*vxy -self.k_vxy*e_v
ax = axy[0]
ay = axy[1]
# Guidance attitude
phi_d = -self.m/self.T_d*(ay*np.cos(self.att[2])-ax*np.sin(self.att[2]))
the_d = self.m/self.T_d*(ax*np.cos(self.att[2])+ay*np.sin(self.att[2]))
# Control motors
self.att_d = np.array([phi_d, the_d, self.yaw_d])
if self.att_con == 0:
self.control_att()
elif self.att_con == 1:
self.control_att_geometric()
self.w_d = np.sqrt(self.Tlmn_to_w.dot(np.append(self.T_d, self.lmn_d)))
def step_estimator_xi_CD(self, dt):
self.xi_CD = self.xi_CD \
- self.k_xi_CD_e_v*la.norm(self.v_ned[0:2])*(self.e_v.T).dot(self.v_ned[0:2])*dt
### Physics Simulation ###
def step(self, dt):
self.step_rotors(dt)
self.step_6DoF(dt)
self.step_estimator_xi_g(dt)
self.step_estimator_xi_CD(dt)
def step_rotors(self, dt): # Motors modelled as 1st order linear system
# Check Saturation
for i in range (0, 4):
if self.w_d[i] < 0:
self.w_d[i] = 0
elif self.w_d[i] > 500:
self.w_d[i] = 500
e_w = self.w - self.w_d
w_dot = -self.kw*np.identity(4).dot(e_w)
self.w = self.w + w_dot*dt
def step_6DoF(self, dt):
Rbn = self.Rot_bn() # Rotational matrix from Nav to Body
g = np.array([0, 0, 9.81]) # Gravity vector
p_dot = Rbn.dot(self.v_ned) # Velocity in body coordinates
self.rotors_forces_moments() # Forces and moments by motors
self.aero_forces_moments() # Forces and moments by environment
# Time derivatives (acc and vel) given by physics equations
att_dot = (self.R_pqr()).dot(self.pqr)
p_ddot = (self.Ft + self.Fa)/self.m + Rbn.dot(g) \
- np.cross(self.pqr, p_dot)
pqr_dot = self.Jinv.dot(self.Mt + self.Ma \
- np.cross(self.pqr, self.J.dot(self.pqr)))
# Propagation of positions/angles and velocities
self.att = self.att + att_dot*dt
#for i in range(0,3):
# self.att[i] = self.norm_ang(self.att[i])
self.pqr = self.pqr + pqr_dot*dt
# Touching the ground?
if self.xyz[2] > 0:
self.xyz[2] = 0
if la.norm(self.v_ned) > 0.5:
                print("%s crashed into the ground" % self.tag)
self.crashed = 1
self.v_ned[0:3] = 0
else:
self.v_ned = self.v_ned + Rbn.T.dot(p_ddot)*dt
self.xyz = self.xyz + self.v_ned*dt
# Forces and moments given by motors and environment
def rotors_forces_moments(self):
Tlmn = self.w_to_Tlmn.dot(np.array([self.w[0]**2, \
self.w[1]**2, self.w[2]**2, self.w[3]**2]))
self.Ft = np.array([0, 0, Tlmn[0]]) # Thrust
self.Mt = Tlmn[1:4] # Moment
def aero_forces_moments(self):
Rbn = self.Rot_bn()
p_dot = Rbn.dot(self.v_ned)
Dl = -p_dot*la.norm(p_dot)*self.CDl # Linear drag
Dr = -self.pqr*la.norm(self.pqr)*self.CDr # Angular drag
self.Fa = Dl # Forces by the environment
self.Ma = Dr # Moments by the environment
### Misc ###
# Angles always between -pi and pi
def norm_ang(self, x):
if x > np.pi:
x = x - 2*np.pi
elif x <= -np.pi:
x = x + 2*np.pi
return x
# Rotational matrix from Nav to Body
def Rot_bn(self):
phi = self.att[0]
theta = self.att[1]
psi = self.att[2]
cphi = np.cos(phi)
sphi = np.sin(phi)
cthe = np.cos(theta)
sthe = np.sin(theta)
cpsi = np.cos(psi)
spsi = np.sin(psi)
Rx = np.array([[1, 0, 0], \
[0, cphi, sphi], \
[0, -sphi, cphi]])
Ry = np.array([[cthe, 0, -sthe], \
[ 0, 1, 0], \
[sthe, 0, cthe]])
Rz = np.array([[ cpsi, spsi, 0], \
[-spsi, cpsi, 0], \
[ 0, 0, 1]])
R = Rx.dot(Ry).dot(Rz)
return R
# Rotation matrix from Nav to given Body attitude
def Rotd_bn(self, phi, theta, psi):
cphi = np.cos(phi)
sphi = np.sin(phi)
cthe = np.cos(theta)
sthe = np.sin(theta)
cpsi = np.cos(psi)
spsi = np.sin(psi)
Rx = np.array([[1, 0, 0], \
[0, cphi, sphi], \
[0, -sphi, cphi]])
Ry = np.array([[cthe, 0, -sthe], \
[ 0, 1, 0], \
[sthe, 0, cthe]])
Rz = np.array([[ cpsi, spsi, 0], \
[-spsi, cpsi, 0], \
[ 0, 0, 1]])
R = Rx.dot(Ry).dot(Rz)
return R
# Propagation matrix for computing the angular velocity of the attitude
def R_pqr(self):
phi = self.att[0]
theta = self.att[1]
tthe = np.tan(theta)
cthe = np.cos(theta)
cphi = np.cos(phi)
sphi = np.sin(phi)
R = np.array([[1, tthe*sphi, tthe*cphi], \
[0, cphi, -sphi], \
[0, sphi/cthe, cphi/cthe]])
return R
    # Building a tensor from a vector and vice versa
def build_tensor_from_vector(self, a, b, c):
T = np.array([[ 0, -c, b],
[ c, 0, -a],
[-b, a, 0]])
return T
def build_vector_from_tensor(self, T):
v = np.array([T[2, 1], T[0, 2], T[1, 0]])
return v
| 2.734375
| 3
|
mb_aligner/dal/common.py
|
Gilhirith/mb_aligner_SH
| 3
|
12774440
|
import os
import cv2
import numpy as np
from .fs_access import FSAccess
def read_image_file(fname_url):
with FSAccess(fname_url, True) as image_f:
img_buf = image_f.read()
np_arr = np.frombuffer(img_buf, np.uint8)
img = cv2.imdecode(np_arr, 0)
return img
def write_image_file(fname_url, img):
    # cv2.imencode expects the target extension and the image array and
    # returns a (success, buffer) tuple
    retval, img_buf = cv2.imencode(os.path.splitext(fname_url)[1], img)
    if not retval:
        raise RuntimeError('Could not encode image for %s' % fname_url)
    with FSAccess(fname_url, True, read=False) as image_f:
        image_f.write(img_buf.tobytes())
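# Hypothetical usage sketch (assumes FSAccess accepts local paths as URLs):
#   img = read_image_file('/tmp/section.png')   # grayscale ndarray
#   write_image_file('/tmp/section_copy.png', img)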
| 3.03125
| 3
|
Chapter08/qt08_winStyle02.py
|
csy1993/PythonQt
| 0
|
12774441
|
# -*- coding: utf-8 -*-
# Import modules
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtCore import Qt
### Custom window class
class MyWindow(QMainWindow):
    '''Custom window class'''
    ### Constructor
    def __init__(self, parent=None):
        '''Constructor'''
        # Call the parent class constructor
        super(MyWindow, self).__init__(parent)
        # Set the window flags (frameless window)
        self.setWindowFlags(Qt.FramelessWindowHint)
        # To make the window visible, set its background color (using QSS)
        self.setStyleSheet('''background-color:blue; ''')
    ### Overridden function
    def showMaximized(self):
        '''Maximize the window'''
        # Get the desktop widget
        desktop = QApplication.desktop()
        # Get the available screen geometry
        rect = desktop.availableGeometry()
        # Set the window geometry to fill it
        self.setGeometry(rect)
        # Show the window
        self.show()
### Main function
if __name__ == "__main__":
    '''Main function'''
    # Create the application
    app = QApplication(sys.argv)
    # Create the window
    window = MyWindow()
    # Show the window maximized
    window.showMaximized()
    # Run the application event loop
    sys.exit(app.exec_())
| 2.5625
| 3
|
examples/test_global_catalog_v1_examples.py
|
nathan-hekman/platform-services-python-sdk
| 0
|
12774442
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Examples for GlobalCatalogV1
"""
import os
import io
import json
import uuid
import pytest
from ibm_cloud_sdk_core import ApiException, read_external_sources
from ibm_platform_services.global_catalog_v1 import *
#
# This file provides an example of how to use the Global Catalog service.
#
# The following configuration properties are assumed to be defined:
#
# GLOBAL_CATALOG_URL=<service url>
# GLOBAL_CATALOG_AUTH_TYPE=iam
# GLOBAL_CATALOG_APIKEY=<IAM apikey>
# GLOBAL_CATALOG_AUTH_URL=<IAM token service URL - omit this if using the production environment>
#
# These configuration properties can be exported as environment variables, or stored
# in a configuration file and then:
# export IBM_CREDENTIALS_FILE=<name of configuration file>
#
config_file = 'global_catalog.env'
global_catalog_service = None
catalog_entry_id = None
##############################################################################
# Start of Examples for Service: GlobalCatalogV1
##############################################################################
# region
class TestGlobalCatalogV1Examples():
"""
Example Test Class for GlobalCatalogV1
"""
@classmethod
def setup_class(cls):
global global_catalog_service
if os.path.exists(config_file):
os.environ['IBM_CREDENTIALS_FILE'] = config_file
# begin-common
global_catalog_service = GlobalCatalogV1.new_instance()
# end-common
assert global_catalog_service is not None
print('Setup complete.')
needscredentials = pytest.mark.skipif(
not os.path.exists(config_file),
reason="External configuration not available, skipping...")
@needscredentials
def test_create_catalog_entry_example(self):
"""
create_catalog_entry request example
"""
global catalog_entry_id
try:
# begin-create_catalog_entry
overview_model_EN = {
'display_name': 'Example Web Starter',
'description': 'Use the Example service in your applications',
'long_description': 'This is a starter that helps you use the Example service within your applications.',
}
image_model = {
'image': 'https://somehost.com/examplewebstarter/cachedIcon/large/0',
'small_image': 'https://somehost.com/examplewebstarter/cachedIcon/small/0',
'medium_image': 'https://somehost.com/examplewebstarter/cachedIcon/medium/0',
'feature_image': 'https://somehost.com/examplewebstarter/cachedIcon/large/0',
}
provider_model = {
'email': '<EMAIL>',
'name': 'Example Starter Co., Inc.',
'contact': 'Example Starter Developer Relations',
'support_email': '<EMAIL>',
'phone': '800-555-1234',
}
metadata_model = {
'version': '1.0.0',
}
catalog_entry_id = str(uuid.uuid4())
catalog_entry = global_catalog_service.create_catalog_entry(
name='exampleWebStarter123',
kind=CatalogEntry.KindEnum.TEMPLATE,
overview_ui={
'en': overview_model_EN
},
images=image_model,
disabled=False,
tags=['example-tag-1', 'example-tag-2'],
provider=provider_model,
id=catalog_entry_id,
active=True,
metadata=metadata_model,
).get_result()
print('\ncreate_catalog_entry() result:\n' + json.dumps(catalog_entry, indent=2))
# end-create_catalog_entry
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_catalog_entry_example(self):
"""
get_catalog_entry request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_catalog_entry
catalog_entry = global_catalog_service.get_catalog_entry(
id=catalog_entry_id,
complete=True,
).get_result()
print('\nget_catalog_entry() result:\n' + json.dumps(catalog_entry, indent=2))
# end-get_catalog_entry
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_catalog_entry_example(self):
"""
update_catalog_entry request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-update_catalog_entry
overview_model_EN = {
'display_name': 'Example Web Starter V2',
'description': 'Use the Example V2 service in your applications',
'long_description': 'This is a starter that helps you use the Example V2 service within your applications.',
}
image_model = {
'image': 'https://somehost.com/examplewebstarter/cachedIcon/large/0',
'small_image': 'https://somehost.com/examplewebstarter/cachedIcon/small/0',
'medium_image': 'https://somehost.com/examplewebstarter/cachedIcon/medium/0',
'feature_image': 'https://somehost.com/examplewebstarter/cachedIcon/large/0',
}
provider_model = {
'email': '<EMAIL>',
'name': 'Example Starter Co., Inc.',
'contact': 'Example Starter Developer Relations',
'support_email': '<EMAIL>',
'phone': '800-555-1234',
}
metadata_model = {
'version': '2.0.0',
}
catalog_entry = global_catalog_service.update_catalog_entry(
id=catalog_entry_id,
name='exampleWebStarter123',
kind=CatalogEntry.KindEnum.TEMPLATE,
overview_ui={
'en': overview_model_EN,
},
images=image_model,
disabled=False,
tags=['example-tag-1', 'example-tag-2', 'new-example-tag-3'],
provider=provider_model,
active=True,
metadata=metadata_model,
).get_result()
print('\nupdate_catalog_entry() result:\n' + json.dumps(catalog_entry, indent=2))
# end-update_catalog_entry
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_catalog_entries_example(self):
"""
list_catalog_entries request example
"""
try:
# begin-list_catalog_entries
entry_search_result = global_catalog_service.list_catalog_entries(
offset=0,
limit=10,
q='kind:template tag:example-tag-1',
complete=True,
).get_result()
print('\nlist_catalog_entries() result:\n' + json.dumps(entry_search_result, indent=2))
# end-list_catalog_entries
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_child_objects_example(self):
"""
get_child_objects request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_child_objects
entry_search_result = global_catalog_service.get_child_objects(
id=catalog_entry_id,
kind='*',
offset=0,
limit=10,
complete=True,
).get_result()
print('\nget_child_objects() result:\n' + json.dumps(entry_search_result, indent=2))
# end-get_child_objects
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_restore_catalog_entry_example(self):
"""
restore_catalog_entry request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-restore_catalog_entry
response = global_catalog_service.restore_catalog_entry(
id=catalog_entry_id,
).get_result()
print('\nrestore_catalog_entry() result:\n' + json.dumps(response, indent=2))
# end-restore_catalog_entry
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_visibility_example(self):
"""
get_visibility request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_visibility
visibility = global_catalog_service.get_visibility(
id=catalog_entry_id,
).get_result()
print('\nget_visibility() result:\n' + json.dumps(visibility, indent=2))
# end-get_visibility
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_visibility_example(self):
"""
update_visibility request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-update_visibility
response = global_catalog_service.update_visibility(
id=catalog_entry_id,
extendable=False,
).get_result()
print('\nupdate_visibility() result:\n' + json.dumps(response, indent=2))
# end-update_visibility
except ApiException as e:
print(
'update_visibility() returned the following error: {0}'.format(str(e.message)))
@needscredentials
def test_get_pricing_example(self):
"""
get_pricing request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_pricing
pricing_get = global_catalog_service.get_pricing(
id=catalog_entry_id,
).get_result()
print('\nget_pricing() result:\n' + json.dumps(pricing_get, indent=2))
# end-get_pricing
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_audit_logs_example(self):
"""
get_audit_logs request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_audit_logs
audit_search_result = global_catalog_service.get_audit_logs(
id=catalog_entry_id,
offset=0,
limit=10,
).get_result()
print('\nget_audit_logs() result:\n' + json.dumps(audit_search_result, indent=2))
# end-get_audit_logs
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_upload_artifact_example(self):
"""
upload_artifact request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-upload_artifact
artifact_contents = io.BytesIO(
b'This is an example artifact associated with a catalog entry.')
response = global_catalog_service.upload_artifact(
object_id=catalog_entry_id,
artifact_id='artifact.txt',
artifact=artifact_contents,
content_type='text/plain',
).get_result()
print('\nupload_artifact() result:\n' + json.dumps(response, indent=2))
# end-upload_artifact
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_artifact_example(self):
"""
get_artifact request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-get_artifact
response = global_catalog_service.get_artifact(
object_id=catalog_entry_id,
artifact_id='artifact.txt',
)
content_type = response.get_headers().get('content-type')
result = response.get_result()
print('\nget_artifact() result:\n')
print('Artifact content-type: {0}'.format(content_type))
print('Artifact contents: {0}'.format(str(result.content)))
# end-get_artifact
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_artifacts_example(self):
"""
list_artifacts request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-list_artifacts
artifacts = global_catalog_service.list_artifacts(
object_id=catalog_entry_id).get_result()
print('\nlist_artifacts() result:\n' + json.dumps(artifacts, indent=2))
# end-list_artifacts
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_artifact_example(self):
"""
delete_artifact request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-delete_artifact
response = global_catalog_service.delete_artifact(
object_id=catalog_entry_id,
artifact_id='artifact.txt',
).get_result()
print('\ndelete_artifact() result:\n' + json.dumps(response, indent=2))
# end-delete_artifact
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_catalog_entry_example(self):
"""
delete_catalog_entry request example
"""
global catalog_entry_id
assert catalog_entry_id is not None
try:
# begin-delete_catalog_entry
response = global_catalog_service.delete_catalog_entry(
id=catalog_entry_id).get_result()
print('\ndelete_catalog_entry() result:\n' + json.dumps(response, indent=2))
# end-delete_catalog_entry
except ApiException as e:
pytest.fail(str(e))
# endregion
##############################################################################
# End of Examples for Service: GlobalCatalogV1
##############################################################################
| 1.78125
| 2
|
Katari/sip/response/_1xx.py
|
KalbiProject/Katari
| 13
|
12774443
|
<gh_stars>10-100
from Katari.sip import SipMessage
class Trying100(SipMessage):
def __init__(self):
super().__init__()
self.method_line = "SIP/2.0 100 Trying\r\n"
class Ringing180(SipMessage):
def __init__(self):
super().__init__()
self.method_line = "SIP/2.0 180 Ringing\r\n"
class CallisBeingForwarded181:
def __init__(self):
pass
class Queued182(SipMessage):
def __init__(self):
super().__init__()
self.method_line = "SIP/2.0 182 Queued\r\n"
class SessionProgress183(SipMessage):
def __init__(self):
super().__init__()
self.method_line = "SIP/2.0 183 Session Progress\r\n"
class EarlyDialogTerminated199:
def __init__(self):
pass
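# Hypothetical usage sketch: instantiating one of these classes yields a
# provisional-response object whose status line is already populated, e.g.
#   Trying100().method_line  # -> "SIP/2.0 100 Trying\r\n"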
| 2.203125
| 2
|
pre_process.py
|
foamliu/Chatbot-v2
| 7
|
12774444
|
<reponame>foamliu/Chatbot-v2
import pickle
import numpy as np
from tqdm import tqdm
from config import train_filename, vocab_file, maxlen_in, maxlen_out, sos_id, \
eos_id, unk_id, data_file
from utils import encode_text
def build_vocab(token):
if token not in char2idx:
next_index = len(char2idx)
char2idx[token] = next_index
idx2char[next_index] = token
def process(file):
print('processing {}...'.format(file))
with open(file, 'r', encoding='utf-8') as f:
data = f.readlines()
lengths = []
for line in tqdm(data):
sentences = line.split('|')
for sent in sentences:
sentence = sent.strip()
lengths.append(len(sentence))
tokens = list(sentence)
for token in tokens:
build_vocab(token)
np.save('lengths.npy', np.array(lengths))
def get_data(in_file):
print('getting data {}...'.format(in_file))
with open(in_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
samples = []
for line in lines:
sentences = line.split('|')
in_sentence = sentences[0].strip()
out_sentence = sentences[1].strip()
in_data = encode_text(char2idx, in_sentence)
out_data = [sos_id] + encode_text(char2idx, out_sentence) + [eos_id]
if len(in_data) < maxlen_in and len(out_data) < maxlen_out \
and unk_id not in in_data and unk_id not in out_data:
samples.append({'in': in_data, 'out': out_data})
return samples
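# Data format note (inferred from the parsing above): each line of the
# training file holds one exchange, with the input and output sentences
# separated by '|', i.e.
#   <input sentence>|<output sentence>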
if __name__ == '__main__':
char2idx = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}
idx2char = {0: '<pad>', 1: '<sos>', 2: '<eos>', 3: '<unk>'}
process(train_filename)
print(len(char2idx))
print(list(char2idx.items())[:100])
data = {
'dict': {
'char2idx': char2idx,
'idx2char': idx2char
}
}
with open(vocab_file, 'wb') as file:
pickle.dump(data, file)
samples = get_data(train_filename)
np.random.shuffle(samples)
num_samples = len(samples)
num_valid = 1000
num_test = 10
valid = samples[:num_valid]
test = samples[num_valid:num_valid + num_test]
train = samples[num_valid + num_test:]
print('num_samples: ' + str(len(samples)))
data = {
'train': train,
'valid': valid,
'test': test
}
print('num_train: ' + str(len(train)))
print('num_valid: ' + str(len(valid)))
print('num_test: ' + str(len(test)))
with open(data_file, 'wb') as file:
pickle.dump(data, file)
| 2.703125
| 3
|
chemicalmatcher/__init__.py
|
ericmbell1/standardizedinventories
| 14
|
12774445
|
<gh_stars>10-100
# __init__.py (chemicalmatcher)
# !/usr/bin/env python3
# coding=utf-8
"""
Public API for chemicalmatcher. Functions to allow retrieval of chemical and
substance matches from precompiled chemical match lists or the EPA
SRS web service
"""
from chemicalmatcher.programsynonymlookupbyCAS import programsynonymlookupbyCAS
from chemicalmatcher.writeStEWIchemicalmatchesbyinventory import\
writeChemicalMatches
from chemicalmatcher.globals import log, read_cm_file
def get_matches_for_StEWI(inventory_list = []):
"""Retrieves all precompiled chemical matches
:param inventory_list: optional list of inventories, if passed will check
for their presence in the chemical matcher output
:return: dataframe in ChemicalMatches standard output format
"""
chemicalmatches = read_cm_file()
if inventory_list != []:
inventories = set(chemicalmatches['Source'].unique())
if set(inventory_list).issubset(inventories):
log.debug('all inventories found in chemical matcher')
else:
log.info('inventories missing in chemical matcher, regenerating '
'chemical matches')
writeChemicalMatches()
chemicalmatches = read_cm_file()
return chemicalmatches
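# Hypothetical usage sketch:
#   matches = get_matches_for_StEWI(inventory_list=['TRI', 'NEI'])
# returns the precompiled ChemicalMatches dataframe, regenerating the match
# file first if any requested inventory is missing from it.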
def get_program_synomyms_for_CAS_list(cas_list, inventories_of_interest):
"""Gets program synonym names for chemicals by CAS using SRS web service
:param cas_list: a list of CAS numbers as strings, e.g. ['124-38-9', '74-82-8']
:param inventories_of_interest: inventory acronym, e.g. ['TRI'].
Valid for 'TRI','NEI', or 'DMR'
:return: dataframe with columns 'CAS' and inventory acronym with program names
"""
df_of_synonyms_by_cas = programsynonymlookupbyCAS(
cas_list, inventories_of_interest)
return df_of_synonyms_by_cas
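# Hypothetical usage sketch, reusing the CAS numbers from the docstring above:
#   df = get_program_synomyms_for_CAS_list(['124-38-9', '74-82-8'], ['TRI'])
#   # df has a 'CAS' column plus one column of TRI program names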
| 2.578125
| 3
|
exercises/exercise04_underdamped_free_plot.py
|
WangKBJames/cobem2019-modal-analysis-python
| 6
|
12774446
|
import sympy
from sympy import Function, dsolve, Symbol
# symbols
t = Symbol('t', positive=True)
zeta = Symbol(r'\zeta', constant=True, positive=True)
# unknown function
u = Function('u')(t)
# assumed values
u0 = 1
v0 = 0
wn = 4.
wd = wn*sympy.sqrt(1-zeta**2)
ics = {u.subs(t, 0): u0, u.diff(t).subs(t, 0): v0}
sol = dsolve(u.diff(t, t) + 2*zeta*wn*u.diff(t) + wn**2*u, ics=ics)
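# For reference, the closed-form underdamped solution dsolve should recover is
#   u(t) = exp(-zeta*wn*t) * (u0*cos(wd*t) + (v0 + zeta*wn*u0)/wd * sin(wd*t))
# with wd = wn*sqrt(1 - zeta**2) as defined above.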
import matplotlib
matplotlib.use('TkAgg')
from sympy.plotting import plot3d
p1 = plot3d(sol.rhs, (t, 0, 10), (zeta, 0.05, 0.7),
show=False,
nb_of_points_x=500,
nb_of_points_y=10,
xlabel='$t$',
            ylabel=r'$\zeta$',
zlabel='$u(t)$',
)
p1.show()
| 2.765625
| 3
|
events/models.py
|
0xelectron/mhtportal-web
| 0
|
12774447
|
import datetime
from django.db import models
from django.utils.translation import ugettext as _
from django.core.validators import RegexValidator
from base.models import (Address, Center, Participant)
class EventCategory(models.Model):
category = models.CharField(max_length=50, default="", help_text=_("Event Category"))
def __str__(self):
return "Event Category: {}".format(self.category)
class Event(models.Model):
"""Event represents an particular Event.
venue field is an foreign key to :model: `base.Address`
"""
# Choices
YEAR_CHOICES = []
curr_year = datetime.datetime.now().year
for r in range(2016, curr_year + 5):
YEAR_CHOICES.append((r,r))
GENDER_FEMALE = 'female'
GENDER_MALE = 'male'
GENDER_CHOICES = (
(GENDER_FEMALE, 'Female'),
(GENDER_MALE, 'Male'))
name = models.CharField(max_length=50, help_text=_("Event Name"))
venue = models.ForeignKey(Address, on_delete=models.CASCADE)
center = models.ForeignKey(Center, on_delete=models.CASCADE, help_text=_("Center"))
category = models.ForeignKey(EventCategory, on_delete=models.CASCADE, help_text=_("Event Category"), default="")
year = models.PositiveIntegerField(choices=YEAR_CHOICES, default=curr_year,
help_text=_('year'))
start_date = models.DateField(help_text=_("Event Start Date"))
end_date = models.DateField(help_text=_("Event End Date"))
last_date_of_registration = models.DateField(
help_text=_("Last Date of Registration"))
fees = models.DecimalField(max_digits=10, decimal_places=2,
help_text=_("Event Fees"))
late_fees = models.DecimalField(max_digits=10, decimal_places=2,
help_text=_("Late Registration Fees"))
accommodation_provided = models.BooleanField(help_text=_("Is Accommodation Provided?"))
event_code = models.CharField(max_length=100, unique=True, help_text=_("Event Code"))
gender = models.CharField(max_length=6, choices=GENDER_CHOICES, blank=True)
# This represents age-group
min_age = models.PositiveIntegerField(help_text=_("Age Group lower limit"))
max_age = models.PositiveIntegerField(help_text=_("Age Group Upper limit"))
rules = models.TextField(help_text=_("Any Rules"), blank=True)
remarks = models.TextField(help_text=_("Any Remarks"), blank=True)
active = models.BooleanField(help_text=_("Is event active?"))
poc_name = models.CharField(max_length=50, help_text="Name of point of contact", blank=True)
poc_number = models.CharField(max_length=50, help_text="Contact number of POC", blank=True)
is_global_poc = models.BooleanField(help_text=_("Is global PoC?"), default=False)
class EventParticipant(models.Model):
"""EventParticipant stores information about an participant for the Event.
The EventParticipant table contains information about an event participant
for an event.
event field is an foreign key to :model: `events.Event`
pariticipant field is an foreign key to :model: `base.Pariticpant`
home_center field is an foreign key to :model: `base.Center`
event_center field is optional and an foreign key to :model: `base.Center`
"""
# Choices
ROLE_PARTICIPANT = 'participant'
ROLE_HELPER = 'helper'
ROLE_COORDINATOR = 'coordinator'
ROLE_CHOICES = (
(ROLE_PARTICIPANT, 'Participant'),
(ROLE_HELPER, 'Helper'),
(ROLE_COORDINATOR, 'Coordinator'))
event = models.ForeignKey(Event, on_delete=models.CASCADE)
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
registration_no = models.CharField(max_length=100, unique=True, help_text=_("Registration Number"))
home_center = models.ForeignKey(Center, on_delete=models.CASCADE, related_name='home_center',
help_text=_("Home Center"))
event_center = models.ForeignKey(Center, on_delete=models.CASCADE, blank=True, null=True,
related_name='event_center', help_text=_("Event Center"))
accommodation = models.BooleanField(help_text=_("Is Accommodation Required?"))
payment_status = models.BooleanField(help_text=_("Has paid?"))
amount_paid = models.DecimalField(max_digits=10, decimal_places=2, help_text=_("Amount Paid"))
cashier = models.CharField(max_length=50, help_text=_("Cashier"), blank=True)
big_buddy = models.CharField(max_length=50, help_text=_("Big Buddy"), blank=True)
goal_achievement = models.CharField(max_length=100, help_text=_("Goal Achievement"), blank=True)
role = models.CharField(max_length=12, choices=ROLE_CHOICES, help_text=_("Role"))
registration_status = models.PositiveSmallIntegerField(default=0, help_text=_("Registration Status"))
created_on = models.DateTimeField(auto_now_add=True, help_text=_("Event Participant Created on"))
updated_on = models.DateTimeField(auto_now=True, help_text=_("Event Participant Updated on"))
skill = models.TextField(blank=True, help_text=_("Skill"))
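# Hypothetical usage sketch (names and values below are illustrative only):
#   EventParticipant.objects.create(
#       event=event, participant=participant, registration_no='REG-0001',
#       home_center=center, accommodation=False, payment_status=True,
#       amount_paid=500.00, role=EventParticipant.ROLE_PARTICIPANT)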
| 2.484375
| 2
|
ibmcloud_image_uploader/ibmcloud_cos_image_uploader.py
|
tsanghan/tmos-cloudinit
| 0
|
12774448
|
#!/usr/bin/env python
# coding=utf-8
# pylint: disable=broad-except,unused-argument,line-too-long, unused-variable
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the logic to scan for patched TMOS disk images
and then upload to IBM Cloud Object Storage
"""
import os
import sys
import time
import datetime
import logging
import json
import ibm_boto3
import requests
from ibm_botocore.client import Config, ClientError
IMAGE_TYPES = ['.qcow2', '.vhd', '.vmdk']
IBM_COS_REGIONS = []
TMOS_IMAGE_DIR = None
COS_API_KEY = None
COS_RESOURCE_CRN = None
COS_IMAGE_LOCATION = None
COS_AUTH_ENDPOINT = None
COS_ENDPOINT = None
UPDATE_IMAGES = None
DELETE_ALL = None
LOG = logging.getLogger('ibmcloud_cos_image_uploader')
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGSTREAM = logging.StreamHandler(sys.stdout)
LOGSTREAM.setFormatter(FORMATTER)
LOG.addHandler(LOGSTREAM)
def get_patched_images(tmos_image_dir):
"""get TMOS patched disk images"""
return_image_files = []
LOG.debug('searching for images in %s', tmos_image_dir)
for patched_dir in os.listdir(tmos_image_dir):
patched_dir_path = "%s/%s" % (tmos_image_dir, patched_dir)
if os.path.isdir(patched_dir_path):
for patched_image in os.listdir(patched_dir_path):
if os.path.splitext(patched_image)[1] in IMAGE_TYPES:
image_filepath = "%s/%s" % (patched_dir_path,
patched_image)
return_image_files.append(image_filepath)
return return_image_files
def get_bucket_name(image_path, location):
    """Get bucket for this patched image"""
    image_dir = os.path.dirname(image_path.replace(TMOS_IMAGE_DIR, '')).replace(os.path.sep, '')
    return "%s-%s" % (os.path.splitext(image_dir)[0].replace('_', '-').lower(), location)
def get_object_name(image_path, location):
"""Get object name for this patched image"""
if 'DATASTOR' in image_path:
return "%s_DATASTOR" % os.path.dirname(image_path.replace(TMOS_IMAGE_DIR, '')).replace(os.path.sep, '')
else:
return os.path.dirname(image_path.replace(TMOS_IMAGE_DIR, '')).replace(os.path.sep, '')
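# Worked example (hypothetical path, assuming TMOS_IMAGE_DIR = '/images'):
#   image_path = '/images/BIGIP_15_1_LTM_1SLOT/bigip.qcow2'
#   get_bucket_name(image_path, 'us-south')  # -> 'bigip-15-1-ltm-1slot-us-south'
#   get_object_name(image_path, 'us-south')  # -> 'BIGIP_15_1_LTM_1SLOT'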
def get_cos_client(location):
"""return IBM COS client object"""
cos_endpoint = "https://s3.%s.cloud-object-storage.appdomain.cloud" % location
return ibm_boto3.client("s3",
ibm_api_key_id=COS_API_KEY,
ibm_service_instance_id=COS_RESOURCE_CRN,
ibm_auth_endpoint=COS_AUTH_ENDPOINT,
config=Config(signature_version="oauth"),
endpoint_url=cos_endpoint)
def get_cos_resource(location):
"""return IBM COS resource object"""
cos_endpoint = "https://s3.%s.cloud-object-storage.appdomain.cloud" % location
return ibm_boto3.resource("s3",
ibm_api_key_id=COS_API_KEY,
ibm_service_instance_id=COS_RESOURCE_CRN,
ibm_auth_endpoint=COS_AUTH_ENDPOINT,
config=Config(signature_version="oauth"),
endpoint_url=cos_endpoint)
def assure_bucket(bucket_name, location):
"""Make sure bucket exists"""
cos_res = get_cos_resource(location)
try:
for bucket in cos_res.buckets.all():
if bucket.name == bucket_name:
return True
LOG.debug('creating bucket %s', bucket_name)
cos_res.Bucket(bucket_name).create(
ACL='public-read'
)
return True
except ClientError as client_error:
LOG.error('client error assuring bucket %s: %s',
bucket_name, client_error)
return False
except Exception as ex:
LOG.error('exception occurred assuring bucket %s: %s', bucket_name, ex)
return False
def assure_object(file_path, bucket_name, object_name, location):
"""check if patched image already exists"""
cos_res = get_cos_resource(location)
try:
for obj in cos_res.Bucket(bucket_name).objects.all():
if obj.key == object_name:
if UPDATE_IMAGES:
obj.delete()
else:
return True
LOG.debug('starting upload of image %s to %s/%s',
file_path, bucket_name, object_name)
part_size = 1024 * 1024 * 2
file_threshold = 1024 * 1024 * 1024 * 10
transfer_config = ibm_boto3.s3.transfer.TransferConfig(
multipart_threshold=file_threshold,
multipart_chunksize=part_size
)
cos_client = get_cos_client(location)
transfer_mgr = ibm_boto3.s3.transfer.TransferManager(
cos_client, config=transfer_config)
upload = transfer_mgr.upload(file_path, bucket_name, object_name, extra_args={'ACL': 'public-read'})
upload.result()
LOG.debug('upload complete for %s/%s', bucket_name, object_name)
return True
except ClientError as ce:
LOG.error('client error assuring object %s/%s: %s',
bucket_name, object_name, ce)
return False
except Exception as ex:
LOG.error('exception occurred assuring object %s/%s: %s',
bucket_name, object_name, ex)
return False
def assure_cos_image(image_path, location):
"""assure patch image object"""
bucket_name = get_bucket_name(image_path, location)
object_name = get_object_name(image_path, location)
LOG.debug('checking IBM COS Object: %s/%s exists',
bucket_name, object_name)
if assure_bucket(bucket_name, location):
assure_object(image_path, bucket_name, object_name, location)
md5_path = "%s.md5" % image_path
if os.path.exists(md5_path):
md5_object_name = "%s.md5" % object_name
assure_object(md5_path, bucket_name, md5_object_name, location)
sig_path = "%s.384.sig" % image_path
if os.path.exists(sig_path):
sig_object_name = "%s.384.sig" % object_name
assure_object(sig_path, bucket_name, sig_object_name, location)
def delete_all():
"""delete all files and buckets from the COS resource"""
LOG.debug('deleting images in: %s', IBM_COS_REGIONS)
for location in IBM_COS_REGIONS:
LOG.debug("deleting images in %s region" % location)
cos_res = get_cos_resource(location)
try:
for bucket in cos_res.buckets.all():
if location in bucket.name:
LOG.debug('deleting bucket: %s', bucket.name)
for obj in cos_res.Bucket(bucket.name).objects.all():
LOG.debug('deleting object: %s', obj.key)
obj.delete()
bucket.delete()
except ClientError as client_error:
LOG.error('client error deleting all resources: %s', client_error)
except Exception as ex:
LOG.error('exception occurred deleting all resources: %s', ex)
def upload_patched_images():
"""check for iamges and assure upload to IBM COS"""
LOG.debug('uploading images in %s', IBM_COS_REGIONS)
for image_path in get_patched_images(TMOS_IMAGE_DIR):
for location in IBM_COS_REGIONS:
assure_cos_image(image_path, location)
def inventory():
"""create inventory JSON"""
global UPDATE_IMAGES
inventory_file = "%s/ibmcos_images.json" % (TMOS_IMAGE_DIR)
if os.path.exists(inventory_file):
os.unlink(inventory_file)
inventory = {}
for location in IBM_COS_REGIONS:
inventory[location] = []
cos_res = get_cos_resource(location)
try:
for bucket in cos_res.buckets.all():
if location in bucket.name:
for obj in cos_res.Bucket(bucket.name).objects.all():
LOG.debug('inventory add %s/%s', bucket.name, obj.key)
if os.path.splitext(obj.key)[1] in IMAGE_TYPES:
inv_obj = {
'image_name': bucket.name.replace('.', '-'),
'image_sql_url': "cos://%s/%s/%s" % (location, bucket.name, obj.key),
'md5_sql_url': "cos://%s/%s/%s.md5" % (location, bucket.name, obj.key)
}
inventory[location].append(inv_obj)
except ClientError as client_error:
LOG.error('client error creating inventory of resources: %s', client_error)
except Exception as ex:
LOG.error('exception creating inventory of resources: %s', ex)
# write it locally
with open(inventory_file, 'w') as ivf:
ivf.write(json.dumps(inventory))
# store in each location
if not DELETE_ALL:
UPDATE_IMAGES = True
for location in IBM_COS_REGIONS:
bucket_name = "f5-image-catalog-%s" % location
public_url = "https://%s.s3.%s.cloud-object-storage.appdomain.cloud/f5-image-catalog.json" % (bucket_name, location)
LOG.debug('writing image catalog to: %s', public_url)
assure_bucket(bucket_name, location)
assure_object(inventory_file, bucket_name, "f5-image-catalog.json", location)
def initialize():
"""initialize configuration from environment variables"""
global TMOS_IMAGE_DIR, IBM_COS_REGIONS, COS_API_KEY, COS_RESOURCE_CRN, COS_IMAGE_LOCATION, COS_AUTH_ENDPOINT, UPDATE_IMAGES, DELETE_ALL
TMOS_IMAGE_DIR = os.getenv('TMOS_IMAGE_DIR', None)
COS_API_KEY = os.getenv('COS_API_KEY', None)
COS_RESOURCE_CRN = os.getenv('COS_RESOURCE_CRN', None)
COS_IMAGE_LOCATION = os.getenv('COS_IMAGE_LOCATION', 'us-south')
IBM_COS_REGIONS = [ x.strip() for x in COS_IMAGE_LOCATION.split(',') ]
COS_AUTH_ENDPOINT = os.getenv(
'COS_AUTH_ENDPOINT', 'https://iam.cloud.ibm.com/identity/token')
UPDATE_IMAGES = os.getenv('UPDATE_IMAGES', 'false')
if UPDATE_IMAGES.lower() == 'true':
UPDATE_IMAGES = True
else:
UPDATE_IMAGES = False
DELETE_ALL = os.getenv('DELETE_ALL', 'false')
if DELETE_ALL.lower() == 'true':
DELETE_ALL = True
else:
DELETE_ALL = False
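# Hypothetical environment for running this uploader (values are placeholders):
#   export COS_API_KEY=<IBM Cloud API key>
#   export COS_RESOURCE_CRN=<COS service instance CRN>
#   export COS_IMAGE_LOCATION=us-south,eu-de
#   export TMOS_IMAGE_DIR=/path/to/patched/images
#   export UPDATE_IMAGES=true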
if __name__ == "__main__":
START_TIME = time.time()
LOG.debug('process start time: %s', datetime.datetime.fromtimestamp(
START_TIME).strftime("%A, %B %d, %Y %I:%M:%S"))
initialize()
ERROR_MESSAGE = ''
ERROR = False
if not COS_API_KEY:
ERROR = True
ERROR_MESSAGE += "please set env COS_API_KEY for your IBM COS resource\n"
if not COS_RESOURCE_CRN:
ERROR = True
ERROR_MESSAGE += "please set env COS_RESOURCE_CRN for your IBM COS resource\n"
if not TMOS_IMAGE_DIR and not DELETE_ALL:
ERROR = True
ERROR_MESSAGE += "please set env TMOS_IMAGE_DIR to scan for patched TMOS images\n"
if ERROR:
LOG.error('\n\n%s\n', ERROR_MESSAGE)
sys.exit(1)
if DELETE_ALL:
delete_all()
else:
upload_patched_images()
inventory()
STOP_TIME = time.time()
DURATION = STOP_TIME - START_TIME
LOG.debug(
'process end time: %s - ran %s (seconds)',
datetime.datetime.fromtimestamp(
STOP_TIME).strftime("%A, %B %d, %Y %I:%M:%S"),
DURATION
)
| 1.84375
| 2
|
napari/_qt/widgets/qt_extension2reader.py
|
MaksHess/napari
| 0
|
12774449
|
from qtpy.QtCore import Qt, Signal
from qtpy.QtWidgets import (
QHBoxLayout,
QLabel,
QPushButton,
QTableWidget,
QTableWidgetItem,
QVBoxLayout,
QWidget,
)
from ...settings import get_settings
from ...utils.translations import trans
class Extension2ReaderTable(QWidget):
"""Table showing extension to reader mappings with removal button.
Widget presented in preferences-plugin dialog."""
valueChanged = Signal(int)
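    # The underlying setting is a plain {extension: reader plugin name}
    # mapping, e.g. (hypothetical values) {'.tif': 'napari-tiff-reader'};
    # _populate_table renders one row per entry and
    # _remove_extension_assignment drops an entry when its 'x' button is clicked.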
def __init__(self, parent=None):
super().__init__(parent=parent)
self._table = QTableWidget()
self._table.setShowGrid(False)
self._populate_table()
layout = QVBoxLayout()
layout.addWidget(self._table)
self.setLayout(layout)
def _populate_table(self):
"""Add row for each extension to reader mapping in settings"""
self._extension_col = 0
self._reader_col = 1
header_strs = [trans._('Extension'), trans._('Reader Plugin')]
self._table.setColumnCount(2)
self._table.setColumnWidth(self._extension_col, 100)
self._table.setColumnWidth(self._reader_col, 150)
self._table.verticalHeader().setVisible(False)
self._table.setMinimumHeight(120)
extension2reader = get_settings().plugins.extension2reader
if len(extension2reader) > 0:
self._table.setRowCount(len(extension2reader))
self._table.horizontalHeader().setStretchLastSection(True)
self._table.horizontalHeader().setStyleSheet(
'border-bottom: 2px solid white;'
)
self._table.setHorizontalHeaderLabels(header_strs)
for row, (extension, plugin_name) in enumerate(
extension2reader.items()
):
item = QTableWidgetItem(extension)
item.setFlags(Qt.NoItemFlags)
self._table.setItem(row, self._extension_col, item)
plugin_widg = QWidget()
# need object name to easily find row
plugin_widg.setObjectName(f'{extension}')
plugin_widg.setLayout(QHBoxLayout())
plugin_widg.layout().setContentsMargins(0, 0, 0, 0)
plugin_label = QLabel(plugin_name)
# need object name to easily work out which button was clicked
remove_btn = QPushButton('x', objectName=f'{extension}')
remove_btn.setFixedWidth(30)
remove_btn.setStyleSheet('margin: 4px;')
remove_btn.setToolTip(
'Remove this extension to reader association'
)
remove_btn.clicked.connect(self._remove_extension_assignment)
plugin_widg.layout().addWidget(plugin_label)
plugin_widg.layout().addWidget(remove_btn)
self._table.setCellWidget(row, self._reader_col, plugin_widg)
else:
# Display that there are no extensions with reader associations
self._table.setRowCount(1)
self._table.setHorizontalHeaderLabels(header_strs)
self._table.setColumnHidden(self._reader_col, True)
self._table.setColumnWidth(self._extension_col, 200)
item = QTableWidgetItem(trans._('No extensions found.'))
item.setFlags(Qt.NoItemFlags)
self._table.setItem(0, 0, item)
def _remove_extension_assignment(self, event):
"""Delete extension to reader mapping setting and remove table row"""
extension_to_remove = self.sender().objectName()
current_settings = get_settings().plugins.extension2reader
# need explicit assignment to new object here for persistence
get_settings().plugins.extension2reader = {
k: v
for k, v in current_settings.items()
if k != extension_to_remove
}
for i in range(self._table.rowCount()):
row_widg_name = self._table.cellWidget(
i, self._reader_col
).objectName()
if row_widg_name == extension_to_remove:
self._table.removeRow(i)
return
| 1.96875
| 2
|
TAT_setup.py
|
jacob975/TATIRP
| 0
|
12774450
|
<filename>TAT_setup.py
#!/usr/bin/python
'''
Program:
This is a program to setup tat python code.
For Linux and macOS only
Usage:
1. TAT_setup.py
editor Jacob975
20170809
#################################
update log
20170809 version alpha 1
It can run properly
20170830 version alpha 2
1. combine the linux version and macOS version
20171114 version alpha 3:
1. Using py to save environment constant instead of a dat file.
20180625 version alpha 4:
1. Path of result is removed
'''
import time
import glob
import os
import fnmatch
import TAT_env
from sys import platform, exit
def readfile(filename):
f = open(filename, 'r')
data = []
for line in f.readlines():
# skip if no data or it's a hint.
if line == "\n" or line.startswith('#'):
continue
data.append(line[:-1])
    f.close()
return data
# This method is used to set the path in the first line of each program file
def set_path_linux(py_path, path = ""):
py_list = glob.glob("{0}/*.py".format(path))
for name in py_list:
temp = 'sed -i "1s?.*?{0}?" {1}'.format(py_path, name)
        if VERBOSE > 0: print(temp)
os.system(temp)
# This method is used to set the path in the first line of each tat_python program file
def set_path_mac(py_path, path = ""):
py_list = glob.glob("{0}/*.py".format(path))
for name in py_list:
        temp = 'sed -i "" -e "1s?.*?{0}?" {1}'.format(py_path, name)
        if VERBOSE > 0: print(temp)
os.system(temp)
# remove by-product
temp = "rm *-e"
os.system(temp)
#--------------------------------------------
# main code
VERBOSE = 1
# measure times
start_time = time.time()
# get the path of python from TAT_env.py
py_path = TAT_env.path_of_python
py_path = "#!{0}".format(py_path)
if VERBOSE>0:print "path of python or python2 writen in tat_config: {0}".format(py_path)
# get path of TAT code from TAT_env
code_path = TAT_env.path_of_code
# set path of python into all tat_python program file
if platform == "linux" or platform == "linux2":
set_path_linux(py_path, code_path)
# process all code in nest folder
obj_list = glob.glob("*")
for obj in obj_list:
if os.path.isdir(obj):
os.chdir(obj)
temp_path = "{0}/{1}".format(code_path, obj)
set_path_linux(py_path, temp_path)
os.chdir("..")
elif platform == "darwin":
set_path_mac(py_path, code_path)
# process all code in nest folder
obj_list = glob.glob("*")
for obj in obj_list:
if os.path.isdir(obj):
os.chdir(obj)
temp_path = "{0}/{1}".format(code_path, obj)
set_path_mac(py_path, temp_path)
os.chdir("..")
else:
print "you system is not fit the requirement of tat_python"
print "Please use Linux of macOS"
exit()
# back to path of code
print(code_path)
os.chdir(code_path)
# get path of source from TAT_env
path_of_image = TAT_env.path_of_image
# construct necessary folder
if VERBOSE>0:print "construct necessary folders..."
# source folder
temp = "mkdir -p {0}/TF/image".format(path_of_image)
os.system(temp)
temp = "mkdir -p {0}/TF/calibrate".format(path_of_image)
os.system(temp)
temp = "mkdir -p {0}/TF/log".format(path_of_image)
os.system(temp)
temp = "mkdir -p {0}/KU/image".format(path_of_image)
os.system(temp)
temp = "mkdir -p {0}/KU/calibrate".format(path_of_image)
os.system(temp)
temp = "mkdir -p {0}/KU/log".format(path_of_image)
os.system(temp)
# measuring time
elapsed_time = time.time() - start_time
print "Exiting Setup Program, spending ", elapsed_time, "seconds."
| 2.640625
| 3
|