content stringlengths 5 1.05M |
|---|
import collections
import collections.abc

import numpy as np
import pandas as pd

from . import arrops
from .region import parse_region, regions_add_name_column
_rc = {
'colnames':{
'chrom':'chrom',
'start':'start',
'end':'end'
}
}
def _get_default_colnames():
return _rc['colnames']['chrom'], _rc['colnames']['start'], _rc['colnames']['end']
class update_default_colnames:
    """
    Context manager that overrides the default interval column names
    ('chrom', 'start', 'end') stored in the module-level ``_rc`` registry,
    restoring the previous names on exit.

    Parameters
    ----------
    new_colnames : (str, str, str) or dict
        Either a sequence of exactly three names (chrom, start, end), or a
        mapping with any subset of the keys 'chrom', 'start', 'end'.

    Raises
    ------
    ValueError
        If new_colnames is neither a 3-element iterable nor a mapping.
    """

    def __init__(self, new_colnames):
        # Snapshot the current names so __exit__ can restore them.
        self._old_colnames = dict(_rc['colnames'])
        # NB: test Mapping *before* Iterable -- every Mapping is also
        # Iterable, so the reversed order would unpack a dict's keys.
        # Also use collections.abc: the bare collections.Iterable/Mapping
        # aliases were removed in Python 3.10.
        if isinstance(new_colnames, collections.abc.Mapping):
            _rc['colnames'].update(
                {k: v for k, v in new_colnames.items()
                 if k in ['chrom', 'start', 'end']})
        elif isinstance(new_colnames, collections.abc.Iterable):
            if len(new_colnames) != 3:
                raise ValueError(
                    'Please, specify new columns using a list of '
                    '3 strings or a dict!')
            (_rc['colnames']['chrom'],
             _rc['colnames']['start'],
             _rc['colnames']['end']) = new_colnames
        else:
            raise ValueError(
                'Please, specify new columns using a list of '
                '3 strings or a dict!')

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Restore the names captured in __init__.
        _rc['colnames'] = self._old_colnames
def _verify_columns(df, colnames):
"""
df: pandas.DataFrame
colnames: list of columns
"""
if not set(colnames).issubset(df.columns):
raise ValueError(
", ".join(set(colnames).difference(set(df.columns)))
+ " not in keys of df.columns"
)
def select(df, region, cols=None):
    """
    Return all genomic intervals in a dataframe that overlap
    a genomic region.

    Parameters
    ----------
    df : pandas.DataFrame
    region : UCSC str
        The genomic region to select from the dataframe.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df : pandas.DataFrame

    Raises
    ------
    ValueError
        If no chromosome can be parsed from `region`.
    """
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    _verify_columns(df, [ck, sk, ek])
    chrom, start, end = parse_region(region)
    if chrom is None:
        raise ValueError("no chromosome detected, check region input")
    if (start is not None) and (end is not None):
        # BUGFIX: use the user-specified colnames (ck/sk/ek) instead of the
        # hard-coded df.chrom/df.start/df.end attribute access.
        # Semi-open interval overlap test: [start, end) vs [sk, ek).
        inds = (
            (df[ck].values == chrom)
            & (df[sk].values < end)
            & (df[ek].values > start)
        )
    else:
        # Only a chromosome was given; select the whole chromosome.
        inds = df[ck].values == chrom
    return df.iloc[np.where(inds)[0]]
def expand(df, pad, limits=None, side="both", limits_region_col=None, cols=None):
    """
    Expand each interval by a given amount, modifying `df` in place
    (the same dataframe is also returned).

    Parameters
    ----------
    df : pandas.DataFrame
    pad : int
        The amount by which the intervals are expanded *on each side*.
    limits : {str: int} or {str: (int, int)} or None
        The limits of interval expansion. If a single number X is provided,
        the expanded intervals are trimmed to fit into (0, X); if a tuple
        of numbers (X, Y) is provided, the new intervals are trimmed to (X, Y).
        Regions missing from `limits` default to a lower bound of 0 and an
        upper bound of the maximal int64 value.
    side : str
        Which side to expand, possible values are "left", "right" and "both".
    limits_region_col : str
        The column to select the expansion limits for each interval.
        If None, then use the chromosome column.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df : pandas.DataFrame

    Raises
    ------
    ValueError
        If a value in `limits` is neither a scalar nor a 2-element sequence.
    """
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    limits_region_col = ck if limits_region_col is None else limits_region_col

    lower_limits = {}
    upper_limits = {}
    if limits:
        for k, v in dict(limits).items():
            if isinstance(v, (tuple, list, np.ndarray)):
                lower_limits[k] = v[0]
                upper_limits[k] = v[1]
            elif np.isscalar(v):
                lower_limits[k] = 0
                upper_limits[k] = v
            else:
                # BUGFIX: was a plain string missing the f-prefix.
                raise ValueError(f"Unknown limit type: {type(v)}")

    if side == "both" or side == "left":
        if limits:
            # BUGFIX: Series.apply(fn, 0) passed 0 as `convert_dtype`, not as
            # a default for missing keys; use dict.get with the intended
            # default lower bound of 0.
            df[sk] = np.maximum(
                df[limits_region_col].apply(lambda r: lower_limits.get(r, 0)),
                df[sk].values - pad,
            )
        else:
            df[sk] = df[sk].values - pad

    if side == "both" or side == "right":
        if limits:
            # BUGFIX: same as above; default upper bound is the max int64.
            df[ek] = np.minimum(
                df[limits_region_col].apply(
                    lambda r: upper_limits.get(r, np.iinfo(np.int64).max)
                ),
                df[ek] + pad,
            )
        else:
            df[ek] = df[ek] + pad

    return df
def _overlap_intidxs(
    df1, df2, how="left", keep_order=False, cols1=None, cols2=None, on=None
):
    """
    Find pairs of overlapping genomic intervals and return the integer
    indices of the overlapping intervals.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        For non-inner joins, intervals without a partner are reported
        with -1 in the partner column.
    keep_order : bool
        If True, lexicographically sort the output by the integer positions
        in df1, then df2.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list or None
        Additional shared columns to consider as separate groups.

    Returns
    -------
    overlap_ids : numpy.ndarray
        The indices of the overlapping genomic intervals in the original
        dataframes. The 1st column contains the indices of intervals
        from the 1st set, the 2nd column - the indicies from the 2nd set.
        A value of -1 marks "no partner" for outer/left/right joins.
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    _verify_columns(df1, [ck1, sk1, ek1])
    _verify_columns(df2, [ck2, sk2, ek2])

    # Switch to integer indices.
    df1 = df1.reset_index(drop=True)
    df2 = df2.reset_index(drop=True)

    # Find overlapping intervals per chromosome (plus optional `on` columns).
    group_list1 = [ck1]
    group_list2 = [ck2]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if (ck1 in on) or (ck2 in on):
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df1, on)
        _verify_columns(df2, on)
        group_list1 += on
        group_list2 += on
    df1_groups = df1.groupby(group_list1).groups
    df2_groups = df2.groupby(group_list2).groups
    all_groups = sorted(
        set.union(set(df1_groups), set(df2_groups))
    )  ### breaks if any of the groupby elements are pd.NA...
    # all_groups = list(set.union(set(df1_groups), set(df2_groups))) ### disagrees with pyranges order so a test fails...

    overlap_intidxs = []
    for group_keys in all_groups:
        # Positions of this group's rows in each table (empty if the group
        # is absent from that table).
        df1_group_idxs = (
            df1_groups[group_keys].values
            if (group_keys in df1_groups)
            else np.array([])
        )
        df2_group_idxs = (
            df2_groups[group_keys].values
            if (group_keys in df2_groups)
            else np.array([])
        )
        overlap_intidxs_sub = []

        both_groups_nonempty = (df1_group_idxs.size > 0) and (df2_group_idxs.size > 0)

        if both_groups_nonempty:
            overlap_idxs_loc = arrops.overlap_intervals(
                df1[sk1].values[df1_group_idxs],
                df1[ek1].values[df1_group_idxs],
                df2[sk2].values[df2_group_idxs],
                df2[ek2].values[df2_group_idxs],
            )

            # Convert local per-chromosome indices into the
            # indices of the original table.
            overlap_intidxs_sub += [
                [
                    df1_group_idxs[overlap_idxs_loc[:, 0]],
                    df2_group_idxs[overlap_idxs_loc[:, 1]],
                ]
            ]

        if how in ["outer", "left"] and df1_group_idxs.size > 0:
            if both_groups_nonempty:
                # df1 rows that appear in no overlap pair get partner -1.
                no_overlap_ids1 = df1_group_idxs[
                    np.where(
                        np.bincount(
                            overlap_idxs_loc[:, 0], minlength=len(df1_group_idxs)
                        )
                        == 0
                    )[0]
                ]
            else:
                no_overlap_ids1 = df1_group_idxs
            overlap_intidxs_sub += [
                [no_overlap_ids1, -1 * np.ones_like(no_overlap_ids1),]
            ]

        if how in ["outer", "right"] and df2_group_idxs.size > 0:
            if both_groups_nonempty:
                # df2 rows that appear in no overlap pair get partner -1.
                no_overlap_ids2 = df2_group_idxs[
                    np.where(
                        np.bincount(
                            overlap_idxs_loc[:, 1], minlength=len(df2_group_idxs)
                        )
                        == 0
                    )[0]
                ]
            else:
                no_overlap_ids2 = df2_group_idxs
            overlap_intidxs_sub += [
                [-1 * np.ones_like(no_overlap_ids2), no_overlap_ids2,]
            ]

        if overlap_intidxs_sub:
            overlap_intidxs.append(
                np.block(
                    [
                        [idxs[:, None] for idxs in idxs_pair]
                        for idxs_pair in overlap_intidxs_sub
                    ]
                )
            )

    if len(overlap_intidxs) == 0:
        # BUGFIX: `np.int` was deprecated in numpy 1.20 and removed in 1.24;
        # use the explicit fixed-size dtype instead.
        return np.ndarray(shape=(0, 2), dtype=np.int64)
    overlap_intidxs = np.vstack(overlap_intidxs)

    if keep_order:
        order = np.lexsort([overlap_intidxs[:, 1], overlap_intidxs[:, 0]])
        overlap_intidxs = overlap_intidxs[order]

    return overlap_intidxs
def overlap(
    df1,
    df2,
    how="left",
    return_input=True,
    return_index=False,
    return_overlap=False,
    suffixes=("_1", "_2"),
    keep_order=False,
    cols1=None,
    cols2=None,
    on=None,
):
    """
    Find pairs of overlapping genomic intervals.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        For non-inner joins, rows without a partner are filled with pd.NA
        on the partner's side.
    return_input : bool or str
        If True, return columns from input dfs. Default True.
        The values "1"/"left" or "2"/"right" return only that side.
    return_index : bool or str
        If True, return indicies of overlapping pairs. Default False.
        If a string, it is used instead of "index" as the column-name prefix.
    return_overlap : bool or str
        If True, return overlapping intervals for the overlapping pairs. Default False.
        If a string, it is used instead of "overlap" as the column-name prefix.
    suffixes : (str, str)
        The suffixes for the columns of the two overlapped sets.
    keep_order : bool
        If True, sort the overlap pairs by the integer positions of the
        intervals in df1, then df2 (see _overlap_intidxs).
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list
        List of column names to perform clustering on indepdendently, passed as an argument
        to df.groupby when considering overlaps. Default is ['chrom'], which must match the first name
        from cols. Examples for additional columns include 'strand'.

    Returns
    -------
    df_overlap : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Integer positions of overlapping pairs; -1 marks "no partner" for
    # non-inner joins.
    overlap_df_idxs = _overlap_intidxs(
        df1, df2, how=how, cols1=cols1, cols2=cols2, keep_order=keep_order, on=on,
    )
    # Generate output tables.
    df_index_1 = None
    df_index_2 = None
    if return_index:
        index_col = return_index if isinstance(return_index, str) else "index"
        df_index_1 = pd.DataFrame(
            {index_col + suffixes[0]: df1.index[overlap_df_idxs[:, 0]]}
        )
        df_index_2 = pd.DataFrame(
            {index_col + suffixes[1]: df2.index[overlap_df_idxs[:, 1]]}
        )
    df_overlap = None
    if return_overlap:
        overlap_col = return_overlap if isinstance(return_overlap, str) else "overlap"
        # Intersection of each pair: max of the starts, min of the ends.
        overlap_start = np.maximum(
            df1[sk1].values[overlap_df_idxs[:, 0]],
            df2[sk2].values[overlap_df_idxs[:, 1]],
        )
        overlap_end = np.minimum(
            df1[ek1].values[overlap_df_idxs[:, 0]],
            df2[ek2].values[overlap_df_idxs[:, 1]],
        )
        df_overlap = pd.DataFrame(
            {
                overlap_col + "_" + sk1: overlap_start,
                overlap_col + "_" + ek1: overlap_end,
            }
        )
    df_input_1 = None
    df_input_2 = None
    if return_input is True or str(return_input) == "1" or return_input == "left":
        df_input_1 = df1.iloc[overlap_df_idxs[:, 0]].reset_index(drop=True)
        df_input_1.columns = [c + suffixes[0] for c in df_input_1.columns]
    if return_input is True or str(return_input) == "2" or return_input == "right":
        df_input_2 = df2.iloc[overlap_df_idxs[:, 1]].reset_index(drop=True)
        df_input_2.columns = [c + suffixes[1] for c in df_input_2.columns]
    # Masking non-overlapping regions if using non-inner joins.
    # Boolean-mask row assignment overwrites the unmatched rows with pd.NA.
    if how != "inner":
        if df_input_1 is not None:
            df_input_1[overlap_df_idxs[:, 0] == -1] = pd.NA
        if df_input_2 is not None:
            df_input_2[overlap_df_idxs[:, 1] == -1] = pd.NA
        if df_index_1 is not None:
            df_index_1[overlap_df_idxs[:, 0] == -1] = pd.NA
        if df_index_2 is not None:
            df_index_2[overlap_df_idxs[:, 1] == -1] = pd.NA
        if df_overlap is not None:
            df_overlap[
                (overlap_df_idxs[:, 0] == -1) | (overlap_df_idxs[:, 1] == -1)
            ] = pd.NA
    # All pieces share the same RangeIndex, so this concat is positional.
    out_df = pd.concat(
        [df_index_1, df_input_1, df_index_2, df_input_2, df_overlap], axis="columns"
    )
    return out_df
def cluster(
    df,
    min_dist=0,
    cols=None,
    on=None,
    return_input=True,
    return_cluster_ids=True,
    return_cluster_intervals=True,
):
    """
    Cluster overlapping intervals.

    Parameters
    ----------
    df : pandas.DataFrame
    min_dist : float or None
        If provided, cluster intervals separated by this distance or less.
        If None, do not cluster non-overlapping intervals. Using
        min_dist=0 and min_dist=None will bring different results.
        bioframe uses semi-open intervals, so interval pairs [0,1) and [1,2)
        do not overlap, but are separated by a distance of 0. Adjacent intervals
        are not clustered when min_dist=None, but are clustered when min_dist=0.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.
    on : None or list
        List of column names to perform clustering on indepdendently, passed as an argument
        to df.groupby before clustering. Default is None. An example use would be on=['strand'].
    return_input : bool
        If True, return the input columns alongside the cluster annotations.
    return_cluster_ids : bool
        If True, return a 'cluster' column with each interval's cluster id.
    return_cluster_intervals : bool
        If True, return 'cluster_start'/'cluster_end' columns with the span
        of the cluster each interval belongs to.

    Returns
    -------
    df_clustered : pd.DataFrame
    """
    if min_dist is not None:
        if min_dist < 0:
            raise ValueError("min_dist>=0 currently required")

    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    _verify_columns(df, [ck, sk, ek])

    # Switch to integer indices; the caller's index is restored on output.
    df_index = df.index
    df = df.reset_index(drop=True)

    # Find overlapping intervals for groups specified by ck and on=[] (default on=None)
    group_list = [ck]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if ck in on:
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df, on)
        group_list += on
    df_groups = df.groupby(group_list).groups

    cluster_ids = np.full(df.shape[0], -1)
    clusters = []
    max_cluster_id = -1

    for group_keys, df_group_idxs in df_groups.items():
        if df_group_idxs.empty:
            continue
        df_group = df.loc[df_group_idxs]

        (
            cluster_ids_group,
            cluster_starts_group,
            cluster_ends_group,
        ) = arrops.merge_intervals(
            df_group[sk].values, df_group[ek].values, min_dist=min_dist
        )
        interval_counts = np.bincount(cluster_ids_group)

        # Offset the per-group ids so cluster ids are globally unique.
        cluster_ids_group += max_cluster_id + 1
        n_clusters = cluster_starts_group.shape[0]
        max_cluster_id += n_clusters

        cluster_ids[df_group_idxs.values] = cluster_ids_group

        ## Storing chromosome names causes a 2x slowdown. :(
        # Normalize scalar group keys (single grouping column) to a tuple so
        # the positional lookup below works for str and non-str keys alike.
        if not isinstance(group_keys, tuple):
            group_keys = (group_keys,)
        clusters_group = {}
        for col in group_list:
            clusters_group[col] = pd.Series(
                data=np.full(n_clusters, group_keys[group_list.index(col)]),
                dtype=df[col].dtype,
            )
        clusters_group[sk] = cluster_starts_group
        clusters_group[ek] = cluster_ends_group
        clusters_group["n_intervals"] = interval_counts
        clusters.append(pd.DataFrame(clusters_group))

    assert np.all(cluster_ids >= 0)
    clusters = pd.concat(clusters).reset_index(drop=True)
    # Reorder cluster columns to have chrom, start, end first.
    clusters_names = list(clusters.keys())
    clusters = clusters[
        [ck, sk, ek] + [col for col in clusters_names if col not in [ck, sk, ek]]
    ]

    out_df = {}
    if return_cluster_ids:
        out_df["cluster"] = cluster_ids
    if return_cluster_intervals:
        out_df["cluster_start"] = clusters[sk].values[cluster_ids]
        out_df["cluster_end"] = clusters[ek].values[cluster_ids]
    out_df = pd.DataFrame(out_df)

    if return_input:
        out_df = pd.concat([df, out_df], axis="columns")
    # BUGFIX: previously `out_df.set_index(df_index)` was called without
    # assigning the result, so the original index was silently discarded.
    out_df.index = df_index
    return out_df
def merge(df, min_dist=0, cols=None, on=None):
    """
    Merge overlapping intervals.

    Parameters
    ----------
    df : pandas.DataFrame
    min_dist : float or None
        If provided, merge intervals separated by this distance or less.
        If None, do not merge non-overlapping intervals. Using
        min_dist=0 and min_dist=None will bring different results.
        bioframe uses semi-open intervals, so interval pairs [0,1) and [1,2)
        do not overlap, but are separated by a distance of 0. Adjacent intervals
        are not merged when min_dist=None, but are merged when min_dist=0.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.
    on : list
        List of column names to consider separately for merging, passed as an argument
        to df.groupby before merging. Default is ['chrom'], which must match the first name
        from cols. Examples for additional columns include 'strand'.

    Returns
    -------
    df_merged : pandas.DataFrame
        A pandas dataframe with coordinates of merged clusters and an
        "n_intervals" column counting the intervals merged into each.
    """
    if min_dist is not None:
        if min_dist < 0:
            raise ValueError("min_dist>=0 currently required")

    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    _verify_columns(df, [ck, sk, ek])

    # Find overlapping intervals for groups specified by on=[] (default on=None)
    group_list = [ck]
    if on is not None:
        if type(on) is not list:
            raise ValueError("on=[] must be None or list")
        if ck in on:
            raise ValueError("on=[] should not contain chromosome colnames")
        _verify_columns(df, on)
        group_list += on
    df_groups = df.groupby(group_list).groups

    clusters = []
    for group_keys, df_group_idxs in df_groups.items():
        if df_group_idxs.empty:
            continue
        df_group = df.loc[df_group_idxs]

        (
            cluster_ids_group,
            cluster_starts_group,
            cluster_ends_group,
        ) = arrops.merge_intervals(
            df_group[sk].values, df_group[ek].values, min_dist=min_dist
        )
        interval_counts = np.bincount(cluster_ids_group)
        n_clusters = cluster_starts_group.shape[0]

        ## Storing chromosome names causes a 2x slowdown. :(
        # Normalize scalar group keys (single grouping column) to a tuple so
        # the positional lookup below works for str and non-str keys alike.
        if not isinstance(group_keys, tuple):
            group_keys = (group_keys,)
        clusters_group = {}
        for col in group_list:
            clusters_group[col] = pd.Series(
                data=np.full(n_clusters, group_keys[group_list.index(col)]),
                dtype=df[col].dtype,
            )
        clusters_group[sk] = cluster_starts_group
        clusters_group[ek] = cluster_ends_group
        clusters_group["n_intervals"] = interval_counts
        clusters.append(pd.DataFrame(clusters_group))

    clusters = pd.concat(clusters).reset_index(drop=True)
    # Reorder cluster columns to have chrom, start, end first.
    clusters_names = list(clusters.keys())
    clusters = clusters[
        [ck, sk, ek] + [col for col in clusters_names if col not in [ck, sk, ek]]
    ]
    return clusters
def complement(df, chromsizes=None, cols=None):
    """
    Find genomic regions that are not covered by any interval.

    Parameters
    ----------
    df : pandas.DataFrame
    chromsizes : dict or None
        Chromosome lengths, {chrom: length}. If None, the last complement
        interval on each chromosome extends to the maximal int64 value.
        If provided, every chromosome in chromsizes appears in the output,
        including chromosomes absent from df.
    cols : (str, str, str)
        The names of columns containing the chromosome, start and end of the
        genomic intervals. The default values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_complement : pandas.DataFrame
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck, sk, ek = _get_default_colnames() if cols is None else cols
    infer_chromsizes = (chromsizes is None)
    # Find overlapping intervals per chromosome.
    df_groups = df.groupby(ck).groups
    if infer_chromsizes:
        all_groups = sorted(set(df_groups))
    else:
        if not set(df_groups).issubset(set(chromsizes.keys())):
            raise ValueError(
                'Chromsizes are missing some chromosomes from the input interval table.')
        all_groups = sorted(set(chromsizes.keys()))
    complements = []
    for group_keys in all_groups:
        # this is a stub for potential on argument
        chrom = group_keys
        # Chromosome listed in chromsizes but absent from df: the whole
        # chromosome is uncovered.
        if group_keys not in df_groups:
            complement_group = {
                ck: pd.Series(
                    data=[chrom],
                    dtype=df[ck].dtype,
                ),
                sk: 0,
                ek: chromsizes[chrom],
            }
            complements.append(pd.DataFrame(complement_group))
            continue
        df_group_idxs = df_groups[group_keys].values
        df_group = df.loc[df_group_idxs]
        if infer_chromsizes:
            # No chromsizes given: use the largest representable upper bound.
            chromsize = np.iinfo(np.int64).max
        else:
            chromsize = chromsizes[chrom]
        if chromsize < np.max(df_group[ek].values):
            raise ValueError("one or more intervals exceed provided chromsize")
        (
            complement_starts_group,
            complement_ends_group,
        ) = arrops.complement_intervals(
            df_group[sk].values, df_group[ek].values, bounds=(0, chromsize),
        )
        # Storing chromosome names causes a 2x slowdown. :(
        complement_group = {
            ck: pd.Series(
                data=np.full(complement_starts_group.shape[0], chrom),
                dtype=df[ck].dtype,
            ),
            sk: complement_starts_group,
            ek: complement_ends_group,
        }
        complement_group = pd.DataFrame(complement_group)
        complements.append(complement_group)
    complements = pd.concat(complements).reset_index(drop=True)
    return complements
def coverage(df1, df2, return_input=True, cols1=None, cols2=None):
    """
    Quantify the coverage of intervals from set 1 by intervals from set2. For every interval
    in set 1 find the number of base pairs covered by intervals in set 2.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    return_input : bool
        If True, return input as well as computed coverage
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_coverage : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Merge df2 first so overlapping intervals in set 2 are not double-counted.
    df2_merged = merge(df2, cols=cols2)
    # Left join: every df1 interval appears at least once; rows with no
    # overlap carry pd.NA in the overlap columns.
    overlap_idxs = overlap(
        df1,
        df2_merged,
        how="left",
        return_index=True,
        return_overlap=True,
        cols1=cols1,
        cols2=cols2,
    )
    # Covered base pairs per overlapping pair (semi-open intervals).
    # NOTE(review): the "overlap_start"/"overlap_end"/"index_1" labels below
    # assume the default start/end names in cols1 and the default suffixes
    # of overlap() -- confirm before calling with non-default cols1.
    overlap_idxs["overlap"] = (
        overlap_idxs["overlap_end"] - overlap_idxs["overlap_start"]
    )
    # Sum of overlap fragment lengths per df1 interval; pd.NA fragments
    # (no overlap) are skipped by the groupby sum.
    coverage_sparse_df = overlap_idxs.groupby("index_1").agg({"overlap": "sum"})
    out_df = {}
    # Start from zeros indexed like df1, then add the per-interval sums so
    # intervals with no overlap end up with coverage 0.
    out_df["coverage"] = (
        pd.Series(np.zeros_like(df1[sk1]), index=df1.index)
        .add(coverage_sparse_df["overlap"], fill_value=0)
        .astype(df1[sk1].dtype)
    )
    out_df = pd.DataFrame(out_df)
    if return_input:
        out_df = pd.concat([df1, out_df], axis="columns")
    return out_df
def _closest_intidxs(
    df1,
    df2=None,
    k=1,
    ignore_overlaps=False,
    ignore_upstream=False,
    ignore_downstream=False,
    tie_breaking_col=None,
    cols1=None,
    cols2=None,
):
    """
    For every interval in set 1 find k closest genomic intervals in set2 and
    return their integer indices.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
        If df2 is None or same object as df1, find closest intervals within the same set.
    k : int
        The number of closest intervals to report.
    ignore_overlaps, ignore_upstream, ignore_downstream : bool
        Passed through to arrops.closest_intervals.
    tie_breaking_col : str, callable or None
        A column label in df2, or f(DataFrame) -> Series, whose values break
        ties between equally distant intervals. If None, no tie-breaking.
    cols1, cols2 : (str, str, str)
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    closest_ids : numpy.ndarray
        The indices of the closest genomic intervals in the original
        dataframes. The 1st column contains the indices of intervals
        from the 1st set, the 2nd column - the indicies from the 2nd set.
    """
    # Allow users to specify the names of columns containing the interval coordinates.
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2

    self_closest = False
    if (df2 is None) or (df2 is df1):
        df2 = df1
        self_closest = True

    # Switch to integer indices.
    df1 = df1.reset_index(drop=True)
    df2 = df2.reset_index(drop=True)

    # Find closest intervals per chromosome.
    df1_groups = df1.groupby(ck1).groups
    df2_groups = df2.groupby(ck2).groups
    closest_intidxs = []
    for group_keys, df1_group_idxs in df1_groups.items():
        if group_keys not in df2_groups:
            continue
        df2_group_idxs = df2_groups[group_keys]
        df1_group = df1.loc[df1_group_idxs]
        df2_group = df2.loc[df2_group_idxs]

        tie_arr = None
        if tie_breaking_col is None:
            pass
        elif isinstance(tie_breaking_col, str):
            tie_arr = df2_group[tie_breaking_col].values
        elif callable(tie_breaking_col):
            tie_arr = tie_breaking_col(df2_group).values
        else:
            # BUGFIX: the ValueError was previously constructed but never
            # raised, and the branch also fired for tie_breaking_col=None.
            raise ValueError(
                "tie_breaking_col must be either a column label or "
                "f(DataFrame) -> Series"
            )

        closest_idxs_group = arrops.closest_intervals(
            df1_group[sk1].values,
            df1_group[ek1].values,
            None if self_closest else df2_group[sk2].values,
            None if self_closest else df2_group[ek2].values,
            k=k,
            tie_arr=tie_arr,
            ignore_overlaps=ignore_overlaps,
            ignore_upstream=ignore_upstream,
            ignore_downstream=ignore_downstream,
        )

        # Convert local per-chromosome indices into the
        # indices of the original table.
        closest_idxs_group = np.vstack(
            [
                df1_group_idxs.values[closest_idxs_group[:, 0]],
                df2_group_idxs.values[closest_idxs_group[:, 1]],
            ]
        ).T
        closest_intidxs.append(closest_idxs_group)

    if len(closest_intidxs) == 0:
        # BUGFIX: `np.int` was deprecated in numpy 1.20 and removed in 1.24;
        # use the explicit fixed-size dtype instead.
        return np.ndarray(shape=(0, 2), dtype=np.int64)
    closest_intidxs = np.vstack(closest_intidxs)
    return closest_intidxs
def closest(
    df1,
    df2=None,
    k=1,
    ignore_overlaps=False,
    ignore_upstream=False,
    ignore_downstream=False,
    tie_breaking_col=None,
    return_input=True,
    return_index=False,
    return_distance=True,
    return_overlap=False,
    suffixes=("_1", "_2"),
    cols1=None,
    cols2=None,
):
    """
    For every interval in set 1 find k closest genomic intervals in set 2.

    Note that, unless specified otherwise, overlapping intervals are considered
    as closest. When multiple intervals are located at the same distance, the
    ones with the lowest index in df2 are chosen.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
        If df2 is None, find closest non-identical intervals within the same set.
    k : int
        The number of closest intervals to report.
    ignore_overlaps : bool
        If True, return the closest non-overlapping interval.
    ignore_upstream : bool
        If True, ignore intervals in df2 that are upstream of intervals in df1.
    ignore_downstream : bool
        If True, ignore intervals in df2 that are downstream of intervals in df1.
    tie_breaking_col : str
        A column in df2 to use for breaking ties when multiple intervals
        are located at the same distance. Intervals with *lower* values will
        be selected.
    return_input : bool or str
        If True, return input. "1"/"left" or "2"/"right" return one side only.
    return_index : bool
        If True, return indices
    return_distance : bool
        If True, return distances. Returns zero for overlaps.
    return_overlap : bool
        If True, return columns: have_overlap, overlap_start, and overlap_end.
        Fills df_closest['overlap_start'] and df['overlap_end'] with pd.NA if non-overlapping.
    suffixes : (str, str)
        The suffixes for the columns of the two sets.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_closest : pandas.DataFrame
        If no intervals found, returns none.
    """
    if k < 1:
        raise ValueError("k>=1 required")
    if df2 is df1:
        raise ValueError(
            "pass df2=None to find closest non-identical intervals within the same set."
        )
    # If finding closest within the same set, df2 now has to be set
    # to df1, so that the rest of the logic works.
    if df2 is None:
        df2 = df1
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Integer positions of the k closest pairs, computed per chromosome.
    closest_df_idxs = _closest_intidxs(
        df1,
        df2,
        k=k,
        ignore_overlaps=ignore_overlaps,
        ignore_upstream=ignore_upstream,
        ignore_downstream=ignore_downstream,
        tie_breaking_col=tie_breaking_col,
        cols1=cols1,
        cols2=cols2,
    )
    if len(closest_df_idxs) == 0:
        return  # case of no closest intervals
    # Generate output tables.
    df_index_1 = None
    df_index_2 = None
    if return_index:
        index_col = return_index if isinstance(return_index, str) else "index"
        df_index_1 = pd.DataFrame(
            {index_col + suffixes[0]: df1.index[closest_df_idxs[:, 0]]}
        )
        df_index_2 = pd.DataFrame(
            {index_col + suffixes[1]: df2.index[closest_df_idxs[:, 1]]}
        )
    df_overlap = None
    if return_overlap:
        # Intersection of each pair: max of the starts, min of the ends.
        overlap_start = np.amax(
            np.vstack(
                [
                    df1[sk1].values[closest_df_idxs[:, 0]],
                    df2[sk2].values[closest_df_idxs[:, 1]],
                ]
            ),
            axis=0,
        )
        overlap_end = np.amin(
            np.vstack(
                [
                    df1[ek1].values[closest_df_idxs[:, 0]],
                    df2[ek2].values[closest_df_idxs[:, 1]],
                ]
            ),
            axis=0,
        )
        # Semi-open intervals overlap iff start < end.
        have_overlap = overlap_start < overlap_end
        df_overlap = pd.DataFrame({
            "have_overlap" : have_overlap,
            "overlap_start" : np.where(have_overlap, overlap_start, pd.NA),
            "overlap_end": np.where(have_overlap, overlap_end, pd.NA)
        })
    df_distance = None
    if return_distance:
        # Gap on each side, clipped at zero so overlapping pairs get 0.
        distance_left = np.maximum(
            0,
            df1[sk1].values[closest_df_idxs[:, 0]]
            - df2[ek2].values[closest_df_idxs[:, 1]],
        )
        distance_right = np.maximum(
            0,
            df2[sk2].values[closest_df_idxs[:, 1]]
            - df1[ek1].values[closest_df_idxs[:, 0]],
        )
        # At most one side is nonzero; take the larger of the two.
        distance = np.amax(np.vstack([distance_left, distance_right]), axis=0)
        df_distance = pd.DataFrame({
            "distance" : distance
        })
    df_input_1 = None
    df_input_2 = None
    if return_input is True or str(return_input) == "1" or return_input == "left":
        df_input_1 = df1.iloc[closest_df_idxs[:, 0]].reset_index(drop=True)
        df_input_1.columns = [c + suffixes[0] for c in df_input_1.columns]
    if return_input is True or str(return_input) == "2" or return_input == "right":
        df_input_2 = df2.iloc[closest_df_idxs[:, 1]].reset_index(drop=True)
        df_input_2.columns = [c + suffixes[1] for c in df_input_2.columns]
    # All pieces share the same RangeIndex, so this concat is positional.
    out_df = pd.concat([
        df_index_1,
        df_input_1,
        df_index_2,
        df_input_2,
        df_overlap,
        df_distance], axis="columns")
    return out_df
def subtract(df1, df2, cols1=None, cols2=None):
    """
    Generate a new set of genomic intervals by subtracting the second set of genomic intervals from the first.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.

    Returns
    -------
    df_subtracted : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2

    # BUGFIX: the rename map previously hard-coded the default
    # "chrom"/"start"/"end" and "overlap_start"/"overlap_end" names, and
    # cols1/cols2 were not forwarded to overlap()/complement(), breaking
    # non-default column names.
    name_updates = {
        ck1 + "_1": ck1,
        "overlap_" + sk1: sk1,
        "overlap_" + ek1: ek1,
    }
    extra_columns_1 = [i for i in list(df1.columns) if i not in [ck1, sk1, ek1]]
    for i in extra_columns_1:
        name_updates[i + "_1"] = i

    ### loop over chromosomes, then either return the same or subtracted intervals.
    df1_groups = df1.groupby(ck1).groups
    df2_groups = df2.groupby(ck2).groups
    df_subtracted = []
    for group_keys, df1_group_idxs in df1_groups.items():
        df1_group = df1.loc[df1_group_idxs]

        # if nothing to subtract, add original intervals
        if group_keys not in df2_groups:
            df_subtracted.append(df1_group)
            continue

        df2_group_idxs = df2_groups[group_keys]
        df2_group = df2.loc[df2_group_idxs]

        # Intervals of df1 that fall into the complement of df2 are exactly
        # the subtracted fragments.
        df_subtracted_group = overlap(
            df1_group,
            complement(df2_group, cols=cols2),
            how="inner",
            return_overlap=True,
            cols1=cols1,
            cols2=cols2,
        )[list(name_updates)]
        df_subtracted.append(df_subtracted_group.rename(columns=name_updates))

    df_subtracted = pd.concat(df_subtracted)
    return df_subtracted
def setdiff(df1, df2, cols1=None, cols2=None, on=None):
    """
    Generate a new dataframe of genomic intervals by removing any interval from the
    first dataframe that overlaps an interval from the second dataframe.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as DataFrames.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : None or list
        Additional column names to perform clustering on indepdendently, passed as an argument
        to df.groupby when considering overlaps and must be present in both dataframes.
        Examples for additional columns include 'strand'.

    Returns
    -------
    df_setdiff : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols1 is None else cols1
    ck2, sk2, ek2 = _get_default_colnames() if cols2 is None else cols2
    # Positions in df1 that participate in at least one overlap.
    overlapping_positions = _overlap_intidxs(
        df1, df2, how="inner", cols1=cols1, cols2=cols2, on=on
    )[:, 0]
    # Keep every df1 row whose position never appears among the overlaps.
    kept_positions = np.setdiff1d(np.arange(len(df1)), overlapping_positions)
    return df1.iloc[kept_positions]
def split(
    df,
    points,
    cols=None,
    cols_points=None,
    add_names=False,
    suffixes=["_left", "_right"],
):
    """
    Generate a new dataframe of genomic intervals by splitting each interval from the
    first dataframe at the given points.

    Parameters
    ----------
    df : pandas.DataFrame
        Genomic intervals stored as a DataFrame.
    points : pandas.DataFrame or dict
        If pandas.DataFrame, a set of genomic positions specified in columns 'chrom', 'pos'.
        Names of cols can be overwridden by cols_points.
        If dict, mapping of chromosomes to positions.
    cols : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    cols_points : (str, str) or None
        The names of the chromosome and position columns in `points`.
        Default ('chrom', 'pos').
    add_names : bool
        If True, add a 'name' column with the region name plus a
        "_left"/"_right" suffix depending on the side of the split point.
    suffixes : [str, str]
        The suffixes appended to 'name' when add_names is True.

    Returns
    -------
    df_split : pandas.DataFrame
    """
    ck1, sk1, ek1 = _get_default_colnames() if cols is None else cols
    ck2, sk2 = ("chrom", "pos") if cols_points is None else cols_points

    # BUGFIX: the rename map previously hard-coded the default colnames and
    # complement() was called without cols, breaking non-default names.
    name_updates = {
        ck1 + "_1": ck1,
        "overlap_" + sk1: sk1,
        "overlap_" + ek1: ek1,
    }
    if add_names:
        name_updates["index_2"] = "index_2"
        return_index = True
    else:
        return_index = False
    extra_columns_1 = [i for i in list(df.columns) if i not in [ck1, sk1, ek1]]
    for i in extra_columns_1:
        name_updates[i + "_1"] = i

    if isinstance(points, dict):
        points = pd.DataFrame.from_dict(points, orient="index", columns=[sk2])
        points.reset_index(inplace=True)
        points.rename(columns={"index": ck2}, inplace=True)
    elif isinstance(points, pd.DataFrame):
        # BUGFIX: work on a copy so the caller's dataframe is not mutated
        # by the start/end columns added below.
        points = points.copy()
    else:
        raise ValueError("points must be a dict or pd.Dataframe")

    # Represent each point as a zero-length interval for complement/overlap.
    points["start"] = points[sk2]
    points["end"] = points[sk2]

    df_split = overlap(
        df,
        complement(points, cols=(ck2, "start", "end")),
        how="inner",
        cols1=cols,
        cols2=(ck2, "start", "end"),
        return_overlap=True,
        return_index=return_index,
    )[list(name_updates)]
    df_split.rename(columns=name_updates, inplace=True)

    if add_names:
        df_split = regions_add_name_column(df_split)
        # Complement fragments alternate around split points; parity of the
        # fragment index selects the left/right suffix.
        sides = np.mod(df_split["index_2"].values, 2).astype(int)
        df_split["name"] = df_split["name"].values + np.array(suffixes)[sides]
        # BUGFIX: drop() returns a new frame; the result was discarded, so
        # the helper "index_2" column leaked into the output.
        df_split = df_split.drop(columns=["index_2"])

    return df_split
def count_overlaps(
    df1, df2, cols1=None, cols2=None, on=None,
):
    """
    Count, for each interval in df1, the number of overlapping intervals in df2.

    Parameters
    ----------
    df1, df2 : pandas.DataFrame
        Two sets of genomic intervals stored as a DataFrame.
    cols1, cols2 : (str, str, str) or None
        The names of columns containing the chromosome, start and end of the
        genomic intervals, provided separately for each set. The default
        values are 'chrom', 'start', 'end'.
    on : list
        List of column names to check overlap on independently, passed as an
        argument to df.groupby when considering overlaps. Default is None.
        Examples for additional columns include 'strand'.

    Returns
    -------
    df_counts : pandas.DataFrame
        df1 with an extra 'count' column holding the number of overlaps.
    """
    # Left join keeps every df1 interval; intervals with no partner get a
    # missing index_2, which groupby(...).count() tallies as zero.
    df_counts = overlap(
        df1,
        df2,
        how="left",
        return_input=False,
        keep_order=True,
        return_index=True,
        on=on,
        cols1=cols1,
        cols2=cols2,
    )
    # NOTE(review): positional concat assumes df1 has a default RangeIndex so
    # the per-interval counts line up with df1 row order — confirm callers.
    # (The original also passed names=["count"] to pd.concat; without `keys`
    # that argument is ignored, so it has been dropped.)
    df_counts = pd.concat(
        [
            df1,
            pd.DataFrame(
                df_counts.groupby(["index_1"])["index_2"].count().values,
                columns=["count"],
            ),
        ],
        axis=1,
    )
    return df_counts
|
import unittest
from app.models.products import *
class TestProductprod_records(unittest.TestCase):
    """Unit tests for the Products model record fields and their types."""

    def setUp(self):
        # Fresh record for every test: id, name, category, quantity, unit cost.
        # NOTE: setUp's return value is ignored by unittest, so the original
        # `return self.prod_record` was dropped as misleading.
        self.prod_record = Products(0, 'crisps', 'snacks', '4cartons', 500)

    def test_prod_id(self):
        # Tests that the id is equal to the given id
        self.assertNotEqual(self.prod_record.prod_id, 1)
        self.assertNotEqual(self.prod_record.prod_id, "str")
        self.assertEqual(self.prod_record.prod_id, 0)

    def test_prod_id_data_type(self):
        self.assertNotIsInstance(self.prod_record.prod_id, str)
        self.assertNotIsInstance(self.prod_record.prod_id, float)
        self.assertIsInstance(self.prod_record.prod_id, int)

    def test_prod_name(self):
        '''tests the product name given in the new prod_record'''
        self.assertEqual(self.prod_record.prod_name, 'crisps')
        self.assertNotEqual(self.prod_record.prod_name, 'code')

    def test_prod_name_datatype(self):
        '''tests the datatype of the prod_name'''
        self.assertNotIsInstance(self.prod_record.prod_name, int)
        self.assertNotIsInstance(self.prod_record.prod_name, float)
        self.assertIsInstance(self.prod_record.prod_name, str)

    def test_prod_category(self):
        '''Tests the name of the prod_category'''
        self.assertEqual(self.prod_record.prod_category, 'snacks')
        self.assertNotEqual(self.prod_record.prod_category, 'lubwama')

    def test_prod_category_type(self):
        '''Tests the datatype of the prod_category'''
        self.assertNotIsInstance(self.prod_record.prod_category, int)
        self.assertNotIsInstance(self.prod_record.prod_category, float)
        self.assertIsInstance(self.prod_record.prod_category, str)

    def test_prod_quantity(self):
        # Tests that the prod_quantity is equal to the given quantity
        self.assertEqual(self.prod_record.prod_quantity, '4cartons')
        self.assertNotEqual(self.prod_record.prod_quantity, 'carton')
        self.assertNotEqual(self.prod_record.prod_quantity, '1 pkt')

    def test_prod_quantity_datatype(self):
        '''Tests the datatype of the product quantity'''
        self.assertNotIsInstance(self.prod_record.prod_quantity, int)
        self.assertNotIsInstance(self.prod_record.prod_quantity, float)
        self.assertNotIsInstance(self.prod_record.prod_quantity, dict)
        self.assertIsInstance(self.prod_record.prod_quantity, str)

    def test_unit_cost(self):
        '''Tests the unit_cost of the product'''
        self.assertEqual(self.prod_record.unit_cost, 500)
        self.assertNotEqual(self.prod_record.unit_cost, 1500)

    def test_unit_cost_datatype(self):
        '''Tests the datatype of the unit_cost'''
        self.assertNotIsInstance(self.prod_record.unit_cost, str)
        self.assertNotIsInstance(self.prod_record.unit_cost, float)
        self.assertIsInstance(self.prod_record.unit_cost, int)
|
# Connect to the Kiwoom OpenAPI and print the previous day's price for
# Samsung Electronics (ticker 005930).
from pykiwoom.kiwoom import *
kiwoom = Kiwoom()
# Blocks until the login dialog completes.
kiwoom.CommConnect(block=True)
# 전일가 = "previous day's price" (Korean variable name).
# NOTE(review): the int() conversion and the type() print suggest
# GetMasterLastPrice returns a string — confirm against pykiwoom docs.
전일가 = kiwoom.GetMasterLastPrice("005930")
print(int(전일가))
print(type(전일가))
|
# stickit.py
# Copyright (c) 2017 Mads Hagbarth Damsbo. All Rights Reserved.
# Not for redistribution.
import nuke
import nuke.splinewarp as sw
import string #This is used by the code. Include!
import math
import struct
import time
'''
REAL TODO:
General Feedback:
In regards to assist, it is hard to tell whether something has been successfully applied or not.
This could be fixed by making a green label with "Roto Node Assisted", then fade to grey using a timer.
MUST HAVE:
-Hook up the "Output" options to their respective nodes
in that regard remove the "hide source" option
-Check the buttons in the buttom of the advanced tab and remove the ones that we no longer need.
-Test the heck out of different formats and what not to ensure that we don't get bounding issues.
-option to invert input mask!
-The ST map should source its X and Y from the Source image, not the overlay image!
--Add a option to only source the alpha from the Overlay (so that if the source have a alpha it won't carry through.)
Add another ST frame offset for calculating center motionblur. (or should it be hacked using offset?)
NICE TO HAVE:
Add a button for the ref frame called "Current Frame"
Add a status that show if the shot have been tracked and solved (solve range aswell).
This should also highlight if the "Disable Warp" have been toggeled
Add some Analyze/Tracking presets (ie; Full Frame, Medium Object, Small Object)
Add a button that generates a ST-Map node and a vectorblur node outside the node. (Create ST Setup)
DONE
+Make sure that the ST map matches. (check again!)
FOR CONSIDERATION:
Consider a workflow to work with fully obscured regions (guides maybe)
Post filter that takes all the keyframes created and does eighter smoothing or reduces the number of keyframes to the bare minimum (based on a threshold)
Calculate the length of the vector
Divide each axis by the length to get a normalized vector
First do a check to ensure that the vector is shorter than the threshold
Then compute a direction that is the averaged normalized vector from the frames that we inspect
If the computed direction is within the threshold then...
one thing that there is to consider is that you may want to not write down the keyframes first.
for example in a roto or rotopaint workflow, it might be smart to gather the keyframes, filter, then apply them to the spline.
Save the animation curve.
Add a interpolate range feature.
-You set the in and out point.
-You hit interpolate (it will do a linear interpolation between the points)
-Now the user can retrack forward
'''
'''
#Todo:
-Get all points from the CameraTracker and put them into a list
-Create a initial set of points, this could be all points on a certain frame or a general set of points.
If we take from a certain frame we need to get a list of all points that are on the specified frame
-For every source object we put in a single value that is the XY pos of the object in the specified frame
-For every target object we triangulate the nearby points and get a new position, we do that for all the frames in the framerange specified.
-For every target object we bake animation calculated in the step above
Remember that we must save the new calculated position into a new list or modify the exsisting to get perfect results
'''
'''================================================================================
; Function: CreateWarpPinPair(myNode):
; Description: Create a Splinewarp pin pair.
; Parameter(s): node - The node create pin in
; Return(s): Returns a pair of pin objects (_curveknob.Stroke objects) [source,target]
; specified - Only take knobs with this tag (like "UserTrack" from a cameratracker)
; Note(s): N/A
;=================================================================================='''
def CreateWarpPinPair(myNode,pointlist,refframe):
    """
    Rebuild a SplineWarp node's 'curves' knob with one source/target pin pair
    per tracked point, keyframing the target pins with the track animation.

    Parameters:
        myNode    - The SplineWarp node whose 'curves' knob is purged and rebuilt.
        pointlist - Tracks formatted [ [[frame, x, y], ...], ... ].
        refframe  - Reference frame; each pin is placed at its track's
                    position on this frame.
    """
    ItemX = pointlist
    #First we want to clear the current splinewarp.
    #As there is no built-in function to do this, we just purge it with default data.
    warpCurve = myNode['curves']
    warpRoot = warpCurve.rootLayer
    Header = """AddMode 0 0 1 0 {{v x3f99999a}
{f 0}
{n
{layer Root
{f 0}
{t x44800000 x44428000}
{a pt1x 0 pt1y 0 pt2x 0 pt2y 0 pt3x 0 pt3y 0 pt4x 0 pt4y 0 ptex00 0 ptex01 0 ptex02 0 ptex03 0 ptex10 0 ptex11 0 ptex12 0 ptex13 0 ptex20 0 ptex21 0 ptex22 0 ptex23 0 ptex30 0 ptex31 0 ptex32 0 ptex33 0 ptof1x 0 ptof1y 0 ptof2x 0 ptof2y 0 ptof3x 0 ptof3y 0 ptof4x 0 ptof4y 0 pterr 0 ptrefset 0 ptmot x40800000 ptref 0}}}}
"""
    warpCurve.fromScript(Header)
    warpCurve.changed()
    #As we just cleared the curves knob we need to re-fetch it.
    #If we don't do this Nuke will crash in some cases.
    #This should be reported to TheFoundry.
    warpCurve = myNode['curves']
    warpRoot = warpCurve.rootLayer
    #We start off by creating all the pins that we need.
    #We do this in 2 steps. First we create the src then the dst.
    for i in range(0, len(ItemX)):
        PinSource = sw.Shape(warpCurve, type="bezier") #single point distortion
        newpoint = sw.ShapeControlPoint() #create point
        # Index of refframe within this track = refframe minus the track's
        # first frame (pointlist[i][0][0]).
        ConvertedX = float(pointlist[i][int(refframe-float(pointlist[i][0][0]))][1])
        ConvertedY = float(pointlist[i][int(refframe-float(pointlist[i][0][0]))][2])
        newpoint.center = (ConvertedX,ConvertedY) #set center position
        newpoint.leftTangent = (0,0) #set left tangent relative to center
        newpoint.rightTangent = (0,0) #set right tangent relative to center
        PinSource.append(newpoint) #add point to shape
        PinTarget = sw.Shape(warpCurve, type="bezier") #single point distortion
        newpointB = sw.ShapeControlPoint() #create point
        newpointB.center = (ConvertedX,ConvertedY) #set center position
        newpointB.leftTangent = (0,0) #set left tangent relative to center
        newpointB.rightTangent = (0,0) #set right tangent relative to center
        PinTarget.append(newpointB) #add point to shape
        warpRoot.append(PinSource) #add to the rootLayer
        warpRoot.append(PinTarget) #add to the rootLayer
        warpCurve.defaultJoin(PinSource,PinTarget)
        warpCurve.changed() #Update the curve
        # Clear any default translation animation before keying the pins.
        PinSource.getTransform().getTranslationAnimCurve(0).removeAllKeys()
        PinSource.getTransform().getTranslationAnimCurve(1).removeAllKeys()
        PinTarget.getTransform().getTranslationAnimCurve(0).removeAllKeys()
        PinTarget.getTransform().getTranslationAnimCurve(1).removeAllKeys()
        PinSource.getTransform().addTranslationKey(refframe,0,0,100.0)
        # Key the target pin with the track's motion RELATIVE to its
        # refframe position (deltas, not absolute coordinates).
        for ix in range(0, len(pointlist[i])):
            PinTarget.getTransform().getTranslationAnimCurve(0).addKey(pointlist[i][ix][0],float(pointlist[i][ix][1])-float(pointlist[i][int(refframe-float(pointlist[i][0][0]))][1]))
            PinTarget.getTransform().getTranslationAnimCurve(1).addKey(pointlist[i][ix][0],float(pointlist[i][ix][2])-float(pointlist[i][int(refframe-float(pointlist[i][0][0]))][2]))
            #print pointlist[i][ix][0]
    warpCurve.changed() #Update the curve
    # Rewriting the serialized flag converts the shapes to splinewarp pins.
    CurrentData = warpCurve.toScript().replace('{f 8192}','{f 8224}') #Convert to splinewarp pins
    warpCurve.fromScript(CurrentData)
'''================================================================================
; Function: ExportCameraTrack(myNode):
; Description:  Extracts all 2D Tracking Features from a 3D CameraTracker node (not usertracks).
; Parameter(s): myNode - A CameraTracker node containing tracking features
; Return: Output - A list of points formated [ [[Frame,X,Y][...]] [[...][...]] ]
;
; Note(s): N/A
;=================================================================================='''
def ExportCameraTrack(myNode):
    """
    Extract all 2D tracking features from a CameraTracker node's serialized
    knob (not usertracks).

    Parameters:
        myNode - A CameraTracker node containing tracking features.

    Returns:
        Output - A list of tracks formatted [ [[frame, x, y], ...], ... ].

    NOTE(review): this parses the undocumented serializeKnob text format by
    pattern-matching line lengths and magic values; the offsets below were
    reverse engineered and the exact structure is not guaranteed across
    Nuke versions.
    """
    myKnob = myNode.knob("serializeKnob")
    myLines = myKnob.toScript()
    DataItems = string.split(myLines, '\n')
    Output = []
    for index,line in enumerate(DataItems):
        tempSplit = string.split(line, ' ')
        # A line ending in "10" marks a track header.
        if (len(tempSplit) > 4 and tempSplit[ len(tempSplit)-1] == "10") or (len(tempSplit) > 6 and tempSplit[len(tempSplit)-1] == "10"): #Header
            #The first object always has 2 unknown ints; fix it the easy way by offsetting by 2.
            if len(tempSplit) > 6 and tempSplit[6] == "10":
                offsetKey = 2
                offsetItem = 0
            else:
                offsetKey = 0
                offsetItem = 0
            #For some weird reason the header is located at the first index after the first item.
            #So we go one step down and look for the header data.
            itemHeader = DataItems[index+1]
            itemHeadersplit = string.split(itemHeader, ' ')
            itemHeader_UniqueID = itemHeadersplit[1]
            #So this one is rather weird, but after a certain amount of items the structure changes again.
            backofs = 0
            lastofs = 0
            firstOffset = 0
            secondOffset = 0
            secondItem = DataItems[index+2]
            secondSplit = string.split(secondItem, ' ')
            if len(secondSplit) == 7:
                firstOffset = 0
            if len(itemHeadersplit) == 3:
                # Header shifted one extra line down; re-read it and adjust offsets.
                itemHeader = DataItems[index+2]
                itemHeadersplit = string.split(itemHeader, ' ')
                offsetKey = 2
                offsetItem = 2
                if len(secondSplit) == 11:
                    firstOffset = 1 #In this case the 2nd item will be +1
                    backofs = 1
                elif len(secondSplit) == 7:
                    firstOffset = 1
                else:
                    firstOffset = 0 #In this case the 2nd item will be +0
            itemHeader_FirstItem = itemHeadersplit[3+offsetItem]
            itemHeader_NumberOfKeys = itemHeadersplit[4+offsetKey]
            #Here we extract the individual XY coordinates.
            PositionList =[]
            # NOTE(review): LastFrame is assigned by the elif branch below when
            # an earlier range line is seen; a header appearing before any such
            # line would raise NameError — TODO confirm the dump always leads
            # with a range line.
            PositionList.append([LastFrame+(0),float(string.split(DataItems[index+0], ' ')[2]) ,float(string.split(DataItems[index+0], ' ')[3])])
            for x in range(2,int(itemHeader_NumberOfKeys)+1):
                if len(string.split(DataItems[index+x+firstOffset-1], ' '))>7 and len(string.split(DataItems[index+x+firstOffset-1], ' '))<10 and int(string.split(DataItems[index+x+firstOffset-1], ' ')[5]) > 0:
                    # Indirect record: field 7 points at the line holding the XY data.
                    Offset = int(string.split(DataItems[index+x+firstOffset-1], ' ')[7])
                    PositionList.append([LastFrame+(x-1),float(string.split(DataItems[Offset+1], ' ')[2]) ,float(string.split(DataItems[Offset+1], ' ')[3])])
                    secondOffset = 1
                else:
                    if x==(int(itemHeader_NumberOfKeys)) and backofs == 1:
                        # Last key of a back-offset track reads from the remembered line.
                        PositionList.append([LastFrame+(x-1),float(string.split(DataItems[int(lastofs)], ' ')[2] ) ,float(string.split(DataItems[int(lastofs)], ' ')[3])])
                    else:
                        PositionList.append([LastFrame+(x-1),float(string.split(DataItems[index+x+firstOffset-secondOffset], ' ')[2] ) ,float(string.split(DataItems[index+x+firstOffset-secondOffset], ' ')[3])])
                # Remember where the next record's data may live (used by the backofs case above).
                if len(string.split(DataItems[index+x+firstOffset+secondOffset], ' ')) > 5 and len(string.split(DataItems[index+x+firstOffset+secondOffset], ' ')) < 16:
                    lastofs = str(string.split(DataItems[index+x+firstOffset+secondOffset], ' ')[5])
                else:
                    lastofs = index+x+1
            Output.append(PositionList)
        elif (len(tempSplit) > 8 and tempSplit[1] == "0" and tempSplit[2] == "1"):
            # Range line: remember the first frame for the tracks that follow.
            LastFrame = int(tempSplit[3])
        else: #Content
            pass
    return Output
'''================================================================================
; Function: GetAnimtionList(myList,myFrame):
; Description: Returns a list of points that contain animation between myFrame and the following frame
; Parameter(s): myList - A list of points formated [ [[Frame,X,Y][...]] [[...][...]] ]
myFrame - The frame to take into consideration
; Return: Output - A list of points formated [ [[Frame,X,Y][...]] [[...][...]] ]
;
; Note(s): N/A
;=================================================================================='''
def GetAnimtionList(myList,nestedPoints,myFrame,_rev=False,_ofs=False):
    """
    Return the point pairs that carry animation between myFrame and the
    following frame.

    Parameters:
        myList       - Full track data: [ [[frame, x, y], ...], ... ].
        nestedPoints - Per-frame index built by GrabListData:
                       {frame: [[x, y, trackIdx, firstFrame, lastFrame], ...]}.
        myFrame      - The frame to take into consideration.
        _rev         - If True, emit each pair as [next, this] (reversed).
        _ofs         - If True, emit reversed pairs nudged by (+1, +0.01)
                       (temporary workaround for the strange offset bug).

    Returns:
        Output - A list of [pointA, pointB] pairs, each point [frame, x, y].
                 Empty when no track is animated on this frame.
    """
    Output = []
    thisFrame = int(myFrame)
    try:
        for entry in nestedPoints[thisFrame]:
            # entry = [x, y, trackIdx, firstFrame, lastFrame]; skip tracks
            # that end on this frame (no motion into the next frame).
            if entry[4] <= thisFrame:
                continue
            keyIdx = thisFrame - entry[3]  # index of thisFrame within the track
            outThisframe = myList[entry[2]][keyIdx]
            outNextframe = myList[entry[2]][keyIdx + 1]
            if _rev:
                Output.append([outNextframe, outThisframe])
            elif _ofs:
                # Temporary fix for the strange offset bug: nudge both points.
                outThisframe = [outThisframe[0], outThisframe[1] + 1, outThisframe[2] + 0.01]
                outNextframe = [outNextframe[0], outNextframe[1] + 1, outNextframe[2] + 0.01]
                Output.append([outNextframe, outThisframe])
            else:
                Output.append([outThisframe, outNextframe])
    except (KeyError, IndexError):
        # Original used a bare except; narrowed to the two expected cases:
        # no points indexed on this frame / track data shorter than expected.
        pass
    return Output
'''================================================================================
; Function: GetNearestPoints(myList,myFrame):
; Note(s): N/A
;=================================================================================='''
def GetNearestPoints(refpoint,pointList,_rev=False):
    """
    Estimate the motion offset at refpoint by inverse-distance weighting the
    motion of the three nearest tracked point pairs.

    Parameters:
        refpoint  - A point formatted [frame, x, y].
        pointList - Pairs [[frame, x, y], [frame, x, y]] describing each
                    track's position on this frame and the next.
        _rev      - Unused; kept for call-site compatibility.

    Returns:
        [xOffset, yOffset] - The weighted motion delta, or [0.0, 0.0] when
        fewer than three pairs are available.
    """
    if len(pointList) < 3:
        return [0.0, 0.0]
    ref_x = refpoint[1]
    ref_y = refpoint[2]
    # Distance from refpoint to the start of every motion pair (+1 keeps the
    # inverse finite when the distance is zero).
    dists = [math.hypot(pair[0][1] - ref_x, pair[0][2] - ref_y) + 1 for pair in pointList]
    ranked = sorted(enumerate(dists), key=lambda entry: entry[1])
    (i0, d0), (i1, d1), (i2, d2) = ranked[0], ranked[1], ranked[2]
    w0 = 1 / d0
    w1 = 1 / d1
    w2 = 1 / d2
    if w0 == 1:
        # refpoint sits exactly on the nearest tracked point: use it alone.
        w1 = 0
        w2 = 0
    total = w0 + w1 + w2
    if total != 0:
        w0, w1, w2 = w0 / total, w1 / total, w2 / total
    xOffset = 0.0
    yOffset = 0.0
    # Weighted sum of each pair's motion delta (end minus start).
    for idx, weight in ((i0, w0), (i1, w1), (i2, w2)):
        start = pointList[idx][0]
        end = pointList[idx][1]
        xOffset += (end[1] - start[1]) * weight
        yOffset += (end[2] - start[2]) * weight
    return [xOffset, yOffset]
def GrabListData():
    """
    Collect 2D feature tracks from the CameraTracker node named "si_ct" and
    build a per-frame lookup index for fast access.

    Returns:
        [tracks, frame_index] where
        tracks      - [ [[frame, x, y], ...], ... ] from ExportCameraTrack.
        frame_index - {frame: [[x, y, trackIdx, firstFrame, lastFrame], ...]}.
    """
    Node = nuke.toNode("si_ct") #change this to your tracker node!
    #01: Get all points from the cameratracker node.
    _return = ExportCameraTrack(Node)
    #02: To optimize the lookups we index all the data into frame lists
    #    containing [x,y,index,firstframe,lastframe].
    #    This gives a 40+ times performance boost over scanning all tracks.
    item_dict = {}
    for list_index, big_lst in enumerate(_return):
        for lst in big_lst:
            if lst[0] in item_dict:
                item_dict[lst[0]] += [lst[1:]+[list_index]+[_return[list_index][0][0], _return[list_index][len(_return[list_index])-1][0]],] # Append
            else:
                item_dict[lst[0]] = [lst[1:]+[list_index]+[_return[list_index][0][0], _return[list_index][len(_return[list_index])-1][0]],] # Initialize
    return [_return,item_dict]
'''================================================================================
Simple median.
;=================================================================================='''
def median(lst):
    """
    Return the median of the values in lst.

    For an odd number of items this is the middle value; for an even number
    it is the mean of the two middle values (as a float). Raises IndexError
    on an empty list.
    """
    ordered = sorted(lst)
    mid = (len(ordered) - 1) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid] + ordered[mid + 1]) / 2.0
'''================================================================================
RangeKeeper, use to store and calculate frame ranges.
;=================================================================================='''
class rangeKeeper():
    """
    Stores the reference frame and solve range for one run.

    type 0 solves both ways from the current frame, type 1 only forward,
    type 2 only backwards. The range comes either from the current node's
    InputFrom/InputTo knobs or, when 'assistStep' is enabled, from a window
    of AssistStepSize frames around the current frame.
    """
    def __init__(self,_type):
        # _type: 0 = both ways, 1 = forward only, 2 = backwards only.
        self.frameForRef = 0
        self.StartFrame = 0
        self.EndFrame = 0
        self.type = _type
        self.appendAnimation = False
        self.initvalues()
    def initvalues(self):
        """Read the range knobs from nuke.thisNode() and fill in the frame range."""
        self.appendAnimation = nuke.thisNode().knob("appendAnimation").value()
        if nuke.thisNode().knob("assistStep").value(): #We only run "x" number of frames
            _thisFrame = nuke.frame()
            _startFrame = _thisFrame - int(nuke.thisNode().knob("AssistStepSize").value())
            _endFrame = _thisFrame + int(nuke.thisNode().knob("AssistStepSize").value())
        else: #We run the full range
            _thisFrame = nuke.frame()
            _startFrame = int(nuke.thisNode().knob("InputFrom").value())
            _endFrame = int(nuke.thisNode().knob("InputTo").value())
        if self.type == 0: #Both Ways
            self.frameForRef = _thisFrame
            self.StartFrame = _startFrame
            self.EndFrame = _endFrame
        elif self.type == 1: #Only forward
            self.frameForRef = _thisFrame
            self.StartFrame = _thisFrame
            self.EndFrame = _endFrame
        elif self.type == 2: #Only Backwards
            self.frameForRef = _thisFrame
            self.StartFrame = _startFrame
            self.EndFrame = _thisFrame
'''================================================================================
; Function: KeyframeReducer():
; Description: Removes unwanted keyframes based on a threshold
;
; Note(s): _method #0 = Local, 1 = Median, 2 = Average
;=================================================================================='''
def KeyframeReducer(knob):
    """
    Experimental: drop keyframes whose frame-to-frame motion is below a
    threshold, keeping the first key of each low-motion run.

    Parameters:
        knob - An XY knob whose values are read via getValueAt.

    NOTE(review): threshold and frame range are hard-coded and the
    non-animated branch only prints help() — this looks like unfinished
    debug code.
    """
    myKnob = knob
    threshold = 1.5 # minimum per-frame motion to keep a key
    firstFrame = 1
    lastFrame = 99
    reduce = False # True once we are inside a low-motion run
    for frame in range(firstFrame,lastFrame):
        # Frame-to-frame XY delta of the knob value.
        xd = myKnob.getValueAt(frame)[0]-myKnob.getValueAt(frame+1)[0]
        yd = myKnob.getValueAt(frame)[1]-myKnob.getValueAt(frame+1)[1]
        delta = math.sqrt(xd*xd+yd*yd)
        if delta < threshold:
            # Keep the first key of a low-motion run; drop the rest.
            if reduce:
                print "Reduce this",frame
                if myKnob.isAnimated():
                    myKnob.removeKeyAt(frame)
                else:
                    print help(myKnob.setValueAt)
                    #myKnob.removeKeyAt(frame)
                    #myKnob.setValueAt(myKnob.getValueAt(frame)[0],frame,0)
                    #myKnob.setValueAt(myKnob.getValueAt(frame)[1],frame,1)
            reduce = True
            print frame, delta
        else:
            reduce = False
'''================================================================================
; Function: Solve2DTransform():
; Description: Used to solve the trackers in a 2dtracker node.
;
; Note(s): _method #0 = Local, 1 = Median, 2 = Average
;=================================================================================='''
def CalculatePositionDelta(_method,_refpointList,temp_pos=[0,0]):
    """
    Compute the motion delta [dx, dy] for one frame step from a list of
    reference point pairs.

    Parameters:
        _method       - 0 = local inverse-distance interpolation,
                        1 = global median, anything else = global average.
        _refpointList - Pairs [[frame, x, y], [frame, x, y]] of each track's
                        position on this frame and the next.
        temp_pos      - Current [x, y] position; only used by the local
                        method. (Mutable default is shared between calls but
                        never mutated here.)

    Returns:
        [dx, dy] - The estimated motion delta.
    """
    if _method == 0:
        # Local: weight the motion of the nearest tracked points around us.
        dx, dy = GetNearestPoints([0, temp_pos[0], temp_pos[1]], _refpointList)
    elif _method == 1:
        # Global median: robust against outlier tracks.
        dx = median([float(pair[1][1]) - float(pair[0][1]) for pair in _refpointList])
        dy = median([float(pair[1][2]) - float(pair[0][2]) for pair in _refpointList])
    else:
        # Global average; the epsilon keeps an empty list from dividing by zero.
        dx = 0
        dy = 0
        for pair in _refpointList:
            dx += float(pair[1][1]) - float(pair[0][1])
            dy += float(pair[1][2]) - float(pair[0][2])
        dx = dx / (len(_refpointList) + 0.00001)
        dy = dy / (len(_refpointList) + 0.00001)
    return [dx, dy]
'''
thisFrame = nuke.frame()
myNode =nuke.toNode("Transform15")
myKnob = myNode["translate"]
animationsX = myKnob.animation(0) #X-axis animations
animationsY = myKnob.animation(1) #Y-axis animations
animationList = [] #List to contain the animationnlist
preProcessList = []
postProcessList = []
for x,keys in enumerate(animationsX.keys()):
if keys.x<thisFrame:
preProcessList.append([keys.x,keys.y,animationsY.keys()[x].y])
else:
postProcessList.append([keys.x,keys.y,animationsY.keys()[x].y])
print preProcessList
print postProcessList
'''
'''================================================================================
; Function: Solve2DTransform():
; Description: Used to solve the trackers in a 2dtracker node.
;
; Note(s): N/A
;=================================================================================='''
def Solve2DTransform(_node):
    """
    Solve the 'translate' knob of a Transform node by integrating per-frame
    motion deltas derived from CameraTracker features.

    Solves backwards from RangeKeeper.frameForRef to RangeKeeper.StartFrame
    and forwards to RangeKeeper.EndFrame, keyframing the translate knob on
    every frame. Requires the module-level RangeKeeper to be initialized
    (see Initializer) and a CameraTracker node reachable via GrabListData.

    Parameters:
        _node - A Transform (or TransformMasked) node with 'translate' and
                'center' knobs.
    """
    global RangeKeeper
    #Define Variables
    solve_method = int(nuke.thisNode().knob("AssistType").getValue()) #0 = Local, 1 = Median, 2 = Average
    frameForRef = RangeKeeper.frameForRef
    StartFrame = RangeKeeper.StartFrame
    EndFrame = RangeKeeper.EndFrame
    myNode = _node
    myKnob = myNode.knob("translate")
    myKnobCenter = myNode.knob("center")
    useExsistingKeyframes = True
    #Set some initial defaults
    init_pos = [0,0]
    center_pos = [0,0]
    temp_pos = [0,0]
    _xy = [0,0]
    frameindex = 0
    #Read data from the knobs
    PointData = GrabListData()
    init_pos = myKnob.getValue()
    center_pos = myKnobCenter.getValue()
    if useExsistingKeyframes:
        animationsX = myKnob.animation(0) #X-axis animations
        animationsY = myKnob.animation(1) #Y-axis animations
        if not animationsY or not animationsX:
            useExsistingKeyframes = 0
        else:
            # Split existing keys into those before and after the ref frame.
            preProcessList = [] #Initialize array
            postProcessList = [] #Initialize array
            for x,keys in enumerate(animationsX.keys()):
                if keys.x<frameForRef: #If the item is below the refframe it should be processed in the back process
                    preProcessList.append([keys.x,keys.y,animationsY.keys()[x].y])
                else:
                    postProcessList.append([keys.x,keys.y,animationsY.keys()[x].y])
            frameindex = len(preProcessList)-1 #The next keyframe to compare with is the last in the stack when going backwards
    #Clear animation
    if not RangeKeeper.appendAnimation:
        myKnob.clearAnimated() #Only if overwrite!!
    myKnob.setAnimated(0)
    myKnob.setAnimated(1)
    #Re-write the initial position
    myKnob.setValueAt(init_pos[0],frameForRef,0)
    myKnob.setValueAt(init_pos[1],frameForRef,1)
    # Fold the center position into the working position so the solver runs
    # in absolute space; it is subtracted again when keys are written below.
    init_pos[0] += center_pos[0]
    init_pos[1] += center_pos[1]
    temp_pos = init_pos
    #--------------------------
    #Resolve backwards [<-----]
    for frame in reversed(range(StartFrame,frameForRef)):
        RefPointList = GetAnimtionList(PointData[0],PointData[1],frame,True)
        _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
        temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
        myKnob.setValueAt(temp_pos[0]-center_pos[0],frame,0) #Add a keyframe with the values
        myKnob.setValueAt(temp_pos[1]-center_pos[1],frame,1)
        # Report the drift whenever we pass one of the pre-existing keyframes.
        if frameindex >=0 and useExsistingKeyframes:
            if frame == preProcessList[frameindex][0]:
                tempX = preProcessList[frameindex][1]
                tempY = preProcessList[frameindex][2]
                print "Reached keyframe",preProcessList[frameindex][0],preProcessList[frameindex][1],preProcessList[frameindex][2]
                print "Dif:", tempX-(temp_pos[0]-center_pos[0]),tempY-(temp_pos[1]-center_pos[1])
                frameindex -= 1
    #-------------------------
    #Resolve forwards [----->]
    temp_pos = init_pos
    for frame in range(frameForRef,EndFrame):
        RefPointList = GetAnimtionList(PointData[0],PointData[1],frame)
        _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
        temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
        myKnob.setValueAt(temp_pos[0]-center_pos[0],frame+1,0)
        myKnob.setValueAt(temp_pos[1]-center_pos[1],frame+1,1)
'''================================================================================
; Function: SolveCornerpin():
; Description: Used to solve the points of a cornerpin
;
; Note(s): N/A
;=================================================================================='''
def SolveCornerpin(_node):
    """
    Solve the four 'to' corner knobs of a CornerPin2D node by integrating
    per-frame motion deltas from CameraTracker features.

    Each corner is solved independently, backwards then forwards from the
    current frame, writing keyframes to to1..to4.

    Parameters:
        _node - A CornerPin2D node.
    """
    #Define Variables
    solve_method = int(nuke.thisNode().knob("AssistType").getValue()) #0 = Local, 1 = Median, 2 = Average
    frameForRef = nuke.frame()
    StartFrame = int(nuke.thisNode().knob("InputFrom").value())
    EndFrame = int(nuke.thisNode().knob("InputTo").value())
    myNode = _node
    myKnob = myNode.knob("translate")
    myKnobCenter = myNode.knob("center")
    #Set some initial defaults
    init_pos = [0,0]
    # NOTE(review): center_pos is never reassigned in this function, so the
    # -center_pos[...] terms below are no-ops kept for symmetry with
    # Solve2DTransform.
    center_pos = [0,0]
    temp_pos = [0,0]
    _xy = [0,0]
    #Read data from the knobs
    knobs = [myNode['to1'],myNode['to2'],myNode['to3'],myNode['to4']]
    RefPointList = []
    for myKnob in knobs:
        init_pos = myKnob.getValue()
        RefPointList.append([init_pos,myKnob])
        myKnob.clearAnimated() #Only if overwrite!!
        myKnob.setAnimated(0)
        myKnob.setAnimated(1)
        myKnob.setValueAt(init_pos[0],frameForRef,0)
        myKnob.setValueAt(init_pos[1],frameForRef,1)
    PointData = GrabListData()
    # NOTE(review): the loop below rebinds the name RefPointList while
    # iterating it; Python keeps iterating the original list object, but the
    # shadowing makes this easy to misread.
    for item in RefPointList:
        temp_pos = item[0]
        myKnob = item[1]
        #--------------------------
        #Resolve backwards [<-----]
        for frame in reversed(range(StartFrame,frameForRef)):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame,True)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            myKnob.setValueAt(temp_pos[0]-center_pos[0],frame,0) #Add a keyframe with the values
            myKnob.setValueAt(temp_pos[1]-center_pos[1],frame,1)
        #-------------------------
        #Resolve forwards [----->]
        temp_pos = item[0]
        for frame in range(frameForRef,EndFrame):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            myKnob.setValueAt(temp_pos[0]-center_pos[0],frame+1,0)
            myKnob.setValueAt(temp_pos[1]-center_pos[1],frame+1,1)
'''================================================================================
; Function: SolveCurves():
; Description: Used to solve the curves knobs (like roto, rotopaint and splinewarps)
;
; Note(s): N/A
;=================================================================================='''
def SolveCurves(_node,_isSplineWarp=False):
    """
    Solve the selected control points of a 'curves' knob (Roto, RotoPaint or
    SplineWarp) by integrating per-frame motion deltas from CameraTracker
    features.

    Parameters:
        _node         - A node with a 'curves' knob.
        _isSplineWarp - Unused in this body; kept for call-site compatibility.
    """
    #Define Variables
    solve_method = int(nuke.thisNode().knob("AssistType").getValue()) #0 = Local, 1 = Median, 2 = Average
    frameForRef = nuke.frame()
    StartFrame = int(nuke.thisNode().knob("InputFrom").value())
    EndFrame = int(nuke.thisNode().knob("InputTo").value())
    myNode = _node
    # NOTE(review): myKnob/myKnobCenter are unused below; likely copied from
    # Solve2DTransform.
    myKnob = myNode.knob("translate")
    myKnobCenter = myNode.knob("center")
    #Set some initial defaults
    init_pos = [0,0]
    center_pos = [0,0]
    temp_pos = [0,0]
    _xy = [0,0]
    #Read data from the knobs
    RefPointListInt=[]
    for item in _node["curves"].getSelected(): #Only apply to selected roto items
        for subitem in item:
            # Shapes expose position via .center; bare points respond to
            # getPosition directly (the except handles the latter).
            try:
                RefPointListInt.append([subitem.center.getPosition(frameForRef)[0],subitem.center.getPosition(frameForRef)[1],subitem.center])
            except:
                RefPointListInt.append([subitem.getPosition(frameForRef)[0],subitem.getPosition(frameForRef)[1],subitem])
    PointData = GrabListData()
    for item in RefPointListInt:
        temp_pos = [item[0],item[1]]
        print "tempbos:",temp_pos
        centerPoint = item[2]
        #--------------------------
        #Resolve backwards [<-----]
        for frame in reversed(range(StartFrame,frameForRef)):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame,True)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            centerPoint.addPositionKey(frame,[temp_pos[0],temp_pos[1] ]) #Add a keyframe with the values
        #-------------------------
        #Resolve forwards [----->]
        temp_pos = [item[0],item[1]]
        for frame in range(frameForRef,EndFrame):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            centerPoint.addPositionKey(frame+1,[temp_pos[0],temp_pos[1] ]) #Add a keyframe with the values
'''================================================================================
; Function: Solve2DTracker():
; Description: Used to solve the trackers in a 2dtracker node.
;
; Note(s): N/A
;=================================================================================='''
def Solve2DTracker(_node):
    """
    Solve every tracker of a Tracker node by integrating per-frame motion
    deltas from CameraTracker features.

    Parameters:
        _node - A Tracker node with a 'tracks' table knob.
    """
    #Define Variables
    solve_method = int(nuke.thisNode().knob("AssistType").getValue()) #0 = Local, 1 = Median, 2 = Average
    frameForRef = nuke.frame()
    StartFrame = int(nuke.thisNode().knob("InputFrom").value())
    EndFrame = int(nuke.thisNode().knob("InputTo").value())
    #Grab the number of trackers by parsing the serialized 'tracks' knob.
    n_tracks = int(_node["tracks"].toScript().split(" ")[3])
    #Constants: the 'tracks' table stores numColumns values per tracker;
    #columns colTrackX/colTrackY hold the track's X/Y animation.
    numColumns = 31
    colTrackX = 2
    colTrackY = 3
    RefPointList = []
    for x in range(0,n_tracks):
        track_a = [float(_node.knob("tracks").getValue(numColumns*x + colTrackX)),float(_node.knob("tracks").getValue(numColumns*x + colTrackY))]
        RefPointList.append(track_a)
    print "the ref point list:",RefPointList
    #Grab data from the camera tracker and convert it into a format we can use.
    PointData = GrabListData()
    print "--Initializing Main Loop--"
    trackIdx = 0
    for item in RefPointList:
        temp_pos = item
        #--------------------------
        #Resolve backwards [<-----]
        for frame in reversed(range(StartFrame,frameForRef)):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame,True)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            _node.knob("tracks").setValueAt(temp_pos[0],frame,numColumns*trackIdx + colTrackX)
            _node.knob("tracks").setValueAt(temp_pos[1],frame,numColumns*trackIdx + colTrackY)
        #-------------------------
        #Resolve forwards [----->]
        temp_pos = item
        for frame in range(frameForRef,EndFrame):
            RefPointList = GetAnimtionList(PointData[0],PointData[1],frame)
            _xy = CalculatePositionDelta(solve_method,RefPointList,temp_pos)
            temp_pos = [temp_pos[0]+_xy[0],temp_pos[1]+_xy[1]] #Add our calculated motion delta to the current position
            _node.knob("tracks").setValueAt(temp_pos[0],frame+1,numColumns*trackIdx + colTrackX)
            _node.knob("tracks").setValueAt(temp_pos[1],frame+1,numColumns*trackIdx + colTrackY)
        trackIdx += 1
def Initializer(_method):
    """Entry point: remember the active rangeKeeper for ``_method`` in
    the module-level RangeKeeper, then solve the current node selection."""
    global RangeKeeper
    RangeKeeper = rangeKeeper(_method)
    ResolveSelectedNodes()
'''================================================================================
; Function: ResolveSelectedNodes():
; Description: Used to find what functions to run for the given nodes.
;
; Note(s): N/A
;=================================================================================='''
def ResolveSelectedNodes():
    """Dispatch each selected node in the node graph to the matching
    solver based on its class; warn if nothing assistable was selected.

    NOTE: Python 2 code (print statements) targeting Nuke's API.
    """
    frameForRef = int(nuke.thisNode().knob("RefrenceFrameInput").value()) #Not used here... yet
    # NOTE(review): StartFrame/EndFrame are also unused in this function.
    StartFrame = int(nuke.thisNode().knob("InputFrom").value())
    EndFrame = int(nuke.thisNode().knob("InputTo").value())
    selectedNodes = nuke.root().selectedNodes()
    sucess = False
    for item in selectedNodes:
        itemclass = item.Class()
        if itemclass == "CornerPin2D":
            sucess = True
            SolveCornerpin(item)
            print "Cornerpin"
        elif itemclass == "Transform" or itemclass == "TransformMasked":
            sucess = True
            Solve2DTransform(item)
            print "Transform"
        elif itemclass == "Roto" or itemclass == "RotoPaint":
            sucess = True
            # Rotos are solved either as one transform or per-curve,
            # depending on the node's checkbox.
            if nuke.thisNode().knob("assist_rototransform").value():
                Solve2DTransform(item)
            else:
                SolveCurves(item)
            print "roto or paint"
        elif itemclass == "SplineWarp3":
            sucess = True
            SolveCurves(item,True)
            print "SplineWarp3"
        elif itemclass == "Tracker4":
            sucess = True
            Solve2DTracker(item)
            print "Tracker"
        else:
            print "selected node not supported:",itemclass
    if not sucess:
        nuke.message("Please select a assistable node in the nodegraph.")
'''================================================================================
; Function: StickIT():
; Description: Used to solve the base build-in warping module
;
; Note(s): N/A
;=================================================================================='''
def StickIT():
    """Solve the built-in warping module: for every reference point,
    build a per-frame animation by chaining nearest-point offsets
    backwards then forwards from the reference frame, then create the
    warp-pin pairs on the "si_sw" node.

    NOTE: Python 2 code (print statement, time.clock) targeting Nuke.
    """
    #Define Variables
    frameForRef = int(nuke.thisNode().knob("RefrenceFrameInput").value())
    StartFrame = int(nuke.thisNode().knob("InputFrom").value())
    EndFrame = int(nuke.thisNode().knob("InputTo").value())
    if frameForRef > EndFrame or frameForRef < StartFrame:
        nuke.message("You must set a reference frame inside the active range")
    else:
        taskB = nuke.ProgressTask('Calculating Solve, please wait...')
        NodePin = nuke.toNode("si_sw") #change this to your tracker node!
        #Grab data from the camera tracker and convert it into a format we can use.
        PointData = GrabListData()
        #03: Get a set of reference points. This is the points we want to move.
        RefPointList = GetAnimtionList(PointData[0],PointData[1],frameForRef,False,True)
        #04: Go through all of the frames and triangulate best points to move the refpoints with.
        # NOTE: time.clock() was removed in Python 3.8; fine under the
        # Python 2 interpreter this script targets.
        start = time.clock()
        finalAnimation = []
        for item in RefPointList:
            # zx/zy snapshot the reference position for the forward pass,
            # because the backward pass mutates item[0] in place.
            zx = item[0][1]
            zy = item[0][2]
            tempAnimation = []
            tempAnimation.append([frameForRef,item[0][1],item[0][2]]) #Add a keyframe on the reference frame
            #Now start from the ref frame and move back
            for frame in reversed(range(StartFrame,frameForRef)):
                newOffset = GetNearestPoints(item[0],GetAnimtionList(PointData[0],PointData[1],frame,True))
                tempAnimation.append([frame,item[0][1]+newOffset[0],item[0][2]+newOffset[1]])
                item[0][1] = item[0][1]+newOffset[0]
                item[0][2] = item[0][2]+newOffset[1]
            #Now start from the ref frame and move forward
            for frame in range(frameForRef,EndFrame):
                newOffset = GetNearestPoints([0,zx,zy],GetAnimtionList(PointData[0],PointData[1],frame))
                tempAnimation.append([frame+1,zx+newOffset[0],zy+newOffset[1]])
                zx = zx+newOffset[0]
                zy = zy+newOffset[1]
            #Now add the animation created to the animation list
            finalAnimation.append(sorted(tempAnimation))
        end = time.clock()
        print "%.2gs" % (end-start)
        CreateWarpPinPair(NodePin,finalAnimation,frameForRef)
        del(taskB)
#GLOBALS:
# Module-level holder for the active rangeKeeper instance (set by
# Initializer); 0 means "not initialized yet".
RangeKeeper = 0
import tkinter as tk
import numpy as np
import time
import matplotlib as mpl
from PIL import Image, ImageTk
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2Tk
class MoviePlayer(tk.Frame):
    """Tk window that repeatedly pulls frames from a data source and
    blits them onto a canvas, with an embedded matplotlib figure.

    Key bindings: 'f' speeds playback up, 's' slows it down.
    """
    def __init__(self, data, Ts=1):
        """``data`` must expose ``shape`` == (n, h, w) and a ``get()``
        method returning a 2-D uint8 array; ``Ts`` is the inter-frame
        period in seconds."""
        # image setup
        self.data = data  # source of images
        self.n, self.h, self.w = self.data.shape
        self.imshape = (self.w, self.h)
        self.Ts = Ts
        # tkinter setup
        self.root = tk.Tk()
        self.master = self.root
        window_size = [self.w, self.h]
        tk.Frame.__init__(self, master=self.master,
                          width=window_size[0], height=window_size[1])
        self.canvas = tk.Canvas(self, width=self.w, height=self.h)
        # Populate frame
        self.canvas.place(relx=0, rely=0, x=0, y=0)
        self.pack()
        # tk bindings
        self.bind("<Destroy>", self.end)
        self.root.bind('f', self.faster)
        self.root.bind('s', self.slower)
        # Embedded matplotlib figure + toolbar (demo content).
        # NOTE(review): relies on matplotlib.figure being importable as
        # mpl.figure via the backend import above -- confirm.
        fig = mpl.figure.Figure(figsize=(2, 1))
        ax = fig.add_axes([0, 0, 1, 1])
        self.line, = ax.plot(np.arange(1000), np.random.random(1000))
        canvas = FigureCanvasTkAgg(fig, self)
        canvas.get_tk_widget().place(relx=.5, rely=.5)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack()
        # runtime variables
        self._recall = None  # id of the pending `after' callback
        self.ended = False
        # run
        self.after(0, self.step)
    def faster(self, *args):
        """Shorten the frame period (speed playback up)."""
        self.Ts /= 1.5
    def slower(self, *args):
        """Lengthen the frame period (slow playback down)."""
        self.Ts *= 1.5
    def step(self):
        """Fetch, convert and display one frame, then reschedule itself."""
        if self.ended:
            return
        # acquire image
        self.im = Image.fromarray(self.data.get())
        # Keep references on self so Tk's image is not garbage-collected
        # while displayed.
        self.photo = ImageTk.PhotoImage(image=self.im)  # 1-3 msec
        self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)  # 1-5 msec
        # Fix: store the `after' id in the attribute initialised in
        # __init__ (the original assigned `self.recall`, so `_recall`
        # stayed None forever).
        self._recall = self.after(int(self.Ts * 1000), self.step)
    def end(self, *args):
        """Stop the playback loop and leave the Tk mainloop."""
        self.ended = True
        self.root.quit()
class Data():
    """Synthetic frame source producing random uint8 images.

    Provides the minimal interface MoviePlayer expects: a ``shape``
    attribute (n_frames, height, width) and a ``get()`` method.
    """
    def __init__(self, shape=None):
        # Generalized: the original hard-coded [3000, 512, 512]; that
        # remains the default, so existing callers are unaffected.
        self.shape = [3000, 512, 512] if shape is None else list(shape)
    def get(self):
        """Return one (height, width) uint8 frame stretched to 0..255."""
        frame = np.random.random(self.shape[1:])
        minn, maxx = frame.min(), frame.max()
        if maxx == minn:
            # Degenerate constant frame: avoid division by zero.
            return np.zeros(frame.shape, dtype=np.uint8)
        return (255 * ((frame - minn) / (maxx - minn))).astype(np.uint8)
if __name__ == '__main__':
    # Build the player at ~30 fps over the synthetic source.
    # NOTE(review): tk mainloop() is never started here, so the
    # self.step callbacks scheduled via after() only run if an event
    # loop is entered elsewhere (e.g. an interactive session) -- confirm.
    data = Data()
    mp = MoviePlayer(data, Ts=1/30)
|
# Python 2-era scraping setup (cookielib became http.cookiejar in
# Python 3).
# NOTE(review): `import beautifulsoup4` looks wrong -- the package is
# installed as beautifulsoup4 but the importable module is `bs4`;
# confirm this line ever worked.
import beautifulsoup4
import cookielib
import mechanize
# Browser with an LWP cookie jar and lenient handlers; robots.txt is
# deliberately ignored.
br = mechanize.Browser()
jar = cookielib.LWPCookieJar()
br.set_cookiejar(jar)
br.set_handle_equiv( True )
br.set_handle_gzip( True )
br.set_handle_redirect( True )
br.set_handle_referer( True )
br.set_handle_robots( False )
|
def cal(n):
    """Score ``n`` by digit length and leading digit (contest helper).

    Mechanically:
      - n == 1            -> 0
      - 2 <= n <= 10      -> 2 if n >= 7, 1 if n >= 4, else 0
      - n > 10, d digits  -> (2**d - 2), plus 2*2**d if the leading
                             digit is >= 7, plus 2**d if it is 4..6.

    Fixes vs. the original: n == 1 now returns 0 (it previously fell
    through and returned None), and the leftover debug ``print(n, a)``
    that corrupted the program's judged output was removed.
    """
    if n == 1:
        return 0
    if n <= 10:
        if n >= 7:
            return 2
        if n >= 4:
            return 1
        return 0
    digits = len(str(n))   # equivalent to the original power-of-ten loop
    ans = 2 ** digits - 2  # contribution of all shorter digit lengths
    lead = int(str(n)[0])
    if lead >= 7:
        ans += 2 * 2 ** digits
    elif lead >= 4:
        ans += 2 ** digits
    return ans
# Read the number of queries, then for each query print cal(r) and
# cal(l). (Removed four unused variables: x, y, a, b.)
# NOTE(review): output order is (r, l); presumably the caller combines
# them into a range answer -- confirm against the problem statement.
for _ in range(int(input())):
    l, r = map(int, input().split())
    print(cal(r), cal(l))
|
import functools
import re
from typing import Any, Dict, Set, Tuple, Type
from django.core.exceptions import FieldDoesNotExist
from django.db.models import OneToOneRel, Q
from django.db.models.functions import Coalesce
from tate.legacy.models import LegacyPageMixin
from wagtail.core.models import Page, get_page_models
from wagtail.core.query import PageQuerySet
def get_legacy_page_type_related_names(
    values: Dict[Type, str], model_class: Type, known_subclasses=None, prefix=None
):
    """Map each concrete LegacyPageMixin page model to the ORM lookup
    path (e.g. ``"childpage__grandchildpage"``) reaching it from
    ``model_class`` via one-to-one (multi-table inheritance) relations.

    Mutates and returns ``values``. ``known_subclasses`` and ``prefix``
    are recursion state and are normally left at their defaults.
    """
    if known_subclasses is None:
        # Concrete page models that actually carry the legacy fields.
        known_subclasses = set(
            model
            for model in get_page_models()
            if not model._meta.abstract
            and issubclass(model, model_class)
            and issubclass(model, LegacyPageMixin)
        )
    for rel in (
        rel
        for rel in model_class._meta.related_objects
        if isinstance(rel, OneToOneRel) and rel.related_model in known_subclasses
    ):
        # Chain the relation onto the path built so far, record it, and
        # recurse into grandchildren.
        rel_name = f"{prefix}__{rel.name}" if prefix else rel.name
        values[rel.related_model] = rel_name
        get_legacy_page_type_related_names(
            values, rel.related_model, known_subclasses, rel_name
        )
    return values
@functools.lru_cache(maxsize=None)
def get_concrete_subclass_related_names(model: Type) -> Dict[Type, str]:
    """Cached wrapper around get_legacy_page_type_related_names.

    Unbounded caching is safe here: model classes form a small, fixed
    key set for the lifetime of the process.
    """
    return get_legacy_page_type_related_names({}, model)
@functools.lru_cache(maxsize=None)
def get_concrete_local_field_names(model: Type) -> Set[str]:
    """Names of the fields declared directly on ``model`` (parents and
    hidden fields excluded); cached per model class."""
    local_fields = model._meta.get_fields(
        include_parents=False, include_hidden=False
    )
    return {field.name for field in local_fields}
def get_legacy_page_field_values(
    field_name: str, queryset: PageQuerySet = None, exclude_nulls=False
) -> Tuple[Any]:
    """Collect ``field_name`` values across every legacy page subtype.

    Annotates the queryset with a Coalesce over each subtype relation
    that actually declares the field, so a single query covers all
    subtypes. Returns an empty tuple when no subtype has the field.
    """
    if queryset is None:
        queryset = Page.objects.all()
    coalesce_keys = []
    for model, related_name in get_concrete_subclass_related_names(
        queryset.model
    ).items():
        try:
            # Keep only subtypes that declare the field.
            model._meta.get_field(field_name)
            coalesce_keys.append(f"{related_name}__{field_name}")
        except FieldDoesNotExist:
            pass
    if not coalesce_keys:
        return ()
    queryset = queryset.annotate(**{field_name: Coalesce(*coalesce_keys)})
    if exclude_nulls:
        queryset = queryset.exclude(**{f"{field_name}__isnull": True})
    return tuple(queryset.values_list(field_name, flat=True))
def get_legacy_page_matches(
    value: Any, *field_names: str, queryset: PageQuerySet = None, lookup_type=None
):
    """Filter pages where any of ``field_names`` matches ``value``.

    Builds one OR'ed Q covering: the base model's own fields, its
    LEGACY_ID_FIELD alias for the pseudo-field "legacy_id", and the same
    lookups routed through every legacy subtype relation. Returns an
    empty queryset when no field resolves anywhere.
    """
    if lookup_type is None:
        lookup_type = "exact"
    q = Q()
    if queryset is None:
        queryset = Page.objects.all()
    for name in field_names:
        # "legacy_id" is an alias resolved through LEGACY_ID_FIELD when
        # the model declares one.
        if name == "legacy_id" and getattr(queryset.model, "LEGACY_ID_FIELD", None):
            q |= Q(**{f"{queryset.model.LEGACY_ID_FIELD}__{lookup_type}": value})
        else:
            try:
                queryset.model._meta.get_field(name)
            except FieldDoesNotExist:
                pass
            else:
                q |= Q(**{f"{name}__{lookup_type}": value})
    for name in field_names:
        pass
    for model, related_name in get_concrete_subclass_related_names(
        queryset.model
    ).items():
        model_field_names = get_concrete_local_field_names(model)
        for name in field_names:
            if name == "legacy_id" and getattr(model, "LEGACY_ID_FIELD", None):
                lookup_field = model.LEGACY_ID_FIELD
            else:
                lookup_field = name
            if lookup_field in model_field_names:
                q |= Q(**{f"{related_name}__{lookup_field}__{lookup_type}": value})
    # An empty Q is falsy: nothing matched anywhere.
    if not q:
        return queryset.none()
    return queryset.filter(q)
def get_legacy_path_matches(value: str, queryset: PageQuerySet = None, exact=True):
    """Match pages by legacy_path, either exactly or ignoring case and
    leading/trailing slashes (regex lookup)."""
    if exact:
        return get_legacy_page_matches(
            value, "legacy_path", queryset=queryset, lookup_type="exact"
        )
    pattern = r"^/?" + re.escape(value.strip("/ ")) + r"/?$"
    return get_legacy_page_matches(
        pattern, "legacy_path", queryset=queryset, lookup_type="iregex"
    )
def get_legacy_id_matches(value: Any, queryset: PageQuerySet = None):
    """Match pages whose legacy id (or LEGACY_ID_FIELD) equals ``value``."""
    return get_legacy_page_matches(value, "legacy_id", queryset=queryset)
|
"""
A program to initialize all fonts used in Kivy Cupertino
"""
from kivycupertino import fonts_path
from kivy.core.text import LabelBase
# Each dict below is passed verbatim to LabelBase.register, so keys must
# match its keyword arguments (name, fn_regular, fn_italic, ...).
fonts = [
    {
        'name': 'San Francisco',
        'fn_regular': fonts_path + 'sf.otf',
        'fn_italic': fonts_path + 'sf-italic.otf',
        'fn_bold': fonts_path + 'sf-bold.otf',
        'fn_bolditalic': fonts_path + 'sf-bold-italic.otf',
    },
    {
        'name': 'New York',
        'fn_regular': fonts_path + 'ny.ttf',
        'fn_italic': fonts_path + 'ny-italic.ttf',
        'fn_bold': fonts_path + 'ny-bold.otf',
        'fn_bolditalic': fonts_path + 'ny-bold-italic.otf',
    },
    {
        # Icon font: only a regular weight exists.
        'name': 'SF Symbols',
        'fn_regular': fonts_path + 'sfsymbols.ttf'
    }
]
# Register every font family with Kivy once, at import time.
for font in fonts:
    LabelBase.register(**font)
|
"""
Defines the standard functions and properties of
"""
import logging, os, operator
from math import sin, cos, atan2, pi
from collections import OrderedDict
import json
import numpy as np
class Model(object):
    '''The abstract class for a grounded semantics model

    Model's Contract:
    - constructor accepts name and already made components
    - classmethods accept the class of their components to instantiate
    - each component is a category/class/hypothesis
    - the model computes P(category | input)
    '''
    def __init__(self, name, components=None, graceful_failure=False):
        '''Instantiate this model with the specified name and components.

        ``components`` defaults to an empty list (the original used a
        shared mutable default argument). ``graceful_failure=True``
        makes unknown component lookups return None instead of raising.
        '''
        self.name = name
        self.components = [] if components is None else components
        self._lookup = {c.name: c for c in self.components}
        self.graceful_failure = graceful_failure
        # Fix: __getitem__'s graceful path logs via self.logger, which
        # previously was never assigned (AttributeError on first OOV).
        self.logger = logging.getLogger(__name__)
    @classmethod
    def from_json(cls, filename, ComponentClass):
        '''Build a model from a JSON file via ComponentClass.from_dict.

        Expected JSON shape:
            {
                'name': name,
                'components': [
                    {'name': component_name, 'parameters': parameters},
                    ...
                ]
            }
        '''
        with open(filename) as fp:
            info = json.load(fp)
        components = list(map(ComponentClass.from_dict, info['components']))
        # Remember each component's position for posterior indexing.
        for i, c in enumerate(components):
            c.index = i
        return cls(info['name'], components)
    def __contains__(self, k):
        '''True when a component named ``k`` exists.'''
        return k in self._lookup
    def __getitem__(self, k):
        '''Look up a component by name; None (graceful) or raise on miss.'''
        if k in self:
            return self._lookup[k]
        if self.graceful_failure:
            self.logger.warning("[-][OOV][{} not in {}]".format(k, self.name))
            return None
        raise OutOfVocabularyException(k)
    def __len__(self):
        return len(self.components)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        if len(self) == 0:
            err_string = "Load with Model.from_json(filename) or .pretrained() instead"
        else:
            err_string = ""
        return "<Model>{}; {} components; {}".format(self.name, len(self), err_string)
    def predict(self, *datum):
        '''
        return component with highest probability
            e.g. argmax_component P(component, datum)
        '''
        if len(datum) == 1 and isinstance(datum[0], (list, tuple)):
            datum = datum[0]
        p_vec = np.array([component(datum) for component in self.components])
        try:
            # 2-D scores: one argmax per column.
            return [self.components[i] for i in p_vec.argmax(axis=0)]
        except TypeError:
            # 1-D scores: the argmax is a scalar and not iterable.
            return self.components[p_vec.argmax()]
    def likelihood(self, datum, component_name):
        '''
        return probability of the component given the datum
            e.g. P(component | datum)
        '''
        assert self[component_name]
        # Fix: posterior returns a name-keyed Distribution, so index by
        # name (the original indexed it with the integer .index, which
        # raised KeyError).
        return self.posterior(datum)[component_name]
    def posterior(self, *datum):
        '''Normalized distribution over components for ``datum``.'''
        if len(datum) == 1 and isinstance(datum[0], (list, tuple)):
            datum = datum[0]
        p_vec = np.array([component(datum) for component in self.components])
        try:
            p_vec /= p_vec.sum(axis=0, keepdims=True)
        except TypeError:
            p_vec /= p_vec.sum()
        return Distribution([c.name for c in self.components], p_vec)
class Component(object):
    """A single category/hypothesis inside a Model.

    Subclasses supply ``pdf`` and ``prior``; calling the component
    evaluates pdf(x) * prior.
    """
    def __init__(self, name, *args, **kwargs):
        self.name = name
        # Cooperative __init__ so this class plays well with mixins;
        # object.__init__ rejects extra arguments, which we ignore.
        try:
            super(Component, self).__init__(*args, **kwargs)
        except TypeError:
            pass
    @classmethod
    def from_dict(cls, info):
        """Alternate constructor from a {'name': ..., ...} mapping."""
        return cls(info['name'], info)
    @property
    def prior(self):
        raise NotImplementedError
    def __repr__(self):
        return self.name
    def __str__(self):
        return self.name
    def __call__(self, *args):
        """Unnormalized score: P(x, component) = pdf(x) * prior."""
        return self.pdf(*args) * self.prior
    def pdf(self, x):
        raise NotImplementedError
class Distribution(object):
    """Maps component names to probabilities with convenience access.

    Attribute access (``dist.some_name``) returns that name's number;
    ``dist.topN`` (e.g. ``dist.top3``) returns a dict of the N largest
    entries.
    """
    def __init__(self, names, numbers):
        # NOTE: the order of `names` is lost here (set), but preserved
        # in `lookup` and `sortd` below.
        self.names = set(names)
        self.numbers = numbers
        self.lookup = dict(zip(names, numbers))
        # Entries sorted by probability, highest first.
        self.sortd = sorted(self.lookup.items(), key=lambda x: x[1], reverse=True)
    def __getattr__(self, key):
        # NOTE(review): __getattr__ only fires when normal attribute
        # lookup fails, so the __dict__ branch below is dead code; and
        # any unknown attribute falls through to return the whole lookup
        # dict, which can confuse tools that probe attributes (copy,
        # pickle) -- confirm this fallback is intended.
        if key in self.__dict__:
            return self.__dict__[key]
        elif key in self.names:
            return self.lookup[key]
        elif 'top' == key[:3] and key[3:].isdigit():
            return dict(self.sortd[:int(key[3:])])
        else:
            return self.lookup
    def __getitem__(self, key):
        return self.lookup[key]
class OutOfVocabularyException(Exception):
    """Raised by Model.__getitem__ for an unknown component name when
    graceful_failure is disabled."""
    pass
|
from .transforms import Compose
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize
from .build import build_transforms
|
""" OPA authorization middleware. """
__author__ = "William Tucker"
__date__ = "2021-01-18"
__copyright__ = "Copyright 2020 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
import logging
from django.conf import settings
from opa_client.opa import OpaClient
from authorize.middleware import AuthorizationMiddleware
from authenticate.utils import get_user
from .exceptions import OPAAuthorizationError
LOG = logging.getLogger(__name__)
class OPAAuthorizationMiddleware(AuthorizationMiddleware):
    """ Middleware for handling authorization via an OPA server. """
    def __init__(self, *args):
        super().__init__(*args)
        # OPA connection and policy location come from Django settings.
        opa_settings = getattr(settings, "OPA_SERVER", {})
        self._client = OpaClient(**opa_settings)
        self._package_path = opa_settings.get("package_path")
        self._rule_name = opa_settings.get("rule_name")
    def _is_authorized(self, request, resource):
        """Ask the OPA server whether ``request`` may access ``resource``."""
        user = get_user(request)
        # Map the HTTP verb onto the abstract action OPA policies expect.
        # NOTE(review): only GET and POST are mapped -- any other method
        # (PUT, DELETE, HEAD, ...) raises KeyError here; confirm those
        # methods are rejected upstream.
        action_map = {
            "GET": "Read",
            "POST": "Write",
        }
        action = action_map[request.method]
        LOG.debug(f"Querying OPA authz server for resource: {resource}")
        subject = None
        if user:
            # NOTE(review): assumes user.groups is directly serializable
            # (a Django related manager would not be) -- confirm what
            # get_user returns.
            subject = {
                "user": user.username,
                "groups": user.groups
            }
        check_data = {
            "resource": resource,
            "subject": subject,
            "action": action
        }
        # Check authorization for resource
        is_authorized = False
        try:
            permission = self._client.check_policy_rule(
                input_data=check_data,
                package_path=self._package_path,
                rule_name=self._rule_name
            )
            # OPA responds with {"result": bool}; default to deny.
            is_authorized = permission.get("result", False)
        except OPAAuthorizationError as e:
            username = user.username if user else "anonymous"
            LOG.info(f"Authorization failed for user: {username}")
            raise e
        return is_authorized
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import deque
from math import factorial
import unittest
FNAME = "input.txt"
TEST_FNAME = "test_input.txt"
def main():
    """Run both puzzle parts on the real input, then the unit tests."""
    data = load_input(FNAME)
    len_preamble = 25
    part1(data, len_preamble)
    part2(data, len_preamble)
    # NOTE: unittest.main() inspects sys.argv and exits the process
    # when the test run finishes.
    print("\nUnittests")
    unittest.main()
def part1(data, len_preamble):
    """Solution to day 9, part 1."""
    number = Cipher(data, len_preamble).wrong_number()
    print(f"First false value is {number}.")
    return number
def part2(data, len_preamble):
    """Solution to day 9, part 2."""
    weakness = Cipher(data, len_preamble).find_weakness()
    if weakness is None:
        print("No solution found!")
        return weakness
    print(f"The encryption weakness is {weakness}.")
    return weakness
def load_input(fname):
    """Read in the data, return as a list."""
    with open(fname, "r") as f:
        return [int(line) for line in f]
class Cipher:
    """XMAS cipher helper (Advent of Code 2020, day 9).

    Maintains a rolling window of ``len_preamble`` numbers; a number is
    valid when it equals the sum of two distinct numbers in the window.
    """
    def __init__(self, cipher, len_preamble):
        self.cipher = cipher
        self.preamble = cipher[:len_preamble]
        self.len_preamble = len_preamble
        # All pairwise sums of the current window, stored in an order
        # that update_possible_vals can maintain incrementally.
        self.possible_vals = self.init_possible_vals()
    def init_possible_vals(self):
        """Seed the pairwise-sum window from the preamble."""
        # Use deque which allows for efficient FIFO operations.
        vals = deque()
        for i, x in enumerate(self.preamble[:-1]):
            for y in self.preamble[i+1:]:
                vals.append(x + y)
        return vals
    def wrong_number(self):
        """Part 1: return the first number that is not a pairwise sum
        of the ``len_preamble`` numbers before it."""
        for idx in range(self.len_preamble, len(self.cipher)):
            if self.cipher[idx] not in self.possible_vals:
                return self.cipher[idx]
            self.update_possible_vals(idx)
        # Fix: the original fell off the list here (a stray `idx += 1`
        # made the final read raise IndexError); fail explicitly instead.
        raise ValueError("no invalid number found in cipher")
    def update_possible_vals(self, idx):
        """Slide the window forward past ``idx``: drop sums involving the
        oldest element, insert sums pairing cipher[idx] with the rest."""
        # The first len_preamble-1 entries are exactly the sums that
        # involve the element leaving the window.
        for _ in range(1, self.len_preamble):
            self.possible_vals.popleft()
        insert_idx = 0
        for i in range(self.len_preamble-1, 0, -1):
            # insert_idx accumulates so each new sum lands at the end of
            # its partner's group, preserving the invariant ordering.
            insert_idx += i
            val = self.cipher[idx] + self.cipher[idx-i]
            self.possible_vals.insert(insert_idx-1, val)
    def find_weakness(self):
        """Part 2. Brute-forced because why not.

        Finds a contiguous run summing to the invalid number and returns
        min(run) + max(run).
        """
        target_val = self.wrong_number()
        for i, x in enumerate(self.cipher):
            sum_ = x
            for j, y in enumerate(self.cipher[i+1:]):
                sum_ += y
                if sum_ == target_val:
                    # The run spans indices i .. i+j+1 inclusive. Fix:
                    # the original sliced [i:i+j+1], silently dropping
                    # the run's last element from the min/max.
                    run = self.cipher[i:i+j+2]
                    return min(run) + max(run)
                elif sum_ > target_val:
                    break
        return None
class TestMethods(unittest.TestCase):
    """Checks parts 1 and 2 against the worked example in test_input.txt."""
    def setUp(self):
        # The example data uses a shorter preamble (5) than the puzzle.
        self.data = load_input(TEST_FNAME)
        self.len_preamble = 5
    def test_part1(self):
        value = part1(self.data, self.len_preamble)
        self.assertEqual(value, 127)
    def test_part2(self):
        value = part2(self.data, self.len_preamble)
        self.assertEqual(value, 62)
if __name__=="__main__":
main()
|
# coding=utf-8
"""
Collects all number values from the db.serverStatus() command, other
values are ignored.
**Note:** this collector expects pymongo 2.4 and onward. See the pymongo
changelog for more details:
http://api.mongodb.org/python/current/changelog.html#changes-in-version-2-4
#### Dependencies
* pymongo
#### Example Configuration
MongoDBCollector.conf
```
enabled = True
hosts = localhost:27017, alias1@localhost:27018, etc
```
"""
import diamond.collector
import datetime
from diamond.collector import str_to_bool
import re
import zlib
try:
import pymongo
except ImportError:
pymongo = None
try:
from pymongo import ReadPreference
except ImportError:
ReadPreference = None
class MongoDBCollector(diamond.collector.Collector):
    """Diamond collector publishing numeric values from MongoDB's
    ``db.serverStatus()``, plus per-database and per-collection stats
    and optional replica-set metrics.

    NOTE: Python 2 code (`basestring`, `reduce`, `long`); expects
    pymongo >= 2.4.
    """
    # Largest possible CRC32 value; used to turn crc32(collection name)
    # into a consistent sampling threshold.
    MAX_CRC32 = 4294967295
    def __init__(self, *args, **kwargs):
        # Per-metric running totals used to derive intervals for the
        # "percent" counters published by _publish_transformed.
        self.__totals = {}
        super(MongoDBCollector, self).__init__(*args, **kwargs)
    def get_default_config_help(self):
        """Describe every supported configuration key."""
        config_help = super(MongoDBCollector, self).get_default_config_help()
        config_help.update({
            'hosts': 'Array of hostname(:port) elements to get metrics from'
                     'Set an alias by prefixing host:port with alias@',
            'host': 'A single hostname(:port) to get metrics from'
                    ' (can be used instead of hosts and overrides it)',
            'user': 'Username for authenticated login (optional)',
            'passwd': 'Password for authenticated login (optional)',
            'databases': 'A regex of which databases to gather metrics for.'
                         ' Defaults to all databases.',
            'ignore_collections': 'A regex of which collections to ignore.'
                                  ' MapReduce temporary collections (tmp.mr.*)'
                                  ' are ignored by default.',
            'collection_sample_rate': 'Only send stats for a consistent subset '
                                      'of collections. This is applied after '
                                      'collections are ignored via '
                                      'ignore_collections Sampling uses crc32 '
                                      'so it is consistent across '
                                      'replicas. Value between 0 and 1. '
                                      'Default is 1',
            'network_timeout': 'Timeout for mongodb connection (in'
                               ' milliseconds). There is no timeout by'
                               ' default.',
            'simple': 'Only collect the same metrics as mongostat.',
            'translate_collections': 'Translate dot (.) to underscores (_)'
                                     ' in collection names.',
            'replace_dashes_in_metric_keys': 'Replace dashes (-) to dots (.)'
                                             ' in database object names and metrics',
            'ssl': 'True to enable SSL connections to the MongoDB server.'
                   ' Default is False',
            'replica': 'True to enable replica set logging. Reports health of'
                       ' individual nodes as well as basic aggregate stats.'
                       ' Default is False',
            'replset_node_name': 'Identifier for reporting replset metrics. '
                                 'Default is _id'
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MongoDBCollector, self).get_default_config()
        config.update({
            'path': 'mongo',
            'hosts': ['localhost'],
            'user': None,
            'passwd': None,
            'databases': '.*',
            'ignore_collections': '^tmp\.mr\.',
            'network_timeout': None,
            'simple': 'False',
            'translate_collections': 'False',
            'replace_dashes_in_metric_keys': 'True',
            'collection_sample_rate': 1,
            'ssl': False,
            'replica': False,
            'replset_node_name': '_id'
        })
        return config
    def collect(self):
        """Collect number values from db.serverStatus()"""
        if pymongo is None:
            self.log.error('Unable to import pymongo')
            return
        hosts = self.config.get('hosts')
        # Convert a string config value to be an array
        if isinstance(hosts, basestring):
            hosts = [hosts]
        # we need this for backwards compatibility
        if 'host' in self.config:
            hosts = [self.config['host']]
        # convert network_timeout to integer
        if self.config['network_timeout']:
            self.config['network_timeout'] = int(
                self.config['network_timeout'])
        # convert collection_sample_rate to float
        if self.config['collection_sample_rate']:
            self.config['collection_sample_rate'] = float(
                self.config['collection_sample_rate'])
        # use auth if given
        if 'user' in self.config:
            user = self.config['user']
        else:
            user = None
        if 'passwd' in self.config:
            passwd = self.config['passwd']
        else:
            passwd = None
        for host in hosts:
            # Split an optional "alias@" prefix off the host element.
            matches = re.search('((.+)\@)?(.+)?', host)
            alias = matches.group(2)
            host = matches.group(3)
            if alias is None:
                if len(hosts) == 1:
                    # one host only, no need to have a prefix
                    base_prefix = []
                else:
                    base_prefix = [re.sub('[:\.]', '_', host)]
            else:
                base_prefix = [alias]
            try:
                # Ensure that the SSL option is a boolean.
                if type(self.config['ssl']) is str:
                    self.config['ssl'] = str_to_bool(self.config['ssl'])
                # Prefer reading from secondaries when the pymongo
                # version supports read preferences.
                if ReadPreference is None:
                    conn = pymongo.MongoClient(
                        host,
                        socketTimeoutMS=self.config['network_timeout'],
                        ssl=self.config['ssl'],
                    )
                else:
                    conn = pymongo.MongoClient(
                        host,
                        socketTimeoutMS=self.config['network_timeout'],
                        ssl=self.config['ssl'],
                        read_preference=ReadPreference.SECONDARY,
                    )
            except Exception as e:
                self.log.error('Couldnt connect to mongodb: %s', e)
                continue
            # try auth
            if user:
                try:
                    conn.admin.authenticate(user, passwd)
                except Exception as e:
                    self.log.error(
                        'User auth given, but could not autheticate' +
                        ' with host: %s, err: %s' % (host, e))
                    return{}
            data = conn.db.command('serverStatus')
            self._publish_transformed(data, base_prefix)
            if str_to_bool(self.config['simple']):
                data = self._extract_simple_data(data)
            if str_to_bool(self.config['replica']):
                try:
                    replset_data = conn.admin.command('replSetGetStatus')
                    self._publish_replset(replset_data, base_prefix)
                except pymongo.errors.OperationFailure as e:
                    self.log.error('error getting replica set status', e)
            self._publish_dict_with_prefix(data, base_prefix)
            db_name_filter = re.compile(self.config['databases'])
            ignored_collections = re.compile(self.config['ignore_collections'])
            # Collections whose crc32 exceeds this threshold are skipped
            # (consistent sampling across replicas).
            sample_threshold = self.MAX_CRC32 * self.config[
                'collection_sample_rate']
            for db_name in conn.database_names():
                if not db_name_filter.search(db_name):
                    continue
                db_stats = conn[db_name].command('dbStats')
                db_prefix = base_prefix + ['databases', db_name]
                self._publish_dict_with_prefix(db_stats, db_prefix)
                for collection_name in conn[db_name].collection_names():
                    if ignored_collections.search(collection_name):
                        continue
                    if (self.config['collection_sample_rate'] < 1 and (
                            zlib.crc32(collection_name) & 0xffffffff
                    ) > sample_threshold):
                        continue
                    collection_stats = conn[db_name].command('collstats',
                                                             collection_name)
                    if str_to_bool(self.config['translate_collections']):
                        collection_name = collection_name.replace('.', '_')
                    collection_prefix = db_prefix + [collection_name]
                    self._publish_dict_with_prefix(collection_stats,
                                                   collection_prefix)
    def _publish_replset(self, data, base_prefix):
        """ Given a response to replSetGetStatus, publishes all numeric values
            of the instance, aggregate stats of healthy nodes vs total nodes,
            and the observed statuses of all nodes in the replica set.
        """
        prefix = base_prefix + ['replset']
        self._publish_dict_with_prefix(data, prefix)
        total_nodes = len(data['members'])
        healthy_nodes = reduce(lambda value, node: value + node['health'],
                               data['members'], 0)
        self._publish_dict_with_prefix({
            'healthy_nodes': healthy_nodes,
            'total_nodes': total_nodes
        }, prefix)
        for node in data['members']:
            replset_node_name = node[self.config['replset_node_name']]
            node_name = str(replset_node_name.split('.')[0])
            self._publish_dict_with_prefix(node, prefix + ['node', node_name])
    def _publish_transformed(self, data, base_prefix):
        """ Publish values of type: counter or percent """
        self._publish_dict_with_prefix(data.get('opcounters', {}),
                                       base_prefix + ['opcounters_per_sec'],
                                       self.publish_counter)
        self._publish_dict_with_prefix(data.get('opcountersRepl', {}),
                                       base_prefix +
                                       ['opcountersRepl_per_sec'],
                                       self.publish_counter)
        self._publish_metrics(base_prefix + ['backgroundFlushing_per_sec'],
                              'flushes',
                              data.get('backgroundFlushing', {}),
                              self.publish_counter)
        self._publish_dict_with_prefix(data.get('network', {}),
                                       base_prefix + ['network_per_sec'],
                                       self.publish_counter)
        self._publish_metrics(base_prefix + ['extra_info_per_sec'],
                              'page_faults',
                              data.get('extra_info', {}),
                              self.publish_counter)
        # Walk a dotted key path into nested dicts; 0 when any hop is
        # missing or empty.
        def get_dotted_value(data, key_name):
            key_name = key_name.split('.')
            for i in key_name:
                data = data.get(i, {})
                if not data:
                    return 0
            return data
        # Difference between the current total and the last one seen for
        # this metric key (stored in self.__totals).
        def compute_interval(data, total_name):
            current_total = get_dotted_value(data, total_name)
            total_key = '.'.join(base_prefix + [total_name])
            last_total = self.__totals.get(total_key, current_total)
            interval = current_total - last_total
            self.__totals[total_key] = current_total
            return interval
        def publish_percent(value_name, total_name, data):
            value = float(get_dotted_value(data, value_name) * 100)
            interval = compute_interval(data, total_name)
            key = '.'.join(base_prefix + ['percent', value_name])
            self.publish_counter(key, value, time_delta=bool(interval),
                                 interval=interval)
        publish_percent('globalLock.lockTime', 'globalLock.totalTime', data)
        publish_percent('indexCounters.btree.misses',
                        'indexCounters.btree.accesses', data)
        locks = data.get('locks')
        if locks:
            # "." denotes the global lock in serverStatus output; rename
            # it so it can be used as a metric key segment.
            if '.' in locks:
                locks['_global_'] = locks['.']
                del (locks['.'])
            key_prefix = '.'.join(base_prefix + ['percent'])
            db_name_filter = re.compile(self.config['databases'])
            interval = compute_interval(data, 'uptimeMillis')
            for db_name in locks:
                if not db_name_filter.search(db_name):
                    continue
                # r/R = shared (read) lock micros; w/W = exclusive
                # (write) lock micros.
                r = get_dotted_value(
                    locks,
                    '%s.timeLockedMicros.r' % db_name)
                R = get_dotted_value(
                    locks,
                    '.%s.timeLockedMicros.R' % db_name)
                value = float(r + R) / 10
                if value:
                    self.publish_counter(
                        key_prefix + '.locks.%s.read' % db_name,
                        value, time_delta=bool(interval),
                        interval=interval)
                w = get_dotted_value(
                    locks,
                    '%s.timeLockedMicros.w' % db_name)
                W = get_dotted_value(
                    locks,
                    '%s.timeLockedMicros.W' % db_name)
                value = float(w + W) / 10
                if value:
                    self.publish_counter(
                        key_prefix + '.locks.%s.write' % db_name,
                        value, time_delta=bool(interval), interval=interval)
    def _publish_dict_with_prefix(self, dict, prefix, publishfn=None):
        # NOTE(review): parameter name `dict` shadows the builtin.
        for key in dict:
            self._publish_metrics(prefix, key, dict, publishfn)
    def _publish_metrics(self, prev_keys, key, data, publishfn=None):
        """Recursively publish keys"""
        if key not in data:
            return
        value = data[key]
        keys = prev_keys + [key]
        keys = [x.replace(" ", "_") for x in keys]
        if str_to_bool(self.config['replace_dashes_in_metric_keys']):
            keys = [x.replace("-", ".") for x in keys]
        if not publishfn:
            publishfn = self.publish
        if isinstance(value, dict):
            for new_key in value:
                self._publish_metrics(keys, new_key, value)
        elif isinstance(value, int) or isinstance(value, float):
            publishfn('.'.join(keys), value)
        elif isinstance(value, long):
            publishfn('.'.join(keys), float(value))
        elif isinstance(value, datetime.datetime):
            publishfn('.'.join(keys), long(value.strftime('%s')))
    def _extract_simple_data(self, data):
        # The subset of serverStatus that mongostat reports.
        return {
            'connections': data.get('connections'),
            'globalLock': data.get('globalLock'),
            'indexCounters': data.get('indexCounters')
        }
|
from sqlalchemy import and_
from changes.config import db
from changes.constants import Result, Status
from changes.models import Project, ProjectPlan, Build, Job, JobPlan
from changes.utils.locking import lock
@lock
def update_project_stats(project_id):
    """Recompute Project.avg_build_time from the 5 most recent passing,
    finished builds (None when there are no such builds)."""
    last_5_builds = Build.query.filter_by(
        result=Result.passed,
        status=Status.finished,
        project_id=project_id,
    ).order_by(Build.date_finished.desc())[:5]
    if last_5_builds:
        # NOTE(review): builds with a falsy duration are excluded from
        # the sum but still counted in the denominator -- confirm that
        # is intended.
        avg_build_time = sum(
            b.duration for b in last_5_builds
            if b.duration
        ) / len(last_5_builds)
    else:
        avg_build_time = None
    db.session.query(Project).filter(
        Project.id == project_id
    ).update({
        Project.avg_build_time: avg_build_time,
    }, synchronize_session=False)
@lock
def update_project_plan_stats(project_id, plan_id):
    """Recompute ProjectPlan.avg_build_time for (project, plan) from the
    5 most recent passing, finished jobs run under that plan."""
    job_plan = JobPlan.query.filter(
        JobPlan.project_id == project_id,
        JobPlan.plan_id == plan_id,
    ).first()
    if not job_plan:
        return
    last_5_builds = Job.query.filter(
        Job.result == Result.passed,
        Job.status == Status.finished,
        Job.project_id == project_id,
    ).join(
        JobPlan,
        and_(
            JobPlan.id == job_plan.id,
            JobPlan.job_id == Job.id,
        )
    ).order_by(Job.date_finished.desc())[:5]
    if last_5_builds:
        # NOTE(review): jobs with a falsy duration are excluded from the
        # sum but still counted in the denominator -- confirm intended.
        avg_build_time = sum(
            b.duration for b in last_5_builds
            if b.duration
        ) / len(last_5_builds)
    else:
        avg_build_time = None
    db.session.query(ProjectPlan).filter(
        ProjectPlan.project_id == job_plan.project_id,
        ProjectPlan.plan_id == job_plan.plan_id,
    ).update({
        ProjectPlan.avg_build_time: avg_build_time,
    }, synchronize_session=False)
|
"""Model evaluation tools."""
import os
import sklearn
import itertools
import numpy as np
import pandas as pd
import sklearn.metrics as skmetrics
from matplotlib import pyplot as plt
from healthcareai.common.healthcareai_error import HealthcareAIError
DIAGONAL_LINE_COLOR = '#bbbbbb'
DIAGONAL_LINE_STYLE = 'dotted'
def compute_roc(y_test, probability_predictions):
    """
    Compute TPRs, FPRs, best cutoff, ROC auc, and raw thresholds.

    Args:
        y_test (list) : true label values corresponding to the predictions. Also length n.
        probability_predictions (list) : predictions coming from an ML algorithm of length n.

    Returns:
        dict: ROC AUC, the full curve, and the cutoff closest to (FPR=0, TPR=1)
    """
    _validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
    # Full ROC curve plus its summary AUC
    fpr, tpr, thresholds = skmetrics.roc_curve(y_test, probability_predictions)
    area = skmetrics.roc_auc_score(y_test, probability_predictions)
    # The ideal operating point is the curve point nearest the upper-left
    # corner (0, 1); squared euclidean distance finds it.
    distances = fpr ** 2 + (tpr - 1) ** 2
    # np.argmin returns the first index on ties, mirroring np.where(...)[0][0]
    best = int(np.argmin(distances))
    return {'roc_auc': area,
            'best_roc_cutoff': thresholds[best],
            'best_true_positive_rate': tpr[best],
            'best_false_positive_rate': fpr[best],
            'true_positive_rates': tpr,
            'false_positive_rates': fpr,
            'roc_thresholds': thresholds}
def compute_pr(y_test, probability_predictions):
    """
    Compute Precision-Recall, thresholds and PR AUC.

    Args:
        y_test (list) : true label values corresponding to the predictions. Also length n.
        probability_predictions (list) : predictions coming from an ML algorithm of length n.

    Returns:
        dict: PR AUC, the full curve, and the cutoff closest to (recall=1, precision=1)
    """
    _validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
    # Full PR curve plus its summary AUC
    precisions, recalls, thresholds = skmetrics.precision_recall_curve(y_test, probability_predictions)
    area = skmetrics.average_precision_score(y_test, probability_predictions)
    # Ideal point is the upper-right corner (1, 1); nearest curve point wins.
    distances = (precisions - 1) ** 2 + (recalls - 1) ** 2
    # np.argmin returns the first index on ties, mirroring np.where(...)[0][0]
    best = int(np.argmin(distances))
    return {'pr_auc': area,
            'best_pr_cutoff': thresholds[best],
            'best_precision': precisions[best],
            'best_recall': recalls[best],
            'precisions': precisions,
            'recalls': recalls,
            'pr_thresholds': thresholds}
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate regression metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (predictions)

    Returns:
        dict: mean squared error and mean absolute error
    """
    predictions = trained_sklearn_estimator.predict(x_test)
    return {
        'mean_squared_error': skmetrics.mean_squared_error(y_test, predictions),
        'mean_absolute_error': skmetrics.mean_absolute_error(y_test, predictions),
    }
def calculate_binary_classification_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate accuracy plus ROC and PR metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (predictions)

    Returns:
        dict: accuracy plus all keys from compute_roc() and compute_pr()
    """
    # Work with a 1D label vector regardless of the incoming shape
    y_test = np.squeeze(y_test)
    _validate_predictions_and_labels_are_equal_length(x_test, y_test)
    # Hard class predictions and positive-class probabilities
    binary_predictions = np.squeeze(trained_sklearn_estimator.predict(x_test))
    probability_predictions = np.squeeze(trained_sklearn_estimator.predict_proba(x_test)[:, 1])
    metrics = {'accuracy': skmetrics.accuracy_score(y_test, binary_predictions)}
    # Flatten the roc/pr dictionaries so plot and ensemble code can look
    # metrics up directly (keys are disjoint).
    metrics.update(compute_roc(y_test, probability_predictions))
    metrics.update(compute_pr(y_test, probability_predictions))
    return metrics
def roc_plot_from_thresholds(roc_thresholds_by_model, save=False, debug=False):
    """
    From a given dictionary of thresholds by model, create a ROC curve for each model.

    Args:
        roc_thresholds_by_model (dict): A dictionary of ROC thresholds by model name.
            Each value must contain the keys produced by compute_roc().
        save (bool): False to display the image (default) or True to save it (but not display it)
        debug (bool): verbose output.
    """
    # TODO consolidate this and PR plotter into 1 function
    # TODO make the colors randomly generated from rgb values
    # Cycle through a fixed palette so any number of models can be plotted
    color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
    # Initialize plot
    plt.figure()
    plt.xlabel('False Positive Rate (FPR)')
    plt.ylabel('True Positive Rate (TRP)')
    plt.title('Receiver Operating Characteristic (ROC)')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    # Reference diagonal: the ROC curve of a random classifier
    plt.plot([0, 1], [0, 1], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
    # Calculate and plot for each model
    for color, (model_name, metrics) in zip(color_iterator, roc_thresholds_by_model.items()):
        # Extract model name and metrics from dictionary
        roc_auc = metrics['roc_auc']
        tpr = metrics['true_positive_rates']
        fpr = metrics['false_positive_rates']
        best_true_positive_rate = metrics['best_true_positive_rate']
        best_false_positive_rate = metrics['best_false_positive_rate']
        if debug:
            print('{} model:'.format(model_name))
            print(pd.DataFrame({'FPR': fpr, 'TPR': tpr}))
        # plot the curve and mark the ideal cutoff with a star
        label = '{} (ROC AUC = {})'.format(model_name, round(roc_auc, 2))
        plt.plot(fpr, tpr, color=color, label=label)
        plt.plot([best_false_positive_rate], [best_true_positive_rate], marker='*', markersize=10, color=color)
    plt.legend(loc="lower right")
    if save:
        plt.savefig('ROC.png')
        source_path = os.path.dirname(os.path.abspath(__file__))
        print('\nROC plot saved in: {}'.format(source_path))
    # show() blocks when not saving; when saving it is still called (historical behavior)
    plt.show()
def pr_plot_from_thresholds(pr_thresholds_by_model, save=False, debug=False):
    """
    From a given dictionary of thresholds by model, create a PR curve for each model.

    Args:
        pr_thresholds_by_model (dict): A dictionary of PR thresholds by model name.
            Each value must contain the keys produced by compute_pr().
        save (bool): False to display the image (default) or True to save it (but not display it)
        debug (bool): verbose output.
    """
    # TODO consolidate this and PR plotter into 1 function
    # TODO make the colors randomly generated from rgb values
    # Cycle through a fixed palette so any number of models can be plotted
    color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
    # Initialize plot
    plt.figure()
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision Recall (PR)')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    # Reference anti-diagonal for visual orientation
    plt.plot([0, 1], [1, 0], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
    # Calculate and plot for each model
    for color, (model_name, metrics) in zip(color_iterator, pr_thresholds_by_model.items()):
        # Extract model name and metrics from dictionary
        pr_auc = metrics['pr_auc']
        precision = metrics['precisions']
        recall = metrics['recalls']
        best_recall = metrics['best_recall']
        best_precision = metrics['best_precision']
        if debug:
            print('{} model:'.format(model_name))
            print(pd.DataFrame({'Recall': recall, 'Precision': precision}))
        # plot the curve and mark the ideal cutoff with a star
        label = '{} (PR AUC = {})'.format(model_name, round(pr_auc, 2))
        plt.plot(recall, precision, color=color, label=label)
        plt.plot([best_recall], [best_precision], marker='*', markersize=10, color=color)
    plt.legend(loc="lower left")
    if save:
        plt.savefig('PR.png')
        source_path = os.path.dirname(os.path.abspath(__file__))
        print('\nPR plot saved in: {}'.format(source_path))
    # show() blocks when not saving; when saving it is still called (historical behavior)
    plt.show()
def plot_random_forest_feature_importance(trained_random_forest, x_train, feature_names, feature_limit=15, save=False):
    """
    Given a random forest estimator, an x_train array, the feature names save or display a feature importance plot.

    Args:
        trained_random_forest (sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.RandomForestRegressor):
        x_train (numpy.array): A 2D numpy array that was used for training
        feature_names (list): Column names in the x_train set
        feature_limit (int): Number of features to display on graph
        save (bool): True to save the plot, false to display it in a blocking thread

    Raises:
        HealthcareAIError: if the estimator is not a scikit-learn random forest
    """
    _validate_random_forest_estimator(trained_random_forest)
    # Sort the feature names and relative importances (descending)
    # TODO this portion could probably be extracted and tested, since the plot is difficult to test
    aggregate_features_importances = trained_random_forest.feature_importances_
    indices = np.argsort(aggregate_features_importances)[::-1]
    sorted_feature_names = [feature_names[i] for i in indices]
    # limit the plot to the top n features so it stays legible on models with lots of features
    subset_indices = indices[0:feature_limit]
    number_of_features = x_train.shape[1]
    # build a range using the lesser value
    max_features = min(number_of_features, feature_limit)
    x_axis_limit = range(max_features)
    # Get the standard deviations (spread across trees) for error bars
    standard_deviations = _standard_deviations_of_importances(trained_random_forest)
    # Turn off matplotlib interactive mode
    plt.ioff()
    # Set up the plot and axes
    figure = plt.figure()
    plt.title('Top {} (of {}) Important Features'.format(max_features, number_of_features))
    plt.ylabel('Relative Importance')
    # Plot each feature
    plt.bar(
        # this should go as far as the model or limit whichever is less
        x_axis_limit,
        aggregate_features_importances[subset_indices],
        color="g",
        yerr=standard_deviations[subset_indices],
        align="center")
    plt.xticks(x_axis_limit, sorted_feature_names, rotation=90)
    # x axis scales by default
    # set y axis min to zero
    # NOTE(review): the `ymin` keyword was removed in matplotlib 3.0 (use `bottom`) -- confirm target version
    plt.ylim(ymin=0)
    # plt.tight_layout() # Do not use tight_layout until https://github.com/matplotlib/matplotlib/issues/5456 is fixed
    # Because long feature names cause this error
    # Save or display the plot
    if save:
        plt.savefig('FeatureImportances.png')
        source_path = os.path.dirname(os.path.abspath(__file__))
        print('\nFeature importance plot saved in: {}'.format(source_path))
        # Close the figure so it does not get displayed
        plt.close(figure)
    else:
        plt.show()
def _validate_random_forest_estimator(trained_random_forest):
    """
    Validate that an input is a random forest estimator and raise an error if it is not.

    Args:
        trained_random_forest: any input

    Raises:
        HealthcareAIError: when the input is not a scikit-learn random forest
    """
    acceptable_types = (sklearn.ensemble.RandomForestClassifier,
                        sklearn.ensemble.RandomForestRegressor)
    if not isinstance(trained_random_forest, acceptable_types):
        raise HealthcareAIError('Feature plotting only works with a scikit learn Random Forest estimator.')
def _standard_deviations_of_importances(trained_random_forest):
    """
    Given a scikit-learn trained random forest estimator, return the standard deviations of all feature importances.

    Args:
        trained_random_forest (sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.RandomForestRegressor): the
            trained estimator

    Returns:
        numpy.ndarray: per-feature standard deviation across the ensemble's trees
    """
    # Each tree in the ensemble carries its own importances; the spread
    # across trees is what gets drawn as error bars.
    per_tree_importances = [estimator.feature_importances_
                            for estimator in trained_random_forest.estimators_]
    return np.std(per_tree_importances, axis=0)
def _validate_predictions_and_labels_are_equal_length(predictions, true_values):
    """Raise unless both sequences have the same length.

    Args:
        predictions (list): predicted values
        true_values (list): observed values

    Returns:
        bool: True when lengths match (kept for backward compatibility)

    Raises:
        HealthcareAIError: when the lengths differ
    """
    # Guard clause instead of if/else: the redundant else branch is gone
    # and the success path simply falls through.
    if len(predictions) != len(true_values):
        raise HealthcareAIError('The number of predictions is not equal to the number of true_values.')
    return True
if __name__ == '__main__':
pass
|
'''
Create the application and register its blueprints.
'''
import dataclasses
import uuid
from flask import Flask as _Flask
from flask.json import JSONEncoder as _JSONEncoder
from app.libs.error_code import ServerError
from datetime import date, datetime
from werkzeug.http import http_date
# from flask_wtf.csrf import CsrfProtect
from flask_login import LoginManager
# from app.libs.email import mail
# from flask_cache import Cache
# from app.libs.limiter import Limiter
__author__ = '带土'
class JSONEncoder(_JSONEncoder):
    """JSON encoder that understands the extra types this app serializes.

    Anything it cannot encode raises ServerError so unexpected objects
    never leak half-serialized to the client.
    """

    def default(self, o):
        # dict-like objects (models exposing keys()/__getitem__)
        if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
            return dict(o)
        # BUGFIX: datetime must be tested BEFORE date. datetime is a date
        # subclass, so the original date-first order made this branch
        # unreachable and silently dropped the time component.
        if isinstance(o, datetime):
            return http_date(o.utctimetuple())
        if isinstance(o, date):
            return o.strftime('%Y-%m-%d')
        if isinstance(o, uuid.UUID):
            return str(o)
        # (dropped dead `if dataclasses and ...`: a module object is always truthy)
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        raise ServerError()
class Flask(_Flask):
    # Serialize every jsonify/json.dumps response with the extended encoder.
    json_encoder = JSONEncoder
login_manager = LoginManager()
# cache = Cache(config={'CACHE_TYPE': 'simple'})
# limiter = Limiter()
def register_blueprint(app):
    """Attach the web front-end and the versioned API blueprints to the app.

    Imports happen inside the function to avoid circular imports at
    module load time.
    """
    from app.web import web
    from app.api.user import create_blueprint_user
    from app.api.cms import create_blueprint_cms
    from app.api.spider import create_blueprint_spider
    from app.api.shop import create_blueprint_shop
    app.register_blueprint(web)
    app.register_blueprint(create_blueprint_user(), url_prefix='/api/user')
    app.register_blueprint(create_blueprint_cms(), url_prefix='/api/cms')
    app.register_blueprint(create_blueprint_spider(), url_prefix='/api/spider')
    app.register_blueprint(create_blueprint_shop(), url_prefix='/api/shop')
def register_plugin(app):
    """Wire up Flask extensions -- currently just SQLAlchemy."""
    from app.models.base import db
    # Register SQLAlchemy and make sure every table exists.
    db.init_app(app)
    with app.app_context():
        db.create_all()
def create_app(config=None):
    """Application factory: build and configure the Flask app.

    Args:
        config: optional override applied last -- either a dict merged
            into the config or a path to a ``.py`` file to load.

    Returns:
        Flask: the configured application
    """
    app = Flask(__name__)
    #: load default configuration, then the secrets on top
    app.config.from_object('app.config.settings')
    app.config.from_object('app.config.secure')
    # # register the email module
    # mail.init_app(app)
    #
    # register the login module
    login_manager.init_app(app)
    login_manager.login_view = 'web.login'
    login_manager.login_message = '请先登录或注册'
    #
    # # register the flask-cache module
    # cache.init_app(app)
    # register CSRF protection
    # csrf = CsrfProtect()
    # csrf.init_app(app)
    register_blueprint(app)
    register_plugin(app)
    # Caller-supplied overrides are applied last so they win.
    if config is not None:
        if isinstance(config, dict):
            app.config.update(config)
        elif config.endswith('.py'):
            app.config.from_pyfile(config)
    return app
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python Cursor on Target Module Test Context."""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pycot # NOQA pylint: disable=C0413,W0611
__author__ = 'Greg Albrecht W2GMD <oss@undef.net>'
__copyright__ = 'Copyright 2020 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
|
import FWCore.ParameterSet.Config as cms
import TrackingTools.TrackFitters.KFFittingSmoother_cfi
# Clone the standard Kalman fitting smoother and point it at the
# Gaussian-sum-filter (GSF) fitter/smoother used for electron track refits.
GsfElectronFittingSmoother = TrackingTools.TrackFitters.KFFittingSmoother_cfi.KFFittingSmoother.clone()
GsfElectronFittingSmoother.ComponentName = 'GsfElectronFittingSmoother'
GsfElectronFittingSmoother.Fitter = 'GsfTrajectoryFitter'
GsfElectronFittingSmoother.Smoother = 'GsfTrajectorySmoother'
|
import gi
import math
import cairo
import numpy
from dock import Dock, create_icon, get_gicon_pixbuf, pixbuf2image
gi.require_version('Gtk', '3.0') # noqa
gi.require_version('Gdk', '3.0') # noqa
gi.require_version('Gio', '2.0') # noqa
gi.require_version('GObject', '2.0') # noqa
from applications import AppCache, WindowTracker, groupings
from PIL import Image, ImageOps
from gi.repository import Gtk, Gdk, Gio, GLib, GObject
from dominantcolors import rgba2rgb, find_dominant_colors
app_cache = AppCache()
window_tracker = WindowTracker()
class DockWindow(Gtk.Window):
    """Undecorated dock-type window that hosts the Dock widget."""
    # Set by screen_changed(): True when the screen offers an RGBA visual,
    # i.e. per-pixel transparency is available.
    supports_alpha = False

    def __init__(self):
        """Build the window, embed a Dock, and enter the Gtk main loop.

        NOTE(review): Gtk.main() runs at the end of __init__, so
        constructing a DockWindow blocks until the application quits.
        """
        super().__init__()
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_title("Diffusion Dock")
        self.connect("delete-event", Gtk.main_quit)
        # We paint the window background ourselves in expose_draw().
        self.set_app_paintable(True)
        self.connect("screen-changed", self.screen_changed)
        self.connect("draw", self.expose_draw)
        self.set_decorated(False)
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        # DOCK hint asks the window manager to treat this like a panel.
        self.set_type_hint(Gdk.WindowTypeHint.DOCK)
        dock = Dock(None, app_cache=app_cache, window_tracker=window_tracker)
        self.add(dock)
        style_provider = Gtk.CssProvider()
        style_provider.load_from_file(Gio.File.new_for_path("style.css"))
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )

        def aaa(_, a):
            # Debug draw callback left in during development.
            print("aaa")
        #self.resize(
        #    max(dock.get_size_request().width, 1), max(dock.get_size_request().height, 1))
        # self.set_size_request(100, 100)
        # self.resize(100, 100)
        print(dock.get_size_request())
        # dock.connect("draw", lambda dock, _: self.resize(
        #     max(dock.get_size_request().width, 1), max(dock.get_size_request().height, 1)))
        dock.connect("draw", aaa)
        # Initialize supports_alpha and the visual for the current screen.
        self.screen_changed(self, None, None)
        self.show_all()
        self.resize(100, 100)
        Gtk.main()

    def expose_draw(self, widget, event, userdata=None):
        """Fill the window background; transparent when alpha is supported."""
        cr = Gdk.cairo_create(widget.get_window())
        cr.scale(.2, .2)
        if self.supports_alpha:
            print("setting transparent window")
            cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
        else:
            print("setting opaque window")
            cr.set_source_rgb(1.0, 1.0, 1.0)
        # OPERATOR_SOURCE overwrites the destination pixels, alpha included.
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        return False

    def screen_changed(self, widget, old_screen, userdata=None):
        """Pick an RGBA visual when available and record alpha support."""
        screen = self.get_screen()
        visual = screen.get_rgba_visual()
        if visual is None:
            # No compositing: fall back to the opaque system visual.
            visual = screen.get_system_visual()
            self.supports_alpha = False
        else:
            self.supports_alpha = True
        self.set_visual(visual)
if __name__ == "__main__":
DockWindow()
|
# Copyright (c) Facebook, Inc. and its affiliates.
""" Utility functions for processing point clouds.
Author: Charles R. Qi and Or Litany
"""
import os
import sys
import torch
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
# Mesh IO
import trimesh
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
    """Input is NxC, output is num_samplexC"""
    # Sample with replacement only when more points are requested than the
    # cloud contains (unless the caller decided explicitly).
    if replace is None:
        replace = pc.shape[0] < num_sample
    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
    sampled = pc[choices]
    return (sampled, choices) if return_choices else sampled
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def shift_scale_points(pred_xyz, src_range, dst_range=None):
    """Linearly remap coordinates from a source box to a destination box.

    pred_xyz: B x N x 3 (or B x Q x N x 3)
    src_range: [[B x 3], [B x 3]] - min and max XYZ coords of the source box
    dst_range: [[B x 3], [B x 3]] - min and max XYZ coords of the target box;
        defaults to the unit cube [0, 1]^3
    """
    if dst_range is None:
        # Default destination: the axis-aligned unit cube on the same device.
        dst_range = [
            torch.zeros((src_range[0].shape[0], 3), device=src_range[0].device),
            torch.ones((src_range[0].shape[0], 3), device=src_range[0].device),
        ]

    if pred_xyz.ndim == 4:
        # Broadcast the ranges over the extra (query) dimension.
        src_range = [x[:, None] for x in src_range]
        dst_range = [x[:, None] for x in dst_range]

    assert src_range[0].shape[0] == pred_xyz.shape[0]
    assert dst_range[0].shape[0] == pred_xyz.shape[0]
    assert src_range[0].shape[-1] == pred_xyz.shape[-1]
    assert src_range[0].shape == src_range[1].shape
    assert dst_range[0].shape == dst_range[1].shape
    # Fixed: compare src mins against dst MINS. The original compared
    # src_range[0] with dst_range[1]; equivalent given the asserts above,
    # but it stated the wrong intent.
    assert src_range[0].shape == dst_range[0].shape

    src_diff = src_range[1][:, None, :] - src_range[0][:, None, :]
    dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :]
    # Affine map: normalize into [0,1] w.r.t. the src box, then scale/offset to dst.
    prop_xyz = (
        ((pred_xyz - src_range[0][:, None, :]) * dst_diff) / src_diff
    ) + dst_range[0][:, None, :]
    return prop_xyz
def scale_points(pred_xyz, mult_factor):
    """Scale B x N x 3 (or B x Q x N x 3) points by a per-batch B x 3 factor."""
    # Insert an extra broadcast axis when a query dimension is present.
    factor = mult_factor[:, None] if pred_xyz.ndim == 4 else mult_factor
    return pred_xyz * factor[:, None, :]
def rotate_point_cloud(points, rotation_matrix=None):
    """Input: (n,3), Output: (n,3)"""
    # Default: a uniformly random rotation about the Z axis.
    if rotation_matrix is None:
        angle = np.random.uniform() * 2 * np.pi
        s, c = np.sin(angle), np.cos(angle)
        rotation_matrix = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
    # Rotate about the centroid so the cloud stays centered in place.
    centroid = points.mean(axis=0)
    rotated = (points - centroid).dot(rotation_matrix) + centroid
    return rotated, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
    """Input ps is NxC points with first 3 channels as XYZ
    z is facing forward, x is left ward, y is downward
    """
    c, s = np.cos(rot_angle), np.sin(rot_angle)
    # 2x2 rotation applied in-place to the (x, z) columns; y is untouched.
    rotmat = np.array([[c, -s], [s, c]])
    pc[:, [0, 2]] = pc[:, [0, 2]].dot(rotmat.T)
    return pc
def roty(t):
    """Rotation about the y-axis."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
def roty_batch(t):
    """Rotation about the y-axis.
    t: (x1,x2,...xn)
    return: (x1,x2,...,xn,3,3)
    """
    # One 3x3 matrix per input angle, appended as two trailing axes.
    out = np.zeros(t.shape + (3, 3))
    c, s = np.cos(t), np.sin(t)
    out[..., 0, 0] = c
    out[..., 0, 2] = s
    out[..., 1, 1] = 1
    out[..., 2, 0] = -s
    out[..., 2, 2] = c
    return out
def rotz(t):
    """Rotation about the z-axis."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def point_cloud_to_bbox(points):
    """Extract the axis aligned box from a pcl or batch of pcls
    Args:
        points: Nx3 points or BxNx3
        output is 6 dim: xyz pos of center and 3 lengths
    """
    # Reduce over the points axis: 0 for a single (N,3) cloud, 1 for a batch.
    points_axis = len(points.shape) - 2
    lo = points.min(points_axis)
    hi = points.max(points_axis)
    return np.concatenate([(lo + hi) * 0.5, hi - lo], axis=points_axis)
def write_bbox(scene_bbox, out_filename):
    """Export scene bbox to meshes
    Args:
        scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
        out_filename: (string) filename
    Note:
        To visualize the boxes in MeshLab.
        1. Select the objects (the boxes)
        2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
        3. Select Wireframe view.
    """
    def box_to_mesh(box):
        # 4x4 homogeneous transform translating a unit box to its center.
        transform = np.eye(4)
        transform[0:3, 3] = box[:3]
        return trimesh.creation.box(box[3:], transform)

    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(box_to_mesh(box))
    combined = trimesh.util.concatenate(scene.dump())
    # save to ply file
    trimesh.io.export.export_mesh(combined, out_filename, file_type="ply")
    return
def write_oriented_bbox(scene_bbox, out_filename, colors=None):
    """Export oriented (around Z axis) scene bbox to meshes
    Args:
        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
            and heading angle around Z axis.
            Y forward, X right, Z upward. heading angle of positive X is 0,
            heading angle of positive Y is 90 degrees.
        out_filename: (string) filename
        colors: optional RGBA color(s): either one row per box (N x 4) or a
            single color broadcast to every box.
    """

    def heading2rotmat(heading_angle):
        # 3x3 rotation about +Z by heading_angle.
        # (Removed a stray dead `pass` statement that sat at the top here.)
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat

    def convert_oriented_box_to_trimesh_fmt(box):
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        return trimesh.creation.box(lengths, trns)

    if colors is not None:
        # Robustness fix: accept lists/tuples as well as ndarrays (the old
        # code called .shape on the raw argument) and broadcast one color.
        colors = np.asarray(colors)
        if colors.ndim == 1 or colors.shape[0] != len(scene_bbox):
            colors = np.tile(colors.reshape(1, -1), (len(scene_bbox), 1))
        colors = colors.astype(np.uint8)
        assert colors.shape[0] == len(scene_bbox)
        assert colors.shape[1] == 4

    scene = trimesh.scene.Scene()
    for idx, box in enumerate(scene_bbox):
        box_tr = convert_oriented_box_to_trimesh_fmt(box)
        if colors is not None:
            box_tr.visual.main_color[:] = colors[idx]
            box_tr.visual.vertex_colors[:] = colors[idx]
            for facet in box_tr.facets:
                box_tr.visual.face_colors[facet] = colors[idx]
        scene.add_geometry(box_tr)

    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
    return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
    """Export oriented (around Y axis) scene bbox to meshes
    Args:
        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
            and heading angle around Y axis.
            Z forward, X rightward, Y downward. heading angle of positive X is 0,
            heading angle of negative Z is 90 degrees.
        out_filename: (string) filename
    """

    def heading2rotmat(heading_angle):
        # 3x3 rotation about +Y by heading_angle.
        # (Removed a stray dead `pass` statement that sat at the top here.)
        rotmat = np.zeros((3, 3))
        rotmat[1, 1] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0, :] = np.array([cosval, 0, sinval])
        rotmat[2, :] = np.array([-sinval, 0, cosval])
        return rotmat

    def convert_oriented_box_to_trimesh_fmt(box):
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        return trimesh.creation.box(lengths, trns)

    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
    return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
    """Create lines represented as cylinders connecting pairs of 3D points
    Args:
        pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
        filename: (string) filename for the output mesh (ply) file, without extension
        rad: radius for the cylinder
        res: number of sections used to create the cylinder
    """
    scene = trimesh.scene.Scene()
    for src, tgt in pcl:
        # compute line
        vec = tgt - src
        # Rotation that maps the cylinder's +Z axis onto the segment direction.
        M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
        vec = tgt - src  # compute again since align_vectors modifies vec in-place!
        # Translate the (origin-centered) cylinder to the segment midpoint.
        M[:3, 3] = 0.5 * src + 0.5 * tgt
        height = np.sqrt(np.dot(vec, vec))
        scene.add_geometry(
            trimesh.creation.cylinder(
                radius=rad, height=height, sections=res, transform=M
            )
        )
    mesh_list = trimesh.util.concatenate(scene.dump())
    # ".ply" is appended here, so pass `filename` without an extension.
    trimesh.io.export.export_mesh(mesh_list, "%s.ply" % (filename), file_type="ply")
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
def imshow(image, dimshuffle=None, rescale=False, complementary=False):
    '''
    >>> plot the images after some transformation

    image: HxWxC array (C = 1 or 3) after any dimshuffle
    dimshuffle: optional axis permutation, e.g. (1, 2, 0) for CHW -> HWC
    rescale: True for min/max auto-rescaling, or [bias, scale] pair
    complementary: invert the image (1 - image)
    '''
    # Fixed idiom: identity test instead of `!= None`.
    if dimshuffle is not None:
        image = np.transpose(image, dimshuffle)
    # `rescale is True` keeps the original isinstance(bool)+equality semantics.
    if rescale is True:  # Whether or not to apply auto-rescaling
        if np.max(image) - np.min(image) < 1e-8:
            # Degenerate (constant) image: avoid dividing by ~zero.
            image = np.zeros_like(image)
        else:
            image = (image - np.min(image)) / (np.max(image) - np.min(image))
    if isinstance(rescale, (list, tuple)):  # rescale = [bias, scale]
        image = (image - rescale[0]) / rescale[1]
    if complementary:
        image = 1. - image
    image = np.clip(image, a_min=0.0, a_max=1.0)
    channel_num = image.shape[-1]
    if channel_num == 3:
        plt.imshow(image)
    elif channel_num == 1:
        # Grayscale: replicate the single channel into RGB.
        stacked_image = np.concatenate([image, image, image], axis=2)
        plt.imshow(stacked_image)
    else:
        raise ValueError('Unsupported channel num: %d' % channel_num)
    plt.xticks([])
    plt.yticks([])
def imselect(image, feature_num, dimshuffle=None, rescale=False, complementary=False):
    '''
    >>> plot the image only highlighting the top features

    feature_num: number of pixels (by channel-wise L2 norm) to keep visible
    '''
    # Transformation
    # Fixed idiom: identity test instead of `!= None`.
    if dimshuffle is not None:
        image = np.transpose(image, dimshuffle)
    if rescale is True:
        if np.max(image) - np.min(image) < 1e-8:
            image = np.zeros_like(image)
        else:
            image = (image - np.min(image)) / (np.max(image) - np.min(image))
    if complementary:
        image = 1. - image
    image = np.clip(image, a_min=0.0, a_max=1.0)
    channel_num = image.shape[-1]
    # Select: zero out every pixel except the feature_num with the largest norm.
    norms = np.linalg.norm(image, axis=2).reshape(-1)
    threshold = np.sort(norms)[-feature_num]
    mask = np.asarray([1. if v >= threshold else 0. for v in norms],
                      dtype=np.float32)
    image = image * mask.reshape(image.shape[0], image.shape[1], 1)
    if channel_num == 3:
        plt.imshow(image)
    elif channel_num == 1:
        stacked_image = np.concatenate([image, image, image], axis=2)
        plt.imshow(stacked_image)
    else:
        raise ValueError('Unsupported channel num: %d' % channel_num)
    plt.xticks([])
    plt.yticks([])
|
from github_secrets.cli import cli
cli() |
# -*- coding: utf-8 -*-
#
# Copyright 2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functions used to generate or write output from the decision tree models
"""
import sys
import os
import math
import keyword
from functools import reduce, partial
from bigml.path import Path, BRIEF
from bigml.basemodel import print_importance
from bigml.io import UnicodeWriter
from bigml.util import markdown_cleanup, prefix_as_comment, utf8, NUMERIC
from bigml.predicate import Predicate
from bigml.model import PYTHON_CONV
from bigml.predict_utils.common import missing_branch, \
none_value, get_node, get_predicate
from bigml.predicate_utils.utils import predicate_to_rule, \
LT, LE, EQ, NE, GE, GT, IN, to_lisp_rule, INVERSE_OP
from bigml.tree_utils import MAX_ARGS_LENGTH, tableau_string, slugify, \
sort_fields, TM_TOKENS, TM_ALL, TM_FULL_TERM, TERM_OPTIONS, ITEM_OPTIONS, \
PYTHON_OPERATOR
from bigml.generators.tree import plug_in_body
from bigml.generators.boosted_tree import boosted_plug_in_body
from bigml.generators.tree import filter_nodes
# templates for static Python
BIGML_SCRIPT = os.path.dirname(__file__)
TERM_TEMPLATE = "%s/static/term_analysis.txt" % BIGML_SCRIPT
ITEMS_TEMPLATE = "%s/static/items_analysis.txt" % BIGML_SCRIPT
HADOOP_CSV_TEMPLATE = "%s/static/python_hadoop_csv.txt" % \
BIGML_SCRIPT
HADOOP_NEXT_TEMPLATE = "%s/static/python_hadoop_next.txt" % \
BIGML_SCRIPT
HADOOP_REDUCER_TEMPLATE = "%s/static/python_hadoop_reducer.txt" % \
BIGML_SCRIPT
DEFAULT_IMPURITY = 0.2
INDENT = ' '
DFT_ATTR = "output"
MISSING_OPERATOR = {
EQ: "is",
NE: "is not"
}
T_MISSING_OPERATOR = {
EQ: "ISNULL(",
NE: "NOT ISNULL("
}
def print_distribution(distribution, out=sys.stdout):
    """Prints distribution data

    Args:
        distribution: list of (category, instance_count) pairs
        out: writable stream (defaults to stdout)
    """
    # sum() replaces the reduce(lambda x, y: x + y, ...) fold: same total,
    # clearer, and no intermediate list.
    total = sum(group[1] for group in distribution)
    for group in distribution:
        out.write(utf8(
            " %s: %.2f%% (%d instance%s)\n" % (
                group[0],
                round(group[1] * 1.0 / total, 4) * 100,
                group[1],
                "" if group[1] == 1 else "s")))
def list_fields(model, out=sys.stdout):
    """Prints descriptions of the fields for this model.

    The objective field is printed first in angle brackets; the input
    fields follow in square brackets, in sort_fields order.
    Returns the model's fields dict for convenience.
    """
    out.write(utf8('<%-32s : %s>\n' % (
        model.fields[model.objective_id]['name'],
        model.fields[model.objective_id]['optype'])))
    out.flush()
    # Skip the objective field; it was already printed above.
    for field in [(val['name'], val['optype']) for key, val in
                  sort_fields(model.fields)
                  if key != model.objective_id]:
        out.write(utf8('[%-32s : %s]\n' % (field[0], field[1])))
        out.flush()
    return model.fields
def gini_impurity(distribution, count):
    """Returns the gini impurity score associated to the distribution
    in the node

    Args:
        distribution: list of (category, instances) pairs, or None
        count: total number of instances in the node

    Returns:
        float impurity in [0, 1), or None when no distribution is given
    """
    # Guard first so nothing is computed for a missing distribution.
    if distribution is None:
        return None
    # Gini impurity: 1 - sum(p_i^2) over the class proportions.
    purity = sum((instances / float(count)) ** 2
                 for _, instances in distribution)
    return 1.0 - purity
def get_leaves(model, path=None, filter_function=None):
    """Returns a list that includes all the leaves of the tree.

    Args:
        model: local model object (uses .tree, .fields, .offsets)
        path: list of lisp rules accumulated so far (recursion argument)
        filter_function: optional predicate; when callable, only leaves
            for which filter_function(leaf) is truthy are returned

    Returns:
        list of leaf dicts (id, confidence, count, distribution,
        impurity, output, path, plus weight info when present)
    """
    if path is None:
        path = []
    offsets = model.offsets

    def get_tree_leaves(tree, fields, path, filter_function=None):
        # BUGFIX: each call now returns ONLY the leaves found under `tree`.
        # The original threaded one shared `leaves` list through the
        # recursion AND extended it with each child call's return value --
        # which was that same list -- so leaves were duplicated on every
        # level of a multi-level tree.
        leaves = []
        node = get_node(tree)
        predicate = get_predicate(tree)
        if isinstance(predicate, list):
            [operator, field, value, term, missing] = predicate
            path.append(to_lisp_rule(operator, field, value, term, missing,
                                     fields[field]))
        children_number = node[offsets["children#"]]
        children = [] if children_number == 0 else node[offsets["children"]]
        if children:
            # Internal node: collect the leaves of every subtree.
            for child in children:
                leaves += get_tree_leaves(child, fields, path[:],
                                          filter_function=filter_function)
        else:
            leaf = {
                'id': node[offsets["id"]],
                'confidence': node[offsets["confidence"]],
                'count': node[offsets["count"]],
                'distribution': node[offsets["distribution"]],
                'impurity': gini_impurity(node[offsets["distribution"]],
                                          node[offsets["count"]]),
                'output': node[offsets["output"]],
                'path': path}
            if 'weighted_distribution' in offsets:
                leaf.update(
                    {"weighted_distribution": node[offsets[
                        "weighted_distribution"]],
                     "weight": node[offsets["weight"]]})
            if (not hasattr(filter_function, '__call__')
                    or filter_function(leaf)):
                leaves.append(leaf)
        return leaves

    return get_tree_leaves(model.tree, model.fields, path,
                           filter_function)
def impure_leaves(model, impurity_threshold=DEFAULT_IMPURITY):
    """Returns a list of leaves that are impure
    """
    if model.regression or model.boosting:
        raise AttributeError("This method is available for non-boosting"
                             " categorization models only.")

    def is_impure(leaf):
        """True when the leaf's gini impurity exceeds the threshold."""
        return leaf.get('impurity') > impurity_threshold

    # The closure captures ``impurity_threshold`` directly, so no
    # functools.partial binding is needed.
    return get_leaves(model, filter_function=is_impure)
def docstring(model):
    """Returns the docstring describing the model.
    """
    if model.boosting:
        objective_name = model.fields[
            model.boosting["objective_field"]]['name']
    else:
        objective_name = model.fields[model.objective_id]['name']
    header = "Predictor for %s from %s\n" % (objective_name,
                                             model.resource_id)
    # Normalize the description, falling back to a default tagline, and
    # store it back on the model (callers rely on this side effect).
    cleaned = str(markdown_cleanup(model.description).strip())
    model.description = (
        cleaned or
        'Predictive model by BigML - Machine Learning Made Easy')
    return header + "\n" + INDENT * 2 + (
        "%s" % prefix_as_comment(INDENT * 2, model.description))
def build_ids_map(tree, offsets, ids_map, parent_id=None):
    """Builds a map for the tree from each node id to its parent
    """
    # Iterative depth-first traversal; each stack entry pairs a subtree
    # with the id of its parent node (None for the root).
    pending = [(tree, parent_id)]
    while pending:
        subtree, parent = pending.pop()
        node = get_node(subtree)
        node_id = node[offsets["id"]]
        ids_map[node_id] = parent
        if node[offsets["children#"]] != 0:
            for child in node[offsets["children"]]:
                pending.append((child, node_id))
def fill_ids_map(model):
    """Filling the parent, child map
    """
    # Build the node-id -> parent-id map lazily and cache it on the model.
    if not getattr(model, "ids_map", None):
        model.ids_map = {}
        build_ids_map(model.tree, model.offsets, model.ids_map)
    return model
def get_ids_path(model, filter_id):
    """Builds the list of ids that go from a given id to the tree root
    """
    model = fill_ids_map(model)
    ids_path = []
    root_id = model.tree[model.offsets["id"]]
    if filter_id is not None and root_id is not None:
        if filter_id not in model.ids_map:
            raise ValueError("The given id does not exist.")
        ids_path = [filter_id]
        current = filter_id
        # Follow parent links upwards; the root's parent is None.
        while model.ids_map[current] is not None:
            current = model.ids_map[current]
            ids_path.append(current)
    return ids_path
def generate_rules(tree, offsets, objective_id, fields,
                   depth=0, ids_path=None, subtree=True):
    """Translates a tree model into a set of IF-THEN rules.

    Recursively walks the flattened tree: each inner node contributes one
    "IF <predicate> AND/THEN" line per child, and each leaf contributes
    the final "<objective slug> = <output>" line. ``depth`` controls the
    indentation level; ``ids_path``/``subtree`` restrict which children
    are visited (see ``filter_nodes``).
    """
    rules_str = ""
    node = get_node(tree)
    children_number = node[offsets["children#"]]
    children = [] if children_number == 0 else node[offsets["children"]]
    children = filter_nodes(children, offsets, ids=ids_path,
                            subtree=subtree)
    if children:
        for child in children:
            predicate = get_predicate(child)
            if isinstance(predicate, list):
                [operator, field, value, term, missing] = predicate
            child_node = get_node(child)
            # "AND" when the child node has children of its own (the rule
            # continues on the next line), "THEN" when it is a leaf.
            rules_str += ("%s IF %s %s\n" %
                          (INDENT * depth,
                           predicate_to_rule(operator, fields[field],
                                             value, term, missing,
                                             label='slug'),
                           "AND" if child_node[offsets["children#"]] > 0
                           else "THEN"))
            rules_str += generate_rules(child, offsets, objective_id, fields,
                                        depth + 1, ids_path=ids_path,
                                        subtree=subtree)
    else:
        # Leaf: emit the predicted output for the objective field.
        rules_str += ("%s %s = %s\n" %
                      (INDENT * depth,
                       (fields[objective_id]['slug']
                        if objective_id else "Prediction"),
                       node[offsets["output"]]))
    return rules_str
def rules(model, out=sys.stdout, filter_id=None, subtree=True):
    """Returns a IF-THEN rule set that implements the model.

    `out` is file descriptor to write the rules.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    ids_path = get_ids_path(model, filter_id)
    fields = model.fields
    # Attach a slug to every field; the generated rules reference fields
    # by their slug.
    for field_column in sort_fields(fields):
        field_id = field_column[0]
        fields[field_id].update(slug=slugify(fields[field_id]['name']))
    out.write(utf8(generate_rules(model.tree, model.offsets,
                                  model.objective_id, fields,
                                  ids_path=ids_path, subtree=subtree)))
    out.flush()
def python(model, out=sys.stdout, hadoop=False,
           filter_id=None, subtree=True):
    """Returns a basic python function that implements the model.

    `out` is file descriptor to write the python code.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    ids_path = get_ids_path(model, filter_id)
    if not hadoop:
        return tree_python(model.tree, model.offsets, model.fields,
                           model.objective_id, model.boosting, out,
                           docstring(model), ids_path=ids_path,
                           subtree=subtree)
    # Hadoop flavour: emit the mapper first, then the reducer.
    mapper = hadoop_python_mapper(model, out=out, ids_path=ids_path,
                                  subtree=subtree)
    return mapper or hadoop_python_reducer(out=out)
def hadoop_python_mapper(model, out=sys.stdout, ids_path=None,
                         subtree=True):
    """Generates a hadoop mapper header to make predictions in python

    Writes to ``out``: the CSV-input template (with field names filled
    in), static INPUT_FIELDS/INPUT_TYPES/PREFIXES/SUFFIXES definitions,
    the next-record template, the generated predict function and a small
    driver loop that prints one prediction per input row.
    """
    # (field name, field id) pairs sorted by column order.
    input_fields = [(value, key) for (key, value) in
                    sorted(list(model.inverted_fields.items()),
                           key=lambda x: x[1])]
    parameters = [value for (key, value) in
                  input_fields if key != model.objective_id]
    args = []
    for field in input_fields:
        slug = slugify(model.fields[field[0]]['name'])
        model.fields[field[0]].update(slug=slug)
        # The objective field is predicted, not read from the input.
        if field[0] != model.objective_id:
            args.append("\"" + model.fields[field[0]]['slug'] + "\"")

    with open(HADOOP_CSV_TEMPLATE) as template_handler:
        output = template_handler.read() % ",".join(parameters)

    output += "\n%sself.INPUT_FIELDS = [%s]\n" % \
        ((INDENT * 3), (",\n " + INDENT * 8).join(args))

    input_types = []
    prefixes = []
    suffixes = []
    count = 0
    fields = model.fields
    for key in [field[0] for field in input_fields
                if field[0] != model.objective_id]:
        # Map BigML datatypes to python conversion callables ('None'
        # when no conversion applies).
        input_type = ('None' if not fields[key]['datatype'] in
                      PYTHON_CONV
                      else PYTHON_CONV[fields[key]['datatype']])
        input_types.append(input_type)
        if 'prefix' in fields[key]:
            prefixes.append("%s: %s" % (count,
                                        repr(fields[key]['prefix'])))
        if 'suffix' in fields[key]:
            suffixes.append("%s: %s" % (count,
                                        repr(fields[key]['suffix'])))
        count += 1
    # Each static attribute is emitted with continuation lines aligned
    # under its opening bracket.
    static_content = "%sself.INPUT_TYPES = [" % (INDENT * 3)
    formatter = ",\n%s" % (" " * len(static_content))
    output += "\n%s%s%s" % (static_content,
                            formatter.join(input_types),
                            "]\n")
    static_content = "%sself.PREFIXES = {" % (INDENT * 3)
    formatter = ",\n%s" % (" " * len(static_content))
    output += "\n%s%s%s" % (static_content,
                            formatter.join(prefixes),
                            "}\n")
    static_content = "%sself.SUFFIXES = {" % (INDENT * 3)
    formatter = ",\n%s" % (" " * len(static_content))
    output += "\n%s%s%s" % (static_content,
                            formatter.join(suffixes),
                            "}\n")

    with open(HADOOP_NEXT_TEMPLATE) as template_handler:
        output += template_handler.read()

    out.write(output)
    out.flush()

    # Append the generated predict_<objective>() function itself.
    tree_python(model.tree, model.offsets, model.fields, model.objective_id,
                False if not hasattr(model, "boosting") else model.boosting,
                out, docstring(model), ids_path=ids_path, subtree=subtree)

    # Driver loop: read CSV rows and print "<row>\t<prediction>".
    output = \
"""
csv = CSVInput()
for values in csv:
    if not isinstance(values, bool):
        print u'%%s\\t%%s' %% (repr(values), repr(predict_%s(values)))
\n\n
""" % fields[model.objective_id]['slug']
    out.write(utf8(output))
    out.flush()
def hadoop_python_reducer(out=sys.stdout):
    """Generates a hadoop reducer to make predictions in python
    """
    # The reducer is entirely static: copy the template verbatim.
    with open(HADOOP_REDUCER_TEMPLATE) as template_handler:
        out.write(utf8(template_handler.read()))
    out.flush()
def tree_python(tree, offsets, fields, objective_id, boosting,
                out, docstring_str, input_map=False,
                ids_path=None, subtree=True):
    """Writes a python function that implements the model.

    Emits ``predict_<objective_slug>()`` (one keyword argument per input
    field, or a single ``data`` dict when there are too many fields) plus
    a ``predict()`` wrapper; for boosted trees the wrapper also attaches
    the boosting weight and objective class.
    """
    args = []
    args_tree = []
    parameters = sort_fields(fields)
    # Fall back to a single dict argument when the signature would be
    # too long to spell out one parameter per field.
    if not input_map:
        input_map = len(parameters) > MAX_ARGS_LENGTH
    reserved_keywords = keyword.kwlist if not input_map else None
    prefix = "_" if not input_map else ""
    for field in parameters:
        field_name_to_show = fields[field[0]]['name'].strip()
        if field_name_to_show == "":
            field_name_to_show = field[0]
        slug = slugify(field_name_to_show,
                       reserved_keywords=reserved_keywords, prefix=prefix)
        fields[field[0]].update(slug=slug)
        if not input_map:
            if field[0] != objective_id:
                args.append("%s=None" % (slug))
                args_tree.append("%s=%s" % (slug, slug))
    if input_map:
        args.append("data={}")
        args_tree.append("data=data")
    function_name = fields[objective_id]['slug'] if \
        not boosting else fields[boosting["objective_field"]]['slug']
    # Strip the keyword-avoidance prefix from the function name and make
    # sure we never end up with an empty identifier.
    if prefix == "_" and function_name[0] == prefix:
        function_name = function_name[1:]
    if function_name == "":
        function_name = "field_" + objective_id
    python_header = "# -*- coding: utf-8 -*-\n"
    predictor_definition = ("def predict_%s" %
                            function_name)
    # ``depth`` aligns wrapped arguments under the opening parenthesis.
    depth = len(predictor_definition) + 1
    predictor = "%s(%s):\n" % (predictor_definition,
                               (",\n" + " " * depth).join(args))
    predictor_doc = (INDENT + "\"\"\" " + docstring_str +
                     "\n" + INDENT + "\"\"\"\n")
    body_fn = boosted_plug_in_body if boosting else plug_in_body
    body, term_analysis_predicates, item_analysis_predicates = \
        body_fn(tree, offsets, fields, objective_id,
                fields[objective_id]["optype"] == NUMERIC,
                input_map=input_map,
                ids_path=ids_path, subtree=subtree)
    terms_body = ""
    # Auxiliary term/item handling code is only emitted when the tree
    # actually uses text or items predicates.
    if term_analysis_predicates or item_analysis_predicates:
        terms_body = term_analysis_body(fields,
                                        term_analysis_predicates,
                                        item_analysis_predicates)
    predictor = python_header + predictor + \
        predictor_doc + terms_body + body
    # ``predict()`` wrapper delegating to the generated function.
    predictor_model = "def predict"
    depth = len(predictor_model) + 1
    predictor += "\n\n%s(%s):\n" % (predictor_model,
                                    (",\n" + " " * depth).join(args))
    predictor += "%sprediction = predict_%s(%s)\n" % ( \
        INDENT, function_name, ", ".join(args_tree))
    if boosting is not None:
        predictor += "%sprediction.update({\"weight\": %s})\n" % \
            (INDENT, boosting.get("weight"))
        if boosting.get("objective_class") is not None:
            predictor += "%sprediction.update({\"class\": \"%s\"})\n" % \
                (INDENT, boosting.get("objective_class"))

    predictor += "%sreturn prediction" % INDENT
    out.write(utf8(predictor))
    out.flush()
def term_analysis_body(fields, term_analysis_predicates,
                       item_analysis_predicates):
    """ Writes auxiliary functions to handle the term and item
    analysis fields

    Returns generated source text (indented to sit inside the predict
    function) defining ``term_analysis``/``term_forms`` and/or
    ``item_analysis`` dictionaries plus their helper templates.
    """
    body = """
    import re
"""
    # static content
    if term_analysis_predicates:
        body += """
    tm_tokens = '%s'
    tm_full_term = '%s'
    tm_all = '%s'

""" % (TM_TOKENS, TM_FULL_TERM, TM_ALL)
        with open(TERM_TEMPLATE) as template_handler:
            body += template_handler.read()

        # Field ids that carry term-analysis options.
        term_analysis_options = {predicate[0] for predicate in
                                 term_analysis_predicates}
        term_analysis_predicates = set(term_analysis_predicates)
        body += """
    term_analysis = {"""
        for field_id in term_analysis_options:
            field = fields[field_id]
            body += """
        \"%s\": {""" % field['slug']
            options = sorted(field['term_analysis'].keys())
            for option in options:
                if option in TERM_OPTIONS:
                    body += """
            \"%s\": %s,""" % (option, repr(field['term_analysis'][option]))
            body += """
        },"""
        body += """
    }"""
        body += """
    term_forms = {"""
        term_forms = {}
        # Collect, per field slug, the alternative spellings of every
        # term the tree's predicates mention.
        for field_id, term in term_analysis_predicates:
            alternatives = []
            field = fields[field_id]
            if field['slug'] not in term_forms:
                term_forms[field['slug']] = {}
            all_forms = field['summary'].get('term_forms', {})
            if all_forms:
                alternatives = all_forms.get(term, [])
                if alternatives:
                    terms = [term]
                    terms.extend(all_forms.get(term, []))
                    term_forms[field['slug']][term] = terms
        for field in term_forms:
            body += """
        \"%s\": {""" % field
            terms = sorted(term_forms[field].keys())
            for term in terms:
                body += """
            \"%s\": %s,""" % (term, term_forms[field][term])
            body += """
        },"""
        body += """
    }
"""
    if item_analysis_predicates:
        with open(ITEMS_TEMPLATE) as template_handler:
            body += template_handler.read()

        # Field ids that carry item-analysis options.
        item_analysis_options = {predicate[0] for predicate in
                                 item_analysis_predicates}
        item_analysis_predicates = set(item_analysis_predicates)
        body += """
    item_analysis = {"""
        for field_id in item_analysis_options:
            field = fields[field_id]
            body += """
        \"%s\": {""" % field['slug']
            for option in field['item_analysis']:
                if option in ITEM_OPTIONS:
                    body += """
            \"%s\": %s,""" % (option, repr(field['item_analysis'][option]))
            body += """
        },"""
        body += """
    }
"""
    return body
def tableau(model, out=sys.stdout, hadoop=False,
            filter_id=None, subtree=True, attr=DFT_ATTR):
    """Returns a basic tableau function that implements the model.

    `out` is file descriptor to write the tableau code.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    ids_path = get_ids_path(model, filter_id)
    if hadoop:
        return "Hadoop output not available."
    translated = tree_tableau(model.tree, model.offsets, model.fields,
                              model.objective_id, out, ids_path=ids_path,
                              subtree=subtree, attr=attr)
    # Some field types (text/items) cannot be expressed in Tableau; in
    # that case tree_tableau reports failure and nothing was written.
    if translated:
        out.write("END\n")
    else:
        out.write("\nThis function cannot be represented "
                  "in Tableau syntax.\n")
    out.flush()
    return None
def tableau_body(tree, offsets, fields, objective_id,
                 body="", conditions=None, cmv=None,
                 ids_path=None, subtree=True, attr=DFT_ATTR):
    """Translate the model into a set of "if" statements in Tableau syntax

    `depth` controls the size of indentation. As soon as a value is missing
    that node is returned without further evaluation.
    Returns "" when the tree uses a field type (text/items) that cannot
    be expressed in Tableau.
    """
    if cmv is None:
        cmv = []
    if body:
        alternate = "ELSEIF"
    else:
        if conditions is None:
            conditions = []
        alternate = "IF"

    node = get_node(tree)
    children_number = node[offsets["children#"]]
    children = [] if children_number == 0 else node[offsets["children"]]
    children = filter_nodes(children, offsets, ids=ids_path,
                            subtree=subtree)
    if children:
        [_, field, _, _, _] = get_predicate(children[0])
        has_missing_branch = (missing_branch(children) or
                              none_value(children))
        # the missing is singled out as a special case only when there's
        # no missing branch in the children list
        if (not has_missing_branch and
                fields[field]['name'] not in cmv):
            conditions.append("ISNULL([%s])" % fields[field]['name'])
            body += ("%s %s THEN " %
                     (alternate, " AND ".join(conditions)))
            if fields[objective_id]['optype'] == 'numeric':
                value = node[offsets[attr]]
            else:
                value = tableau_string(node[offsets[attr]])
            body += ("%s\n" % value)
            cmv.append(fields[field]['name'])
            alternate = "ELSEIF"
            del conditions[-1]
        for child in children:
            pre_condition = ""
            post_condition = ""
            [operator, field, ch_value, _, missing] = get_predicate(child)
            if has_missing_branch and ch_value is not None:
                negation = "" if missing else "NOT "
                connection = "OR" if missing else "AND"
                pre_condition = (
                    "(%sISNULL([%s]) %s " % (
                        negation, fields[field]['name'], connection))
                if not missing:
                    cmv.append(fields[field]['name'])
                post_condition = ")"
            optype = fields[field]['optype']
            if ch_value is None:
                value = ""
            elif optype in ['text', 'items']:
                # Text/items predicates have no Tableau equivalent.
                return ""
            elif optype == 'numeric':
                value = ch_value
            else:
                value = repr(ch_value)
            operator = ("" if ch_value is None else
                        PYTHON_OPERATOR[operator])
            if ch_value is None:
                pre_condition = (
                    T_MISSING_OPERATOR[operator])
                post_condition = ")"

            conditions.append("%s[%s]%s%s%s" % (
                pre_condition,
                fields[field]['name'],
                operator,
                value,
                post_condition))
            body = tableau_body(child, offsets, fields, objective_id,
                                body, conditions[:], cmv=cmv[:],
                                ids_path=ids_path, subtree=subtree, attr=attr)
            del conditions[-1]
    else:
        if fields[objective_id]['optype'] == 'numeric':
            # Fix: read the attribute from the unwrapped ``node`` (the
            # raw ``tree`` is not indexed by offsets), matching the
            # categorical branch below and the missing-branch case above.
            value = node[offsets[attr]]
        else:
            value = tableau_string(node[offsets[attr]])
        body += (
            "%s %s THEN" % (alternate, " AND ".join(conditions)))
        body += " %s\n" % value

    return body
def tree_tableau(tree, offsets, fields, objective_id,
                 out, ids_path=None, subtree=True, attr=DFT_ATTR):
    """Writes a Tableau function that implements the model.

    Returns True on success, False when the model cannot be translated.
    """
    translated = tableau_body(tree, offsets, fields, objective_id,
                              ids_path=ids_path, subtree=subtree, attr=attr)
    if translated:
        out.write(utf8(translated))
        out.flush()
        return True
    return False
def group_prediction(model):
    """Groups in categories or bins the predicted data

    dict - contains a dict grouping counts in 'total' and 'details' lists.
        'total' key contains a 3-element list.
               - common segment of the tree for all instances
               - data count
               - predictions count
        'details' key contains a list of elements. Each element is a
                  3-element list:
               - complete path of the tree from the root to the leaf
               - leaf predictions count
               - confidence
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    groups = {}
    tree = model.tree
    node = get_node(tree)
    offsets = model.offsets
    distribution = node[offsets["distribution"]]

    # Seed one group per category/bin of the root distribution; the data
    # count comes from the training distribution, the prediction count
    # is accumulated during the traversal below.
    for group in distribution:
        groups[group[0]] = {'total': [[], group[1], 0],
                            'details': []}
    path = []

    def add_to_groups(groups, output, path, count, confidence,
                      impurity=None):
        """Adds instances to groups array
        """
        group = output
        if output not in groups:
            groups[group] = {'total': [[], 0, 0],
                             'details': []}
        groups[group]['details'].append([path, count, confidence,
                                         impurity])
        groups[group]['total'][2] += count

    def depth_first_search(tree, path):
        """Search for leafs' values and instances
        """
        node = get_node(tree)
        predicate = get_predicate(tree)
        if isinstance(predicate, list):
            [operation, field, value, term, _] = predicate
            operator = INVERSE_OP[operation]
            path.append(Predicate(operator, field, value, term))
            # Side effect: record every term used in the tree on the
            # model, keyed by its field.
            if term:
                if field not in model.terms:
                    model.terms[field] = []
                if term not in model.terms[field]:
                    model.terms[field].append(term)

        if node[offsets["children#"]] == 0:
            add_to_groups(groups, node[offsets["output"]],
                          path, node[offsets["count"]],
                          node[offsets["confidence"]],
                          gini_impurity(node[offsets["distribution"]],
                                        node[offsets["count"]]))
            return node[offsets["count"]]
        children = node[offsets["children"]][:]
        children.reverse()

        children_sum = 0
        for child in children:
            children_sum += depth_first_search(child, path[:])
        # Instances not covered by any child are attributed to this
        # inner node's own output.
        if children_sum < node[offsets["count"]]:
            add_to_groups(groups, node[offsets["output"]], path,
                          node[offsets["count"]] - children_sum,
                          node[offsets["confidence"]],
                          gini_impurity(node[offsets["distribution"]],
                                        node[offsets["count"]]))
        return node[offsets["count"]]

    depth_first_search(tree, path)

    return groups
def get_data_distribution(model):
    """Returns training data distribution
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    # The root node stores the full training distribution; sort it by
    # category/bin value.
    root = get_node(model.tree)
    return sorted(root[model.offsets["distribution"]],
                  key=lambda bin_info: bin_info[0])
def get_prediction_distribution(model, groups=None):
    """Returns model predicted distribution
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    if groups is None:
        groups = group_prediction(model)
    # Keep only the categories the model actually predicts (prediction
    # count > 0) and sort them by category value.
    predictions = [[group, info['total'][2]]
                   for group, info in groups.items()
                   if info['total'][2] > 0]
    return sorted(predictions, key=lambda prediction: prediction[0])
def summarize(model, out=sys.stdout, format=BRIEF):
    """Prints summary grouping distribution as class header and details

    Writes to ``out``: the training data distribution, the predicted
    distribution, field importances (when available) and, per predicted
    class, the shared rule path plus each leaf's share and confidence.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    tree = model.tree

    def extract_common_path(groups):
        """Extracts the common segment of the prediction path for a group
        """
        for group in groups:
            details = groups[group]['details']
            common_path = []
            if len(details) > 0:
                # Longest prefix of predicates shared by every leaf path.
                mcd_len = min([len(x[0]) for x in details])
                for i in range(0, mcd_len):
                    test_common_path = details[0][0][i]
                    for subgroup in details:
                        if subgroup[0][i] != test_common_path:
                            i = mcd_len
                            break
                    if i < mcd_len:
                        common_path.append(test_common_path)
            groups[group]['total'][0] = common_path
            if len(details) > 0:
                # Show the most populated leaves first.
                groups[group]['details'] = sorted(details,
                                                  key=lambda x: x[1],
                                                  reverse=True)

    def confidence_error(value, impurity=None):
        """Returns confidence for categoric objective fields
           and error for numeric objective fields
        """
        if value is None:
            return ""
        impurity_literal = ""
        if impurity is not None and impurity > 0:
            impurity_literal = "; impurity: %.2f%%" % (round(impurity, 4))

        objective_type = model.fields[model.objective_id]['optype']
        if objective_type == 'numeric':
            return " [Error: %s]" % value
        return " [Confidence: %.2f%%%s]" % (round(value, 4) * 100,
                                            impurity_literal)

    distribution = get_data_distribution(model)

    out.write(utf8("Data distribution:\n"))
    print_distribution(distribution, out=out)
    out.write(utf8("\n\n"))

    groups = group_prediction(model)
    predictions = get_prediction_distribution(model, groups)

    out.write(utf8("Predicted distribution:\n"))
    print_distribution(predictions, out=out)
    out.write(utf8("\n\n"))

    if model.field_importance:
        out.write(utf8("Field importance:\n"))
        print_importance(model, out=out)

    extract_common_path(groups)

    out.write(utf8("\n\nRules summary:"))

    node = get_node(tree)
    count = node[model.offsets["count"]]
    for group in [x[0] for x in predictions]:
        details = groups[group]['details']
        path = Path(groups[group]['total'][0])
        # Shares of training data and of predictions for this class.
        data_per_group = groups[group]['total'][1] * 1.0 / count
        pred_per_group = groups[group]['total'][2] * 1.0 / count

        out.write(utf8("\n\n%s : (data %.2f%% / prediction %.2f%%) %s" %
                       (group,
                        round(data_per_group, 4) * 100,
                        round(pred_per_group, 4) * 100,
                        path.to_rules(model.fields, format=format))))

        if len(details) == 0:
            out.write(utf8("\n    The model will never predict this"
                           " class\n"))
        elif len(details) == 1:
            subgroup = details[0]
            out.write(utf8("%s\n" % confidence_error(
                subgroup[2], impurity=subgroup[3])))
        else:
            out.write(utf8("\n"))
            for subgroup in details:
                # Share of this class's predictions that this leaf makes.
                pred_per_sgroup = subgroup[1] * 1.0 / \
                    groups[group]['total'][2]
                path = Path(subgroup[0])
                path_chain = path.to_rules(model.fields, format=format) if \
                    path.predicates else "(root node)"
                out.write(utf8("    · %.2f%%: %s%s\n" %
                               (round(pred_per_sgroup, 4) * 100,
                                path_chain,
                                confidence_error(subgroup[2],
                                                 impurity=subgroup[3]))))

    out.flush()
def get_nodes_info(model, headers, leaves_only=False):
    """Generator that yields the nodes information in a row format

    :param headers: list of column names that selects which node
                    attributes are emitted and in which order
    :param leaves_only: when True, only leaf nodes are yielded
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")

    def get_tree_nodes_info(tree, offsets, regression, fields, objective_id,
                            headers=None, leaves_only=False):
        """Yields the information associated to each of the tree nodes
        """
        row = []
        node = get_node(tree)
        if not regression:
            category_dict = dict(node[offsets["distribution"]])
        for header in headers:
            if header == fields[objective_id]['name']:
                row.append(node[offsets["output"]])
                continue
            if header in ['confidence', 'error']:
                row.append(node[offsets["confidence"]])
                continue
            if header == 'impurity':
                row.append(gini_impurity(node[offsets["distribution"]],
                                         node[offsets["count"]]))
                continue
            if regression and header.startswith('bin'):
                for bin_value, bin_instances in node[offsets["distribution"]]:
                    row.append(bin_value)
                    row.append(bin_instances)
                break
            if not regression:
                row.append(category_dict.get(header))
        # Pad short rows (e.g. fewer bins than headers) with None.
        while len(row) < len(headers):
            row.append(None)
        # Fix: the flattened tree node is a plain list and has no
        # ``children`` attribute; use the stored children counter to
        # detect leaves (``tree.children`` raised AttributeError
        # whenever ``leaves_only`` was True).
        if not leaves_only or node[offsets["children#"]] == 0:
            yield row
        if node[offsets["children#"]] > 0:
            for child in node[offsets["children"]]:
                for row in get_tree_nodes_info(child, offsets, regression,
                                               fields, objective_id, headers,
                                               leaves_only=leaves_only):
                    yield row

    return get_tree_nodes_info(model.tree,
                               model.offsets,
                               model.regression,
                               model.fields,
                               model.objective_id,
                               headers, leaves_only=leaves_only)
def tree_csv(model, file_name=None, leaves_only=False):
    """Outputs the node structure to a CSV file or array

    When ``file_name`` is given the rows are written there and the file
    name is returned; otherwise a list of rows (headers first) is
    returned.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    headers_names = []
    if model.regression:
        # Regression: error plus one (value, instances) column pair per
        # distribution bin of the widest node.
        headers_names.append(
            model.fields[model.objective_id]['name'])
        headers_names.append("error")
        max_bins = get_node(model.tree)[model.offsets["max_bins"]]
        for index in range(0, max_bins):
            headers_names.append("bin%s_value" % index)
            headers_names.append("bin%s_instances" % index)
    else:
        # Classification: confidence, impurity and one column per
        # category in the root distribution.
        headers_names.append(
            model.fields[model.objective_id]['name'])
        headers_names.append("confidence")
        headers_names.append("impurity")
        node = get_node(model.tree)
        for category, _ in node[model.offsets["distribution"]]:
            headers_names.append(category)

    nodes_generator = get_nodes_info(model, headers_names,
                                     leaves_only=leaves_only)
    if file_name is not None:
        with UnicodeWriter(file_name) as writer:
            writer.writerow([utf8(header)
                             for header in headers_names])
            for row in nodes_generator:
                writer.writerow([item if not isinstance(item, str)
                                 else utf8(item)
                                 for item in row])
        return file_name
    rows = []
    rows.append(headers_names)
    for row in nodes_generator:
        rows.append(row)
    return rows
|
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>>), which is a syntax error. Both sides were
# identical except for the second prompt; the HEAD side is kept because
# its "Enter the 2nd no:" prompt is the correct one.
print("Enter the 1st no:")
a = int(input())
print("Enter the 2nd no:")
b = int(input())
m = max(a, b)
print("Maximum:", m)
|
import tensorflow as tf
import os
# Width of the two shared dense hidden layers in Model.define_model().
LAYER_SIZE = 350
# Sizes reserved for an auxiliary win-probability head.
# NOTE(review): not referenced in the visible code — confirm before removal.
PROB_WIN_LAYER_SIZE_1 = 100
PROB_WIN_LAYER_SIZE_2 = 50
# Checkpoint file name and folder used by Model.save()/Model.load().
TENSORFLOW_SAVE_FILE = 'agent'
TENSORFLOW_CHECKPOINT_FOLDER = 'tensorflow_checkpoint'
class Model:
    """ Neural network to implement deep Q-learning with memory
    """
    def __init__(self, num_states, num_actions, batch_size, restore, sess):
        """Build the TF graph and either restore weights or initialize.

        :param num_states: length of the input state vector
        :param num_actions: number of discrete actions (output size)
        :param batch_size: training batch size (stored for callers)
        :param restore: when True, load weights from the checkpoint
                        instead of running the variable initializer
        :param sess: an open tf.Session used for all graph operations
        """
        self.num_states = num_states
        self.num_actions = num_actions
        self.batch_size = batch_size
        # define the placeholders
        self.states = None
        self.actions = None
        # the output operations
        self.logits = None
        self.optimizer = None
        # now setup the model
        self.define_model()
        # Saver/initializer must be created after define_model() so all
        # graph variables exist.
        self.saver = tf.train.Saver()
        self.init_variables = tf.global_variables_initializer()
        self.sess = sess
        if restore:
            self.load()
        else:
            self.sess.run(self.init_variables)
    def save(self):
        """ save model parameters to file"""
        local = self.saver.save(self.sess, "./" + TENSORFLOW_CHECKPOINT_FOLDER + "/" + TENSORFLOW_SAVE_FILE)
        print("saved to ", local)
    def load(self):
        """ load model parameters from file"""
        self.saver.restore(self.sess, "./" + TENSORFLOW_CHECKPOINT_FOLDER + "/" + TENSORFLOW_SAVE_FILE)
    def define_model(self):
        """ builds a simple tensorflow dense neural network that accepts the state and computes the action."""
        self.states = tf.placeholder(shape=[None, self.num_states], dtype=tf.float32)
        # Target Q-values placeholder used as the regression target.
        self.q_s_a = tf.placeholder(shape=[None, self.num_actions], dtype=tf.float32)
        # create a couple of fully connected hidden layers
        fc1 = tf.layers.dense(self.states, LAYER_SIZE, activation=tf.nn.relu)
        fc2 = tf.layers.dense(fc1, LAYER_SIZE, activation=tf.nn.relu)
        # Linear output layer: one predicted Q-value per action.
        self.logits = tf.layers.dense(fc2, self.num_actions)
        self.loss = tf.losses.mean_squared_error(self.q_s_a, self.logits)
        self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)
    def get_num_actions(self):
        """ Returns the number of possible actions """
        return self.num_actions
    def get_num_states(self):
        """ Returns the length of the input state """
        return self.num_states
    def get_batch_size(self):
        """ Returns the batch size """
        return self.batch_size
    def predict_one(self, state):
        """ Run the state ( which is state.asVector() ) through the model and return the predicted q values """
        return self.sess.run(self.logits, feed_dict={self.states: state.reshape(1, self.num_states)})
    def predict_batch(self, states):
        """ Run a batch of states through the model and return a batch of q values. """
        return self.sess.run(self.logits, feed_dict={self.states: states})
    def train_batch(self, x_batch, y_batch):
        """ Trains the model with a batch of X (state) -> Y (reward) examples """
        return self.sess.run([self.optimizer, self.loss], feed_dict={self.states: x_batch, self.q_s_a: y_batch})
# -*- coding: utf-8 -*-
"""Tests of the internal tabulate functions."""
from __future__ import print_function
from __future__ import unicode_literals
import tabulate as T
from common import assert_equal, assert_in, assert_raises, SkipTest
def test_multiline_width():
    "Internal: _multiline_width()"
    # The width of a multiline string is the width of its widest line.
    assert_equal(T._multiline_width("foo\nbarbaz\nspam"), 6)
    single_line = "12345"
    assert_equal(T._multiline_width(single_line), len(single_line))
def test_align_column_decimal():
    "Internal: _align_column(..., 'decimal')"
    # Numbers (including scientific notation) are padded so their
    # decimal points line up in a single column.
    column = ["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"]
    output = T._align_column(column, "decimal")
    expected = [
        ' 12.345 ',
        '-1234.5 ',
        ' 1.23 ',
        ' 1234.5 ',
        ' 1e+234 ',
        ' 1.0e234']
    assert_equal(output, expected)
def test_align_column_none():
    "Internal: _align_column(..., None)"
    # With no alignment requested, the column is returned unchanged.
    original = ['123.4', '56.7890']
    aligned = T._align_column(original, None)
    assert_equal(aligned, ['123.4', '56.7890'])
def test_align_column_multiline():
    "Internal: _align_column(..., is_multiline=True)"
    # Each line of a multiline cell is centered within the column width.
    column = ["1", "123", "12345\n6"]
    output = T._align_column(column, "center", is_multiline=True)
    expected = [
        " 1 ",
        " 123 ",
        "12345" + "\n" +
        " 6 "]
    assert_equal(output, expected)
|
#!/usr/bin/python
# encoding: utf-8
"""
@auth: zhaopan
@time: 2018/4/11 09:58
"""
from proxy2 import *
class ProxyZCBLHandler(ProxyRequestHandler):
    """Proxy handler that replaces the zhongchebaolian homepage response."""
    def request_handler(self, req, req_body):
        # No request rewriting needed; pass requests through untouched.
        pass
    def response_handler(self, req, req_body, res, res_body):
        # Intercept the target homepage and serve alternative content
        # fetched from another host instead of the original body.
        # NOTE(review): assumes proxy2 exposes the absolute request URL
        # in req.path — confirm against the proxy2 implementation.
        if req.path == 'https://bjjj.zhongchebaolian.com/app_web/jsp/homepage.jsp':
            import requests
            res2 = requests.get('https://qdota.com/homepage_chunnel.jsp')
            return res2.content
if __name__ == '__main__':
    # Launch the proxy server using the custom handler above.
    start_server(HandlerClass=ProxyZCBLHandler)
|
from pyston.contrib.dynamo.paginator import DynamoCursorBasedPaginator as OriginDynamoCursorBasedPaginator
class DynamoCursorBasedPaginator(OriginDynamoCursorBasedPaginator):
    """Dynamo cursor paginator with an explicit paginator type label."""
    # Identifier used to select this paginator implementation.
    type = 'cursor-based-paginator'
|
def foo():
    """Print a marker showing that foo() was called."""
    print("in foo")
    return None
def bar():
    """Print a marker showing that bar() was called."""
    print("in bar")
    return None
# Module-level state visible to importers of this test module.
my_var = 0
# Side effect on import: announces that the module was loaded.
print("importing test module")
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
scenario_filenames = ["OUTPUT_110011_20201117123025"]
scenario_labels =["Lockdown enabled,Self Isolation,Mask Compliance (0.5)"]
MAX_DAY = 250#250#120
POPULATION = 10000.0
FIGSIZE = [20,10]
plt.rcParams.update({'font.size': 22})
#### comparison of infections
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100*dfg["Infected_count"].values/POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Infected_count"])))+[MAX_DAY],list(100*dfg["Infected_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Infected (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/infected_count_comparison.png")
#### comparison of deaths
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100 * dfg["Death_count"].values / POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Death_count"])))+[MAX_DAY],list(100*dfg["Death_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Deceased (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/death_count_comparison.png")
#### comparison of recoveries
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
if True:#i in [1,3,4]:
simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
df = pd.read_csv(simulation_file)
dfg = df.groupby("Date").mean()
last_val = (100 * dfg["Recovered_count"].values / POPULATION)[-1]
plt.plot(list(np.arange(len(dfg["Recovered_count"])))+[MAX_DAY],list(100*dfg["Recovered_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
#plt.plot([0,70],[5,5],"--",c='grey')
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Recovered (% of Population)")
plt.subplots_adjust(right=0.98,left=0.08)
plt.savefig("analyze_simulation_output/recovered_count_comparison.png")
#### comparison of number of notifications
# Wrapped in try/except because not every scenario logs a "notified_count"
# column. NOTE(review): the bare `pass` also hides any other error silently.
try:
    plt.figure(figsize=FIGSIZE)
    for i in range(len(scenario_labels)):
        if True:#i in [1,3,4]:
            simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
            df = pd.read_csv(simulation_file)
            dfg = df.groupby("Date").mean()
            last_val = (100*dfg["notified_count"].values/POPULATION)[-1]
            plt.plot(list(np.arange(len(dfg["notified_count"])))+[MAX_DAY],list(100*dfg["notified_count"].values/POPULATION)+[last_val],label=scenario_labels[i])
            #plt.plot([0,70],[5,5],"--",c='grey')
    plt.legend()
    plt.xlabel("Days since outbreak")
    plt.ylabel("% of population notified to isolate")
    plt.subplots_adjust(right=0.98, left=0.08)
    plt.savefig("analyze_simulation_output/notified_count_comparison.png")
except Exception as e:
    pass

# compare locked zones
# Optional "locked_zones" column: which zone id is locked down per day.
try:
    plt.figure(figsize=FIGSIZE)
    for i in range(len(scenario_labels)):
        if True:#i in [1,3,4]:
            simulation_file = "simulation_output/"+scenario_filenames[i]+".csv"
            df = pd.read_csv(simulation_file)
            dfg = df.groupby("Date").mean()
            last_val = (dfg["locked_zones"].values)[-1]
            plt.plot(list(np.arange(len(dfg["locked_zones"])))+[MAX_DAY],list(dfg["locked_zones"].values)+[last_val],label=scenario_labels[i])
    plt.legend()
    plt.xlabel("Days since outbreak")
    plt.ylabel("Zone ID")
    plt.subplots_adjust(right=0.98, left=0.08)
    plt.savefig("analyze_simulation_output/locked_zones_comparison.png")
except Exception as e:
    pass
# number of entities per zone:
# Count distinct locations of each type per zone, using a 2-day window of the
# first scenario's agent-status log (drop_duplicates keeps each location once).
try:
    simulation_file = "simulation_output/" + scenario_filenames[0]
    df = pd.read_csv(simulation_file+"_overall_agent_status.csv")
    df["Date_Time"]=pd.to_datetime(df["Date_Time"])
    dfg = df.query("Date_Time >'2020-01-01' and Date_Time <'2020-01-03'").drop_duplicates("currentLocationID")
    residential_counts_zone = dfg.query("currentLocationType == 'residential'").groupby("zone_id").count()["id"].values
    employment_counts_zone = dfg.query("currentLocationType == 'employment'").groupby("zone_id").count()["id"].values
    school_counts_zone = dfg.query("currentLocationType == 'school'").groupby("zone_id").count()["id"].values
    shopping_mall = dfg.query("currentLocationType == 'shopping_mall'").groupby("zone_id").count()["id"].values
    # NOTE(review): vstack assumes every location type appears in every zone;
    # unequal lengths raise here and are swallowed by the except below.
    zone_counts = np.vstack((residential_counts_zone,
                             employment_counts_zone,
                             shopping_mall,
                             school_counts_zone)).T
    zone_counts = pd.DataFrame(zone_counts,columns=["residential","employment","shopping_mall","school"])
    zone_counts.to_csv("analyze_simulation_output/locations_per_zone.csv")
except Exception as e:
    print("error:",e)
# analyse individual user dataset
# For each scenario: count unique agents per location type per day, then plot
# the mean daily shopping-mall/market visitor count and record its average
# (consumed by the summary CSV written at the end of the script).
SHOPPING_MALL_VISITS = []
plt.figure(figsize=FIGSIZE)
for i in range(len(scenario_labels)):
    simulation_file = "simulation_output/" + scenario_filenames[i]
    df1=pd.read_csv(simulation_file+"_overall_agent_status.csv")
    df1["Date_Time"]= pd.to_datetime(df1["Date_Time"])
    df1["date"]=pd.to_datetime(df1.loc[:,"Date_Time"]).dt.date
    #df1_gp = df1.groupby(["date", "currentLocationType"]).count()
    df1_gp = df1.groupby(["date", "currentLocationType"])["id"].nunique().reset_index() # unique subs per entity per day
    #df1_gp = df1_gp.reset_index()
    df1_gp["date"] = pd.to_datetime(df1_gp["date"])
    # Residential rows provide a full date index to left-merge visit counts onto.
    df1_gp_residential = df1_gp.query("currentLocationType == 'residential'").loc[:, ["date", "currentLocationType"]]
    df1_gp_residential = df1_gp_residential.rename(columns={"currentLocationID": "residential"})
    df_shopping_mall_market_visits=df1_gp.query("currentLocationType == 'shopping_mall' or currentLocationType == 'market'").groupby(
        "date").mean().reset_index().loc[:, ["date", "id"]]
    #df1_gp_shopping_mall = df1_gp.query("currentLocationType == 'shopping_mall'").loc[:, ["date", "currentLocationID"]]
    #df1_gp_shopping_mall = df1_gp_shopping_mall.rename(columns={"currentLocationID": "shopping_mall"})
    df_shopping_mall_market_visits = df_shopping_mall_market_visits.rename(columns={"id": "shopping_mall_market_visits"})
    df1_gp_entity_visits = df1_gp_residential.merge(df_shopping_mall_market_visits, how="left", on="date")
    # Days with no mall/market visits become 0 instead of NaN.
    df1_gp_entity_visits = df1_gp_entity_visits.fillna(0)
    avg_daily_number_of_shopping_mall_visitors = int(np.average((df1_gp_entity_visits["shopping_mall_market_visits"]).values))
    # [:-1] drops the final sample — presumably a partial last day; confirm.
    plt.plot(df1_gp_entity_visits["shopping_mall_market_visits"].values[:-1],label=scenario_labels[i])
    print(avg_daily_number_of_shopping_mall_visitors)
    SHOPPING_MALL_VISITS.append(avg_daily_number_of_shopping_mall_visitors)
plt.legend()
plt.xlabel("Days since outbreak")
plt.ylabel("Shopping mall and market visitors")
plt.subplots_adjust(right=0.98,left=0.08)
plt.ylim([0,4000])
plt.savefig("analyze_simulation_output/shopping_mall_visits.png")
# Final cross-scenario summary: scrape totals out of each scenario's .txt log
# and write one row per scenario to a CSV.
POPULATIONS = []
INFECTIONS = []
DEATHS = []
RECOVERIES = []
for i in range(len(scenario_labels)):
    simulation_file = "simulation_output/" + scenario_filenames[i] + ".txt"
    with open(simulation_file) as txt_file:
        str_txt = txt_file.readlines()
        for line in str_txt:
            # Each marker is expected at most once per file; values stay strings.
            if line.find("Number of Infected = ") != -1 :
                INFECTIONS.append(line.split("Number of Infected = ")[1].split(" ")[0])
            if line.find("Number of Recovered = ") != -1 :
                RECOVERIES.append(line.split("Number of Recovered = ")[1].split(" ")[0])
            if line.find("Number of Deaths = ") != -1 :
                DEATHS.append(line.split("Number of Deaths = ")[1].split(" ")[0])
            if line.find("Total Population = ") != -1:
                POPULATIONS.append(line.split("Total Population = ")[1].split()[0])
# np.vstack requires all lists to have one entry per scenario.
df = pd.DataFrame(np.vstack((scenario_labels,POPULATIONS,INFECTIONS,RECOVERIES,DEATHS,SHOPPING_MALL_VISITS)).T,columns=["Scenario","Population","Number_infected","Number_recovered","Number_deceased","shopping_mall_visits"])
df.to_csv("analyze_simulation_output/overall_comparison.csv")
|
#!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from flask import Flask, Blueprint, request
from flask.views import MethodView
from rucio.api.exporter import export_data
from rucio.common.exception import RucioException
from rucio.common.utils import generate_http_error_flask, render_json
from rucio.web.rest.flaskapi.v1.common import before_request, after_request, check_accept_header_wrapper_flask
class Export(MethodView):
    """ Export data. """

    @check_accept_header_wrapper_flask(['application/json'])
    def get(self):
        """ Export data from Rucio.

        .. :quickref: Export data

        **Example request**:

        .. sourcecode:: http

            GET /export HTTP/1.1
            Host: rucio.cern.ch

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: application/json

            {"rses": [{"rse": "MOCK", "rse_type": "TAPE"}], "distances": {}}

        :resheader Content-Type: application/json
        :status 200: DIDs found
        :status 401: Invalid Auth Token
        :status 406: Not Acceptable
        :returns: dictionary with rucio data
        """
        try:
            # 'issuer' is placed in the WSGI environ by the auth middleware.
            return render_json(**export_data(issuer=request.environ.get('issuer')))
        except RucioException as error:
            return generate_http_error_flask(500, error.__class__.__name__, error.args[0])
# Flask wiring: expose the Export view at the blueprint root and install the
# shared rucio request hooks.
bp = Blueprint('export', __name__)

export_view = Export.as_view('scope')
bp.add_url_rule('/', view_func=export_view, methods=['get', ])

application = Flask(__name__)
application.register_blueprint(bp)
application.before_request(before_request)
application.after_request(after_request)
def make_doc():
    """ Only used for sphinx documentation to add the prefix """
    documented = Flask(__name__)
    documented.register_blueprint(bp, url_prefix='/export')
    return documented
if __name__ == "__main__":
    # Run the development server when executed directly.
    application.run()
|
# encoding=utf8
import datetime
import unittest
from wsme.api import FunctionArgument, FunctionDefinition
from wsme.rest.args import from_param, from_params, args_from_args
from wsme.exc import InvalidInput
from wsme.types import UserType, Unset, ArrayType, DictType
class MyUserType(UserType):
    # User-defined wsme type whose wire representation is a plain string.
    basetype = str
class DictBasedUserType(UserType):
    # User-defined wsme type backed by an int->int dictionary.
    basetype = DictType(int, int)
class TestProtocolsCommons(unittest.TestCase):
    # Unit tests for the wsme.rest.args parameter-conversion helpers.

    def test_from_param_date(self):
        assert from_param(datetime.date, '2008-02-28') == \
            datetime.date(2008, 2, 28)

    def test_from_param_time(self):
        assert from_param(datetime.time, '12:14:56') == \
            datetime.time(12, 14, 56)

    def test_from_param_datetime(self):
        assert from_param(datetime.datetime, '2009-12-23T12:14:56') == \
            datetime.datetime(2009, 12, 23, 12, 14, 56)

    def test_from_param_usertype(self):
        assert from_param(MyUserType(), 'test') == 'test'

    def test_from_params_empty(self):
        # A missing parameter yields the Unset sentinel, not None.
        assert from_params(str, {}, '', set()) is Unset

    def test_from_params_native_array(self):
        # Mimics a multidict exposing getall() for repeated query parameters.
        class params(dict):
            def getall(self, path):
                return ['1', '2']
        p = params({'a': []})
        assert from_params(ArrayType(int), p, 'a', set()) == [1, 2]

    def test_from_params_empty_array(self):
        assert from_params(ArrayType(int), {}, 'a', set()) is Unset

    def test_from_params_dict(self):
        # 'a[2]'-style keys are parsed into an int-keyed dict.
        value = from_params(
            DictType(int, str),
            {'a[2]': 'a2', 'a[3]': 'a3'},
            'a',
            set()
        )
        assert value == {2: 'a2', 3: 'a3'}, value

    def test_from_params_dict_unset(self):
        assert from_params(DictType(int, str), {}, 'a', set()) is Unset

    def test_from_params_usertype(self):
        value = from_params(
            DictBasedUserType(),
            {'a[2]': '2'},
            'a',
            set()
        )
        self.assertEqual(value, {2: 2})

    def test_args_from_args_usertype(self):
        class FakeType(UserType):
            name = 'fake-type'
            basetype = int
        fake_type = FakeType()
        fd = FunctionDefinition(FunctionDefinition)
        fd.arguments.append(FunctionArgument('fake-arg', fake_type, True, 0))
        new_args = args_from_args(fd, [1], {})
        self.assertEqual([1], new_args[0])
        # can't convert str to int
        try:
            args_from_args(fd, ['invalid-argument'], {})
        except InvalidInput as e:
            # The error message should mention the offending type by name.
            assert fake_type.name in str(e)
        else:
            self.fail('Should have thrown an InvalidInput')
class ArgTypeConversion(unittest.TestCase):
    # from_param must accept both native values and their string forms,
    # and pass None through unchanged.

    def test_int_zero(self):
        self.assertEqual(0, from_param(int, 0))
        self.assertEqual(0, from_param(int, '0'))

    def test_int_nonzero(self):
        self.assertEqual(1, from_param(int, 1))
        self.assertEqual(1, from_param(int, '1'))

    def test_int_none(self):
        self.assertEqual(None, from_param(int, None))

    def test_float_zero(self):
        self.assertEqual(0.0, from_param(float, 0))
        self.assertEqual(0.0, from_param(float, 0.0))
        self.assertEqual(0.0, from_param(float, '0'))
        self.assertEqual(0.0, from_param(float, '0.0'))

    def test_float_nonzero(self):
        self.assertEqual(1.0, from_param(float, 1))
        self.assertEqual(1.0, from_param(float, 1.0))
        self.assertEqual(1.0, from_param(float, '1'))
        self.assertEqual(1.0, from_param(float, '1.0'))

    def test_float_none(self):
        self.assertEqual(None, from_param(float, None))
|
import unittest
from mock import patch
from basset_client.git import get_git_info, parse_branch, parse_commit_message, parse_commit_info
class GitTest(unittest.TestCase):
    # Tests for basset_client.git; subprocess.Popen is mocked throughout so
    # no real git commands run.

    @patch('basset_client.git.subprocess.Popen')
    def test_parse_branch(self, mock_popen):
        process = mock_popen.return_value
        process.returncode = 0
        # communicate() yields (stdout, stderr); stdout is bytes.
        process.communicate.return_value = [b'master', None]
        branch = parse_branch()
        self.assertEqual(branch, 'master')

    @patch('basset_client.git.subprocess.Popen')
    def test_error_parse_branch(self, mock_popen):
        process = mock_popen.return_value
        process.returncode = -1
        process.communicate.return_value = [None, 'oh snap']
        with self.assertRaises(AssertionError) as err:
            _branch = parse_branch()
        # NOTE(review): the assertRaises context has no `.message` attribute on
        # Python 3 — this likely should be str(err.exception); confirm.
        self.assertEqual(err.message, "oh snap")

    @patch('basset_client.git.subprocess.Popen')
    def test_parse_commit_message(self, mock_popen):
        process = mock_popen.return_value
        process.returncode = 0
        process.communicate.return_value = [b'this\nis\na\ntest', None]
        message = parse_commit_message()
        assert message == 'this\nis\na\ntest'

    @patch('basset_client.git.subprocess.Popen')
    def test_parse_git_info(self, mock_popen):
        process = mock_popen.return_value
        process.returncode = 0
        # CSV-style single line as produced by a git pretty-format string.
        process.communicate.return_value = [
            b'sha1234,commiterName,committerEmail,commitDate,authorName,authorEmail,authorDate', None]
        [
            commit_sha,
            committer_name,
            committer_email,
            commit_date,
            author_name,
            author_email,
            author_date,
        ] = parse_commit_info()
        self.assertEqual(commit_sha, 'sha1234')
        self.assertEqual(committer_name, 'commiterName')
        self.assertEqual(committer_email, 'committerEmail')
        self.assertEqual(commit_date, 'commitDate')
        self.assertEqual(author_name, 'authorName')
        self.assertEqual(author_email, 'authorEmail')
        self.assertEqual(author_date, 'authorDate')

    @patch('basset_client.git.parse_branch')
    @patch('basset_client.git.parse_commit_message')
    @patch('basset_client.git.parse_commit_info')
    def test_get_git_info(self, mock_info, mock_message, mock_branch):
        # get_git_info should concatenate branch, sha, message and commit info
        # in this exact order.
        mock_info.return_value = [
            'sha123',
            'commiterName',
            'committerEmail',
            'commitDate',
            'authorName',
            'authorEmail',
            'authorDate',
        ]
        mock_message.return_value = 'message'
        mock_branch.return_value = 'master'
        info = get_git_info()
        self.assertListEqual(info, [
            'master',
            'sha123',
            'message',
            'commiterName',
            'committerEmail',
            'commitDate',
            'authorName',
            'authorEmail',
            'authorDate',
        ])
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
import glob
import al_consts
#Whelper class to calibrate camera and undo distortion from images
class AlCalibration:
    """Helper class to calibrate a camera from chessboard images and to undo
    lens distortion / apply a birds-eye perspective transform.

    Parameters
    ----------
    nx, ny : int
        Number of inner chessboard corners per row / column.
    cal_images, test_images : list of str
        Paths of the calibration and test images.
    """

    def __init__(self, nx=9, ny=6, cal_images=al_consts.CALIBRATION_IMAGES, test_images=al_consts.TEST_IMAGES):
        self.cal_images = cal_images
        self.test_images = test_images
        # BUG FIX: nx/ny were previously hard-coded to 9, 6 and the arguments
        # ignored, leaving self.nx/self.ny inconsistent with self.objp.
        self.nx, self.ny = nx, ny
        # Calibration results; stay None until calibrate_camera() or
        # load_calibrate_camera() runs (undistort() relies on this sentinel).
        self.mtx = None
        self.dist = None
        # Object points of the ideal, flat chessboard grid (z = 0).
        self.objp = np.zeros((nx * ny, 3), np.float32)
        self.objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    def calibrate_camera(self):
        """Calibrate from self.cal_images.

        Returns (camera matrix, distortion coefficients) and pickles them to
        'calibrate_camera.p' for later reuse via load_calibrate_camera().
        """
        objpoints = []  # 3D points in chessboard space
        imgpoints = []  # 2D points in image space
        for file in self.cal_images:
            img = cv2.imread(file)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)
            if ret:
                # Reuse the precomputed ideal grid instead of rebuilding it.
                objpoints.append(self.objp)
                imgpoints.append(corners)
        # camera matrix, distortion coefficients, rotation and translation vectors
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        self.mtx = mtx
        self.dist = dist
        save_dict = {'matrix': mtx, 'distortion_coef': dist}
        with open('calibrate_camera.p', 'wb') as f:
            pickle.dump(save_dict, f)
        return self.mtx, self.dist

    def draw(self, img, draw=False):
        """Detect, refine and draw chessboard corners on img; return img."""
        # Termination criteria for the sub-pixel corner refinement.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Previously hard-coded (9, 6); use the configured board size.
        ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)
        # If found, refine and draw the corners.
        if ret:
            cv2.cornerSubPix(gray, corners, (13, 13), (-1, -1), criteria)
            cv2.drawChessboardCorners(img, (self.nx, self.ny), corners, ret)
            if draw:
                cv2.imshow('img', img)
                cv2.waitKey(500)
        return img

    def load_calibrate_camera(self):
        """Load a previous calibration from 'calibrate_camera.p'."""
        with open('calibrate_camera.p', 'rb') as f:
            save_dict = pickle.load(f)
        self.mtx = save_dict['matrix']
        self.dist = save_dict['distortion_coef']
        return self.mtx, self.dist

    def undistort(self, img):
        """Return img with lens distortion removed (calibrating on demand)."""
        if self.mtx is None:
            self.calibrate_camera()
        dst = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
        return dst

    def _src_dest_frame(self, img):
        """Return (src, dst) point quads for warping dashboard view to birds-eye."""
        # Trapezoid around the lane in the source image, and the rectangle it
        # maps to; constants are tuned for the project's camera resolution.
        src = np.float32([[190, 700], [1110, 700], [720, 470], [570, 470]])
        bottom_left = src[0][0] + 100, src[0][1]
        bottom_right = src[1][0] - 200, src[1][1]
        top_left = src[3][0] - 250, 1
        top_right = src[2][0] + 200, 1
        dst = np.float32([bottom_left, bottom_right, top_right, top_left])
        return src, dst

    def perspective_transform(self, img):
        """Warp img from dashboard view to birds-eye view.

        Returns (warped image, forward matrix M, inverse matrix Minv).
        """
        w = img.shape[1]
        h = img.shape[0]
        img_size = (w, h)
        src, dst = self._src_dest_frame(img)
        M = cv2.getPerspectiveTransform(src, dst)
        Minv = cv2.getPerspectiveTransform(dst, src)
        warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
        return warped, M, Minv

    def perspective_transform_with_poi(self, img, original_image):
        """Like perspective_transform, also returning a copy of original_image.

        The copy was originally intended for drawing the points of interest.
        """
        warped, M, Minv = self.perspective_transform(img)
        preview = np.copy(original_image)
        return preview, warped, M, Minv
|
class GenerationFactory(object):
    """description of class"""
    # Builds per-row test dictionaries from an HTML form POST and renders them
    # into generated unittest source code for UART-device commands.
    # NOTE(review): none of these methods take `self` or carry @staticmethod —
    # they only work when called through the class object
    # (GenerationFactory.foo(...)), which is how the code below calls them.

    def buildDictionaryfromPostRequest(form):
        # Parse form fields named 'rowN_order', 'rowN_payloadM', 'rowN_responseM',
        # 'rowN_command', 'rowN_error' into
        # {order: {"Command": ..., "Payload": [...], "Response": [...], "Error": ...}}.
        numrows = form['numrows']  # NOTE(review): read but never used
        dictionary = {}
        counter_group = 1  # only referenced by the commented-out grouping code
        for j in form.items():
            # j[0] is the field name; character 3 (and 4 for two-digit rows)
            # holds the row number of a 'rowN...' field.
            num_row = j[0][3]
            if j[0][4].isnumeric():
                num_row =j[0][3]+ j[0][4]
            if num_row.isnumeric():
                order = form['row' + num_row + '_order']
                if order not in dictionary:
                    dictionary[order] = {}
                # else:
                #     dictionary[order+"."+str(counter_group)] = {}
                #     counter_group = counter_group +1
                dictionary_aux={}
                dictionary_aux = dictionary[order]
                if "payload" in j[0]:
                    # Trailing digit of the field name is the 1-based slot in
                    # the payload list.
                    last_position = len(j[0])-1
                    order_list = int(j[0][last_position])-1
                    if dictionary_aux.__contains__("Payload"):
                        size= len(dictionary_aux["Payload"])-1
                        if (size<=order_list):
                            if (order_list-size>1):
                                # Pad the gap up to the requested slot.
                                # NOTE(review): list_aux is not defined on this
                                # branch — this looks like it should append to
                                # dictionary_aux["Payload"]; confirm.
                                for k in range(size,order_list):
                                    list_aux.append("")
                                dictionary_aux["Payload"].append(j[1])
                            else:
                                dictionary_aux["Payload"].append(j[1])
                        else:
                            dictionary_aux["Payload"][order_list]=j[1]
                    else:
                        list_aux = []
                        size= 0
                        if (size<=order_list):
                            if (order_list-size>=1):
                                # Pad with empty strings so the value lands in
                                # its numbered slot.
                                for k in range(size,order_list):
                                    list_aux.append("")
                                list_aux.append(j[1])
                            else:
                                list_aux.append(j[1])
                        else:
                            list_aux[order_list]=j[1]
                        dictionary_aux["Payload"] = list_aux
                elif "response" in j[0]:
                    # Same slot/padding logic as "payload", for expected responses.
                    last_position = len(j[0])-1
                    order_list = int(j[0][last_position])-1
                    if dictionary_aux.__contains__("Response"):
                        size= len(dictionary_aux["Response"])-1
                        if (size<=order_list):
                            if (order_list-size>1):
                                # NOTE(review): same undefined list_aux issue
                                # as in the "payload" branch above.
                                for k in range(size,order_list):
                                    list_aux.append("")
                                dictionary_aux["Response"].append(j[1])
                            else:
                                dictionary_aux["Response"].append(j[1])
                        else:
                            dictionary_aux["Response"][order_list]=j[1]
                    else:
                        list_aux = []
                        size= 0
                        if (size<=order_list):
                            if (order_list-size>=1):
                                for k in range(size,order_list):
                                    list_aux.append("")
                                list_aux.append(j[1])
                            else:
                                list_aux.append(j[1])
                        else:
                            list_aux[order_list]=j[1]
                        dictionary_aux["Response"] = list_aux
                elif "command" in j[0]:
                    # Map the human-readable command name to its UART mnemonic.
                    dictionary_aux["Command"] = GenerationFactory.getCommandbyName(j[1])
                elif "error" in j[0]:
                    dictionary_aux["Error"] = j[1]
                # dictionary_aux aliases dictionary[order], so this is a no-op
                # safety reassignment.
                dictionary[order] = dictionary_aux
        return dictionary

    def generateManualTestFile(dictionary,counter):
        # Render the parsed dictionary into the source code of a standalone
        # unittest module (one test_E_0N method per row).
        testfile = """import unittest
import uartdevice
import log
import sys
import msvcrt
import xmlhandler
import os
#disable traceback
__unittest = True
class Run(unittest.TestCase):
    def setUp(self):
        xml = xmlhandler.Xml(os.path.dirname(os.path.abspath(__file__)), "testsettings.xml")
        self.baudrate = xml.getBaudrate()
        self.comport = xml.getComport()
        uartdevice.open_com(self.comport, self.baudrate)
    def tearDown(self):
        if self.comport is not None:
            uartdevice.close_com()
"""
        testloop=""
        try:
            for i in range(0,counter):
                if str(i) in dictionary:
                    dictionary_aux = {}
                    dictionary_aux = dictionary[str(i)]
                    # One generated test method per row index.
                    testloop= testloop+"""
    def test__E_0"""+str(i)+"""(self):
"""
                    testloop= testloop+"""        success, response = uartdevice.send_uart_cmd(["""
                    for j in range(0,len(dictionary_aux['Command'])):
                        testloop= testloop+ """ord(\""""+dictionary_aux['Command'][j]+"""\"), """
                    if 'Payload' in dictionary_aux:
                        # Payload items are hex strings; emit one 0xNN literal
                        # per character pair.
                        # NOTE(review): the inner loop shadows the outer `i`.
                        for itemlist in dictionary_aux['Payload']:
                            for i in GenerationFactory.my_range (1,len(itemlist),2):
                                testloop = testloop+"""0x"""+itemlist[i-1]+itemlist[i]+""", """
                    # Drop the trailing ", " before closing the argument list.
                    testloop=testloop.rstrip(' ')
                    testloop=testloop.rstrip(',')
                    testloop=testloop+ """])"""
                    testloop= testloop+"""
        log.printline_ascii(''.join('%02x '%i for i in response))
"""
                    if 'Response' in dictionary_aux:
                        # Expected response: hex pairs become 0xNN, anything
                        # else is emitted as ord("c").
                        testloop = testloop+"""        self.assertEqual(response,["""
                        for itemlist in dictionary_aux['Response']:
                            i=0
                            while i < len(itemlist):
                                if GenerationFactory.is_hex(itemlist[i]):
                                    testloop = testloop+"""0x"""+itemlist[i]+itemlist[i+1]+""", """
                                    i = i+2
                                else:
                                    testloop = testloop+"""ord(\""""+itemlist[i]+"""\"), """
                                    i=i+1
                        testloop=testloop.rstrip(' ')
                        testloop=testloop.rstrip(',')
                        testloop=testloop+ """])"""
                    if 'Error' in dictionary_aux and not dictionary_aux['Error']=="":
                        testloop = testloop+"""        self.assertEqual(response,[ord(\""""+dictionary_aux['Error']+"""\")])"""
                    testloop+="\n"
            testfile = testfile+testloop
        except Exception:
            # Malformed dictionaries are dumped for debugging; the partial
            # testfile (without the failing loop output) is still returned.
            print(dictionary)
        return testfile

    def getCommandbyName(name):
        # Translate a UI command name into the device's UART mnemonic.
        # NOTE(review): an unknown name falls through and returns None.
        if name == "Reset":
            return "x"
        elif name == "Select":
            return "s"
        elif name == "Transfer":
            return "t"
        elif name == "Transfer Layer 4":
            return "t4"
        elif name == "Get Version":
            return "v"
        elif name == "RF-Field On":
            return "pon"
        elif name == "RF-Field Off":
            return "poff"
        elif name == "Set User Ports":
            return "pp"
        elif name == "Read User Ports":
            return "pr"
        elif name == "Write User Ports":
            return "pw"
        elif name == "Login":
            return "l"
        elif name == "Read Block":
            return "rb"
        elif name == "Read Multiple Block":
            return "rd"
        elif name == "Read Value Block":
            return "rv"
        elif name == "Write Block":
            return "wb"
        elif name =="Write Multiple Block":
            return "wd"
        elif name == "Write Value Block":
            return "wv"
        elif name == "Increment Value Block":
            return "+"
        elif name == "Decrement Value Block":
            return "-"
        elif name == "Copy Value Block":
            return "="

    def my_range(start, end, step):
        # Inclusive-end range generator (range() excludes the end value).
        while start <= end:
            yield start
            start += step

    def is_hex(s):
        # True when s parses as a hexadecimal number.
        try:
            int(s, 16)
            return True
        except ValueError:
            return False
import tensorflow as tf
def compute_reward(seq, actions, ignore_far_sim=True, temp_dist_thre=20, use_gpu=False):
    """
    Compute diversity reward and representativeness reward
    Args:
        seq: sequence of features, shape (1, seq_len, dim)
        actions: binary action sequence, shape (1, seq_len, 1)
        ignore_far_sim (bool): whether to ignore temporally distant similarity (default: True)
        temp_dist_thre (int): threshold for ignoring temporally distant similarity (default: 20)
        use_gpu (bool): whether to use GPU
    Returns:
        scalar tf tensor: 0.5 * (diversity reward + representativeness reward)
    """
    # TensorFlow port; the commented-out lines are remnants of the original
    # PyTorch implementation.
    # _seq = tf.stop_gradient(seq)
    # _actions = tf.stop_gradient(actions)
    _seq = seq
    _actions = actions
    # Indices of the selected frames (actions != 0).
    pick_idxs = tf.squeeze(tf.where(tf.math.not_equal(tf.squeeze(_actions), tf.constant(0, dtype=tf.int32))))
    # After squeeze, a single pick yields a 0-d tensor (no shape), hence the 1.
    num_picks = len(pick_idxs) if len(pick_idxs.shape) > 0 else 1
    if num_picks == 0:
        # give zero reward if no frames are selected
        reward = tf.constant(0.0)
        # if use_gpu: reward = reward.cuda()
        return reward
    _seq = tf.squeeze(_seq)
    n = _seq.shape[0]
    # compute diversity reward
    pick_idxs = pick_idxs.numpy()
    if num_picks == 1:
        # A single frame has no pairwise diversity.
        reward_div = tf.constant(0.0)
        # if use_gpu: reward_div = reward_div.cuda()
    else:
        # Cosine dissimilarity between all selected frames.
        normed_seq = _seq / tf.linalg.norm(_seq, axis=1, keepdims=True)
        dissim_mat = 1.0 - tf.linalg.matmul(normed_seq, tf.transpose(normed_seq)) # dissimilarity matrix [Eq.4]
        dissim_submat = dissim_mat.numpy()[pick_idxs,:][:,pick_idxs]
        if ignore_far_sim:
            # ignore temporally distant similarity
            pick_idxs = tf.convert_to_tensor(pick_idxs)
            pick_mat = tf.broadcast_to(pick_idxs, (num_picks, num_picks))
            temp_dist_mat = tf.math.abs(pick_mat - tf.transpose(pick_mat))
            temp_dist_mat = temp_dist_mat.numpy()
            dissim_submat[temp_dist_mat > temp_dist_thre] = 1.0
        reward_div = tf.math.reduce_sum(dissim_submat) / (num_picks * (num_picks - 1.0)) # diversity reward [Eq.3]
    # compute representativeness reward
    # Pairwise squared Euclidean distances: |a|^2 + |b|^2 - 2 a.b.
    dist_mat = tf.broadcast_to(tf.math.reduce_sum(tf.math.pow(_seq, 2), axis=1, keepdims=True), (n, n))
    dist_mat = dist_mat + tf.transpose(dist_mat)
    # dist_mat.addmm_(1, -2, _seq, _seq.t())
    dist_mat = dist_mat - 2*tf.linalg.matmul(_seq, _seq, False, True)
    # NOTE(review): if num_picks == 1 or ignore_far_sim is False, pick_idxs is
    # already a numpy array here and .numpy() would raise — confirm intended paths.
    pick_idxs = pick_idxs.numpy()
    dist_mat = tf.convert_to_tensor(dist_mat.numpy()[:,pick_idxs])
    dist_mat = tf.math.reduce_min(dist_mat, axis=1, keepdims=True)[0]
    #reward_rep = torch.exp(torch.FloatTensor([-dist_mat.mean()]))[0] # representativeness reward [Eq.5]
    reward_rep = tf.math.exp(-tf.math.reduce_mean(dist_mat))
    # combine the two rewards
    reward = (reward_div + reward_rep) * 0.5
    return reward
|
class Solution:
    def findUnsortedSubarray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray which, when
        sorted in place, makes the whole array sorted; 0 if already sorted.
        """
        nums_length = len(nums)
        left = 0
        right = nums_length - 1
        # Scan from the left for the first out-of-order pair.
        while left < nums_length - 1:
            if nums[left] > nums[left + 1]:
                break
            left += 1
        if left >= nums_length - 1:
            # No descent anywhere: the array is already sorted.
            return 0
        # Scan from the right for the last out-of-order pair. (Stopping at
        # right == 1 is safe: if no descent was found by then, the only
        # disorder is at index 0, which the extension step below handles.)
        while right - 1 > 0:
            if nums[right] < nums[right - 1]:
                break
            right -= 1
        # Extend the window left past any element larger than its minimum.
        local_min = min(nums[left:right + 1])
        for i in range(left - 1, -1, -1):
            if nums[i] > local_min:
                left = i
        # Extend the window right past any element smaller than its maximum.
        local_max = max(nums[left:right + 1])
        for i in range(right + 1, nums_length):
            if nums[i] < local_max:
                right = i
        # Length of [left, right] directly — no need to build a throwaway
        # slice just to take its len() (was: len(nums[left:right + 1])).
        return right - left + 1
import pandas as pd
import numpy as np
import random as rd
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt

# PREPARE DATA ####################################################################
# Fake expression matrix: 100 genes x 10 samples (5 wild-type, 5 knock-out);
# each gene's counts are Poisson-distributed around its own random mean.
genes = ['gene' + str(i) for i in range(1, 101)]
wt = ['wt' + str(i) for i in range(1,6)]
ko = ['ko' + str(i) for i in range(1,6)]
data = pd.DataFrame(columns=[*wt, *ko], index=genes)
for gene in data.index:
    data.loc[gene, 'wt1':'wt5'] = np.random.poisson(lam=rd.randrange(10, 1000), size=5)
    data.loc[gene, 'ko1':'ko5'] = np.random.poisson(lam=rd.randrange(10, 1000), size=5)
print(data.head())
"""
       wt1   wt2   wt3   wt4   wt5   ko1  ko2  ko3  ko4  ko5
gene1  832   820   822   816   858   890  954  894  945  950
gene2  898  1001  1006   998   943   209  234  203  236  211
gene3  422   387   426   422   431   894  796  876  820  810
gene4  950   922   936  1000   939   977  952  970  946  961
gene5  845   874   927   850   900   451  489  482  452  498
"""
# BUG FIX: was `print(data.shap)` — AttributeError; the attribute is `shape`.
print(data.shape)
"""(100, 10)
100 rows x 10 columns
"""
# CENTERING ####################################################################
# Transpose so samples are rows, then standardize each gene (column).
scaled_data = preprocessing.scale(data.T)
pca = PCA()
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
####################################################################
# Scree plot: % variance explained per principal component.
per_var = np.round(pca.explained_variance_ratio_ * 100, decimals=1)
labels = ['PC' + str(x) for x in range(1, len(per_var)+1)]
plt.bar(x=range(1, len(per_var)+1), height=per_var, tick_label=labels)
plt.ylabel('Percentage of Explained Variance')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.show()
# Scatter of the samples in PC1/PC2 space, annotated with sample names.
pca_df = pd.DataFrame(pca_data, index=[*wt, *ko], columns=labels)
plt.scatter(pca_df.PC1, pca_df.PC2)
plt.title('My PCA Graph')
plt.xlabel('PC1 - {0}%'.format(per_var[0]))
plt.ylabel('PC2 - {0}%'.format(per_var[1]))
for sample in pca_df.index:
    plt.annotate(sample, (pca_df.PC1.loc[sample], pca_df.PC2.loc[sample]))
plt.show()
####################################################################
# Top-10 genes driving PC1, ranked by absolute loading score.
loading_scores = pd.Series(pca.components_[0], index=genes)
sorted_loading_scores = loading_scores.abs().sort_values(ascending=False)
top_10_genes = sorted_loading_scores[0:10].index.values
print(loading_scores[top_10_genes])
# -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
from hashlib import sha256
import os
from .... import Document, SchemaListProperty, StringProperty, \
StringListProperty
class Permission(Document):
    # A named permission; referenced by Group.permissions.
    name = StringProperty(required=True)
class Group(Document):
    """
    Group class, contains multiple permissions.
    """
    name = StringProperty(required=True)
    permissions = SchemaListProperty(Permission)
class User(Document):
    """The base User model. This should be extended by the user."""
    # NOTE: Python 2 code — relies on the `unicode` builtin.
    login = StringProperty(required=True)
    # Stored as 64 hex chars of salt followed by 64 hex chars of digest.
    password = StringProperty(required=True)
    groups = StringListProperty()

    @staticmethod
    def _hash_password(cleartext):
        # Produce salt.hexdigest() + sha256(password + salt.hexdigest()).
        if isinstance(cleartext, unicode):
            password_8bit = cleartext.encode('UTF-8')
        else:
            password_8bit = cleartext
        salt = sha256()
        salt.update(os.urandom(60))
        hash = sha256()
        hash.update(password_8bit + salt.hexdigest())
        hashed_password = salt.hexdigest() + hash.hexdigest()
        # Normalize to unicode before storing in the document.
        if not isinstance(hashed_password, unicode):
            hashed_password = hashed_password.decode('UTF-8')
        return hashed_password

    def set_password(self, password):
        # Store only the salted hash, never the cleartext.
        self.password = self._hash_password(password)

    @staticmethod
    def authenticate(login, password):
        # Look up by login, recompute the hash with the stored salt (first
        # 64 hex chars) and compare against the stored digest (last 64).
        user = User.view("user/by_login", key=login).one()
        if not user:
            return None
        hashed_pass = sha256()
        hashed_pass.update(password + user.password[:64])
        if user.password[64:] != hashed_pass.hexdigest():
            return None
        return user
|
import os
def get_db(DB_PATH):
    """Scan a class-per-directory image database.

    DB_PATH contains one subdirectory per class; each subdirectory contains
    that class's image files. Returns (im_paths, labels, classes) where
    labels[i] is the index into `classes` for im_paths[i]. Both the class
    directories and the files inside each are traversed in sorted order.
    """
    classes = sorted(os.listdir(DB_PATH))
    im_paths = []
    labels = []
    for label, cls in enumerate(classes):
        cls_dir = os.path.join(DB_PATH, cls)
        for fname in sorted(os.listdir(cls_dir)):
            im_paths.append(os.path.join(cls_dir, fname))
            labels.append(label)
    return im_paths, labels, classes
import os
import sys

# Sample sentence kept from earlier debugging of the quote-escaping logic.
a='his way of connecting with the family was always making us a dish , making us dinner , " louis gal@@ icia said .'
# b=a.replace('"','')
# print(b)
# exit()

# Hard-coded path to the source-side test set to be translated.
file_path = "/search/odin/haiyang/fairseq_exp/t2t_decoder/test_data/wmt17test2001.src"
with open(file_path, "r") as f:
    # line=a.replace('"','\"')
    # Escape shell metacharacters so the line survives double-quoting below.
    line = f.readline().strip().replace('"','\\"').replace('$', '\\$').replace('`', '\\`')
    while line:
        print(line)
        # exit()
        # NOTE(review): building a shell command by string concatenation is
        # vulnerable to shell injection despite the escaping above; prefer
        # subprocess.run with an argument list.
        trans = os.popen(
            './client --ip 127.0.0.1 --port 10098 -t 1 -n 1 -a 0 --source_language en --target_language zh -i "' + line + '"').readlines()
        # First line of the client output is the translation.
        print(trans[0].strip())
        line = f.readline().strip().replace('"','\\"').replace('$', '\\$').replace('`', '\\`')
|
from urllib.parse import urlparse, urlencode
from nameko.extensions import DependencyProvider
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
COUCHBASE_KEY = 'COUCHBASE'
class Couchbase(DependencyProvider):
    """Nameko dependency provider exposing a Couchbase bucket to workers.

    Expects container config of the form::

        COUCHBASE:
            URI: couchbase://user:password@host
            CLIENT_CONFIG: {...}   # optional, urlencoded onto the cluster URI
    """

    def __init__(self, bucket):
        self.cluster = None
        self.authenticator = None
        self.bucket = bucket

    def setup(self):
        """Build the cluster connection string and authenticator from config."""
        config = self.container.config[COUCHBASE_KEY]
        uri = urlparse(config['URI'])
        # BUG FIX: the default {} was previously passed as urlencode's second
        # (doseq) argument instead of as config.get's default, so a missing
        # CLIENT_CONFIG key crashed with urlencode(None).
        params = urlencode(config.get('CLIENT_CONFIG', {}))
        self.authenticator = PasswordAuthenticator(uri.username, uri.password)
        self.cluster = Cluster('{}://{}?{}'.format(uri.scheme, uri.hostname, params))

    def start(self):
        self.cluster.authenticate(self.authenticator)

    def stop(self):
        self.cluster = None

    def kill(self):
        self.cluster = None

    def get_dependency(self, worker_ctx):
        # Each worker receives an opened bucket handle.
        return self.cluster.open_bucket(self.bucket)
|
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Solver-module to solve problems, which are decomposed into subdomains.
"""
import numpy as np
# Optional dependency: the FETI solver needs the PYFETI package.
try:
    from pyfeti import SerialFETIsolver
    use_pyfeti = True
except ImportError:
    # BUG FIX: was a bare `except:`, which would also swallow e.g.
    # KeyboardInterrupt/SystemExit; only a failed import should disable PYFETI.
    use_pyfeti = False

__all__ = [
    'FETISolver',
]
class DomainDecompositionBase:
    # Abstract base for domain-decomposition solvers; subclasses implement solve().
    def __init__(self):
        pass

    def solve(self, mech_systems_dict, connectors):
        # Interface stub, overridden by concrete solvers.
        pass
class FETISolver(DomainDecompositionBase):
    """
    Wrapper for FETI-solvers, provided by the PYFETI package
    """
    def __init__(self):
        super().__init__()

    def solve(self, mech_systems_dict, connectors):
        """
        Solves a non-overlapping decomposed problem.

        Parameters
        ----------
        mech_systems_dict : dict
            Mechanical-System Translators of each substructure
        connectors : dict
            Connection-matrices for the interfaces between substructures

        Returns
        -------
        q_dict : dict
            solutions of decomposed system for each substructure
        """
        if use_pyfeti:
            K_dict, B_dict, f_dict = self._create_K_B_f_dict(mech_systems_dict, connectors)
            fetisolver = SerialFETIsolver(K_dict, B_dict, f_dict)
            solution = fetisolver.solve()
            q_dict = solution.u_dict
            return q_dict
        else:
            raise ValueError('Could not import PYFETI-library. Please install it, or use a different solver.')

    def _create_K_B_f_dict(self, mech_systems_dict, connectors_dict):
        # Assemble PYFETI's inputs: stiffness K, connectivity B and external
        # force f per substructure, all evaluated at the zero state.
        K_dict = dict()
        B_dict = dict()
        f_dict = dict()
        for i_system, mech_system in mech_systems_dict.items():
            # Linearize about u = du = 0 at t = 0.
            u = np.zeros((mech_system.dimension, 1))
            subs_key = int(i_system)
            K_dict[subs_key] = mech_system.K(u, u, 0)
            f_dict[subs_key] = mech_system.f_ext(u, u, 0)
            B_local = dict()
            for key in connectors_dict.keys():
                # Collect connectors touching this substructure, re-keyed as
                # (this_id, other_id). NOTE(review): if i_system is a string
                # key, `int(key[1]) == i_system` is always False — confirm
                # the key types used by callers.
                if int(key[1]) == i_system:
                    local_key = (int(key[1]), int(key[0]))
                    B_local[local_key] = connectors_dict[key]
            B_dict[subs_key] = B_local
        return K_dict, B_dict, f_dict
|
import json
from typing import Generator
import requests
def lazy_load_json(path: str) -> Generator:
    """ Lazy load MOCK_DATA.json

    Lazily parse a JSON-array file that stores one JSON object per line
    (MOCK_DATA.json style: ``[{...},\\n{...},\\n{...}]``). Each line is
    stripped of the surrounding array brackets and trailing separator comma
    and yielded as a parsed JSON value.

    Fixes over the previous implementation: a last line without a trailing
    newline, and brackets sitting alone on their own line, used to fall
    through all the string checks and be yielded as raw, unparsed strings.

    Arguments:
        path (str): path to MOCK_DATA.json

    Returns:
        Generator: yields one parsed JSON object per data line
    """
    with open(path, "r") as file:
        for line in file:
            text = line.strip()
            # Drop the opening array bracket (first line).
            if text.startswith("["):
                text = text[1:].strip()
            # Drop either the separator comma or the closing array bracket
            # (last line) — neither depends on a trailing newline anymore.
            if text.endswith(","):
                text = text[:-1]
            elif text.endswith("]"):
                text = text[:-1]
            text = text.strip()
            if not text:
                # A bracket or blank line with no payload: nothing to yield.
                continue
            yield json.loads(text)
def update_monitor_counter(monitor_url: str, increment_by: int) -> dict:
    """ Update monitor counter

    Post increment_by to monitor_url and update respective counter

    Arguments:
        monitor_url (str): monitor endpoint url
        increment_by (int): increment counter by value

    Returns:
        dict
    """
    # Only positive increments are forwarded to the monitor endpoint.
    if increment_by > 0:
        payload = {"increment_by": increment_by}
        # NOTE(review): the payload is JSON-encoded but sent via `data=`
        # without a Content-Type header — confirm the endpoint accepts a
        # raw JSON body posted this way.
        response = requests.post(monitor_url, data=json.dumps(payload))
        # NOTE(review): when increment_by <= 0 the function implicitly
        # returns None despite the `-> dict` annotation — confirm callers
        # handle that case.
        return response.json()
|
# import email
# import imaplib
# import re
# import parsedatetime
# from datetime import datetime
# from dateutil.parser import parse
# from flask import Flask, jsonify, render_template, request
# from flask_script import Manager
# import logging
#
#
# def dttm_from_timtuple(d):
# return datetime(
# d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
#
#
# def parse_datetime(s):
# if not s:
# return None
# try:
# dttm = parse(s)
# except Exception:
# try:
# cal = parsedatetime.Calendar()
# parsed_dttm, parsed_flags = cal.parseDT(s)
# # when time is not extracted, we 'reset to midnight'
# if parsed_flags & 2 == 0:
# parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
# dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
# except Exception as e:
# raise ValueError("Couldn't parse date string [{}]".format(s))
# return dttm
#
#
# app = Flask(__name__)
#
#
# @app.route("/riverboat")
# def riverboat():
# try:
# EMAIL = request.args.get("email")
# PASSWORD = request.args.get("password")
# SERVER = 'imap.gmail.com'
# mail = imaplib.IMAP4_SSL(SERVER)
# mail.login(EMAIL, PASSWORD)
# mail.select('inbox')
# status, data = mail.search(None, 'ALL')
# mail_ids = []
#
# for block in data:
# mail_ids += block.split()
# final_mail = None
# for i in mail_ids:
# status, data = mail.fetch(i, '(RFC822)')
# mail_content = ''
# for response_part in data:
# if isinstance(response_part, tuple):
# message = email.message_from_bytes(response_part[1])
#
# mail_from = message['from']
# if mail_from != "Educative <support@educative.io>":
# continue
# mail_subject = message['subject']
# if message.is_multipart():
# mail_content = ''
# for part in message.get_payload():
# if part.get_content_type() == 'text/plain':
# mail_content += part.get_payload()
# else:
# mail_content = message.get_payload()
# final_mail = mail_content
#
# code = re.findall("Educative: ([0-9]{6})", final_mail)
# if code:
# msg = code[0]
# else:
# msg = '0'
# return jsonify(code=msg)
# except Exception as e:
# return jsonify(code='0', error=str(e))
#
#
# @app.route("/get_code")
# def get_code():
# return "您好,系统升级,此方法获取验证码已经下线,请通过查看账号密码的链接获取验证码。如有问题,请联系旺旺客服"
#
#
# @app.route("/get_code2")
# def get_code2():
# return "您好,系统升级,此方法获取验证码已经下线,请通过查看账号密码的链接获取验证码。如有问题,请联系旺旺客服"
#
#
# @app.route("/get_code3")
# def get_code3():
# return "您好,系统升级,此方法获取验证码已经下线,请通过查看账号密码的链接获取验证码。如有问题,请联系旺旺客服"
#
#
# if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80)
|
"""
Ionization cross section models.
"""
# Standard library modules.
# Third party modules.
# Local modules.
from pymontecarlo_gui.options.model.base import ModelFieldBase
# Globals and constants variables.
class IonizationCrossSectionModelField(ModelFieldBase):
    """GUI model-selection field for ionization cross section models."""

    def title(self):
        # Label displayed for this model category in the options widget.
        return "Ionization cross section"
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import url
from django.contrib import admin
from django.forms.models import modelform_factory
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import views
from .. import settings as filer_settings
from ..models import Clipboard, ClipboardItem, Folder
from ..utils.files import (
UploadException,
handle_request_files_upload,
handle_upload,
)
from ..utils.loader import load_model
NO_FOLDER_ERROR = "Can't find folder to upload. Please refresh and try again"
NO_PERMISSIONS_FOR_FOLDER = (
"Can't use this folder, Permission Denied. Please select another folder."
)
Image = load_model(filer_settings.FILER_IMAGE_MODEL)
# ModelAdmins
class ClipboardItemInline(admin.TabularInline):
    """Inline table of ClipboardItem rows shown on the Clipboard admin page."""
    model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
    """Hidden admin for Clipboard objects.

    Exists mainly to host the custom clipboard/upload endpoints registered
    in get_urls(); get_model_perms() hides it from the admin index.
    """
    model = Clipboard
    inlines = [ClipboardItemInline]
    filter_horizontal = ('files',)
    raw_id_fields = ('user',)
    verbose_name = "DEBUG Clipboard"
    verbose_name_plural = "DEBUG Clipboards"

    def get_urls(self):
        """Prepend the clipboard operation endpoints to the default admin URLs.

        Note: both upload patterns share the name 'filer-ajax_upload';
        reversing that name resolves to the pattern registered last.
        """
        return [
            url(r'^operations/paste_clipboard_to_folder/$',
                self.admin_site.admin_view(views.paste_clipboard_to_folder),
                name='filer-paste_clipboard_to_folder'),
            url(r'^operations/discard_clipboard/$',
                self.admin_site.admin_view(views.discard_clipboard),
                name='filer-discard_clipboard'),
            url(r'^operations/delete_clipboard/$',
                self.admin_site.admin_view(views.delete_clipboard),
                name='filer-delete_clipboard'),
            # ajax_upload is deliberately NOT wrapped in admin_view: it is
            # csrf_exempt and performs its own folder permission checks.
            url(r'^operations/upload/(?P<folder_id>[0-9]+)/$',
                ajax_upload,
                name='filer-ajax_upload'),
            url(r'^operations/upload/no_folder/$',
                ajax_upload,
                name='filer-ajax_upload'),
        ] + super(ClipboardAdmin, self).get_urls()

    def get_model_perms(self, *args, **kwargs):
        """
        It seems this is only used for the list view. NICE :-)
        """
        # All-False permissions hide the Clipboard model from the admin index.
        return {
            'add': False,
            'change': False,
            'delete': False,
        }
@csrf_exempt
def ajax_upload(request, folder_id=None):
    """
    Receives an upload from the uploader. Receives only one file at a time.

    Resolves the optional target folder, checks add permission, picks the
    matching filer file model, validates via a generated ModelForm and
    returns JSON with thumbnail/label data (or a JSON error with status 500).
    """
    folder = None
    if folder_id:
        try:
            # Get folder
            folder = Folder.objects.get(pk=folder_id)
        except Folder.DoesNotExist:
            return JsonResponse({'error': NO_FOLDER_ERROR})

    # check permissions
    if folder and not folder.has_add_children_permission(request):
        return JsonResponse({'error': NO_PERMISSIONS_FOR_FOLDER})
    try:
        if len(request.FILES) == 1:
            # dont check if request is ajax or not, just grab the file
            upload, filename, is_raw = handle_request_files_upload(request)
        else:
            # else process the request as usual
            upload, filename, is_raw = handle_upload(request)
        # TODO: Deprecated/refactor
        # Get clipboad
        # clipboard = Clipboard.objects.get_or_create(user=request.user)[0]

        # find the file type
        # NOTE(review): if no model in FILER_FILE_MODELS matches, FileForm is
        # never bound and the form construction below raises an uncaught
        # NameError — confirm a catch-all file model is always configured.
        for filer_class in filer_settings.FILER_FILE_MODELS:
            FileSubClass = load_model(filer_class)
            # TODO: What if there are more than one that qualify?
            if FileSubClass.matches_file_type(filename, upload, request):
                FileForm = modelform_factory(
                    model=FileSubClass,
                    fields=('original_filename', 'owner', 'file')
                )
                break
        uploadform = FileForm({'original_filename': filename,
                               'owner': request.user.pk},
                              {'file': upload})
        if uploadform.is_valid():
            file_obj = uploadform.save(commit=False)
            # Enforce the FILER_IS_PUBLIC_DEFAULT
            file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
            file_obj.folder = folder
            file_obj.save()
            # TODO: Deprecated/refactor
            # clipboard_item = ClipboardItem(
            #     clipboard=clipboard, file=file_obj)
            # clipboard_item.save()

            # Try to generate thumbnails.
            if not file_obj.icons:
                # There is no point to continue, as we can't generate
                # thumbnails for this file. Usual reasons: bad format or
                # filename.
                file_obj.delete()
                # This would be logged in BaseImage._generate_thumbnails()
                # if FILER_ENABLE_LOGGING is on.
                return JsonResponse(
                    {'error': 'failed to generate icons for file'},
                    status=500,
                )
            thumbnail = None
            # Backwards compatibility: try to get specific icon size (32px)
            # first. Then try medium icon size (they are already sorted),
            # fallback to the first (smallest) configured icon.
            for size in (['32'] +
                         filer_settings.FILER_ADMIN_ICON_SIZES[1::-1]):
                try:
                    thumbnail = file_obj.icons[size]
                    break
                except KeyError:
                    continue
            data = {
                'thumbnail': thumbnail,
                'alt_text': '',
                'label': str(file_obj),
                'file_id': file_obj.pk,
            }
            # prepare preview thumbnail
            # NOTE(review): exact-type check — subclasses of Image get no
            # preview thumbnail; confirm whether isinstance was intended.
            if type(file_obj) == Image:
                thumbnail_180_options = {
                    'size': (180, 180),
                    'crop': True,
                    'upscale': True,
                }
                thumbnail_180 = file_obj.file.get_thumbnail(
                    thumbnail_180_options)
                data['thumbnail_180'] = thumbnail_180.url
                data['original_image'] = file_obj.url
            return JsonResponse(data)
        else:
            form_errors = '; '.join(['%s: %s' % (
                field,
                ', '.join(errors)) for field, errors in list(
                    uploadform.errors.items())
            ])
            raise UploadException(
                "AJAX request not valid: form invalid '%s'" % (
                    form_errors,))
    except UploadException as e:
        return JsonResponse({'error': str(e)}, status=500)
|
#!/usr/bin/env python
# Tinta
# Copyright 2021 github.com/brandoncript
# This program is bound to the Hippocratic License 2.1
# Full text is available here:
# https://firstdonoharm.dev/version/2/1/license
# Further to adherence to the Hippocratic Licenese, permission is hereby
# granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software") under the terms of the
# MIT License to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and / or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the conditions layed
# out in the MIT License.
# Where a conflict or dispute would arise between these two licenses, HLv2.1
# shall take precedence.
from setuptools import setup
from pathlib import Path

# Read the long description from the README so PyPI can render it.
# The explicit encoding avoids a UnicodeDecodeError on platforms whose
# default locale encoding is not UTF-8 (e.g. Windows cp1252).
with Path('README.md').open(encoding='utf-8') as f:
    long_description = f.read()

setup(name='tinta',
      version='0.1.3a1-0',
      # (typo fix: was "Tinta, the a magical console output tool.")
      description='Tinta, the magical console output tool.',
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='http://github.com/brandonscript/tinta',
      author='Brandon Shelley',
      author_email='brandon@pacificaviator.co',
      install_requires=['ansicolors'],
      include_package_data=True,
      license='MIT',
      packages=['tinta'],
      keywords='console colors ansi print terminal development',
      python_requires='>=3.6',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Environment :: Console',
          'Topic :: Utilities',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
      ],
      zip_safe=False)
|
# wink-tokenizer
# Multilingual tokenizer that automatically tags each token with its type.
#
# Copyright (C) 2017-19 GRAYPE Systems Private Limited
#
# This file is part of “wink-tokenizer”.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import re
from enum import Enum
from mantistable.process.utils.assets.assets import Assets
class TokenTagEnum(Enum):
    """Categories the Tokenizer can assign to a token."""
    WORD = "word"
    NUMBER = "number"
    QUOTED_PHRASE = "quoted_phrase"
    URL = "url"
    EMAIL = "email"
    MENTION = "mention"  # @handle
    HASHTAG = "hash_tag"
    EMOJI = "emoji"
    TIME = "time"
    ORDINAL = "ordinal"  # 1st, 2nd, ...
    CURRENCY = "currency"
    PUNCTUATION = "punctuation"
    SYMBOL = "symbol"
    ALIEN = "alien"  # fallback: text no pattern matched
class Token:
    """A tagged lexical token: the matched text plus its TokenTagEnum tag."""

    def __init__(self, value: str, tag: TokenTagEnum):
        # The raw matched text.
        self.value = value
        # The category assigned by the tokenizer.
        self.tag = tag

    def __repr__(self):
        # Mirror __str__ so tokens read naturally inside lists.
        return str(self)

    def __str__(self):
        return "{}[{}]".format(self.value, self.tag.value)
class Tokenizer:
    """Regex-driven multilingual tokenizer (Python port of wink-tokenizer).

    A prioritized list of (pattern, tag) pairs is applied recursively: the
    first pattern extracts its matches as tagged Token objects and the
    unmatched gaps are handed to the remaining patterns; text that survives
    every pattern is split on whitespace and tagged ALIEN.

    Patterns suffixed ``_l1`` target Latin-1 scripts, ``_dv`` Devanagari.
    """
    # Contraction lookup table (e.g. "can't" -> its expanded parts),
    # loaded once per class from the bundled asset file.
    contractions = json.loads(Assets().get_asset("contractions.json"))

    rgx_quoted_phrase = r"\"[^\"]*\""
    rgx_url = r"(?i)(?:https?:\/\/)(?:[\da-z\.-]+)\.(?:[a-z\.]{2,6})(?:[\/\w\.\-\?#=]*)*\/?"
    rgx_email = r"(?i)[-!#$%&'*+\/=?^\w{|}~](?:\.?[-!#$%&'*+\/=?^\w`{|}~])*@[a-z0-9](?:-?\.?[a-z0-9])*(?:\.[a-z](?:-?[a-z0-9])*)+"
    rgx_mention = r"@\w+"
    rgx_hash_tag_l1 = r"(?i)#[a-z][a-z0-9]*"
    rgx_hash_tag_dv = r"(?i)#[\u0900-\u0963\u0970-\u097F][\u0900-\u0963\u0970-\u097F\u0966-\u096F0-9]*"
    rgx_emoji = r"[\uD800-\uDBFF][\uDC00-\uDFFF]|[\u2600-\u26FF]|[\u2700-\u27BF]"
    rgx_time = r"(?i)(?:\d|[01]\d|2[0-3]):?(?:[0-5][0-9])?\s?(?:[ap]\.?m\.?|hours|hrs)"
    rgx_ordinal_l1 = r"1\dth|[04-9]th|1st|2nd|3rd|[02-9]1st|[02-9]2nd|[02-9]3rd|[02-9][04-9]th|\d+\d[04-9]th|\d+\d1st|\d+\d2nd|\d+\d3rd"
    rgx_currency = r"[₿₽₹₨$£¥€₩]"
    rgx_punctuation = r"[’'‘’`“”\"\[\]\(\){}…,\.!;\?\-:\u0964\u0965]"
    rgx_symbol = r"[\u0950~@#%\^\+=\*\|\/<>&]"
    rgx_word_l1 = r"(?i)[a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF][a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF']*"
    rgx_word_dv = r"(?i)[\u0900-\u094F\u0951-\u0963\u0970-\u097F]+"
    rgx_number_l1 = r"\d+\/\d+|\d(?:[\.,-\/]?\d)*(?:\.\d+)?"
    rgx_number_dv = r"[\u0966-\u096F]+\/[\u0966-\u096F]+|[\u0966-\u096F](?:[\.,-\/]?[\u0966-\u096F])*(?:\.[\u0966-\u096F]+)?"
    # Possessive forms: "dog's" (singular) and "dogs'" (plural).
    rgx_pos_singular = r"(?i)([a-z]+)('s)$"
    rgx_pos_plural = r"(?i)([a-z]+s)(')$"

    def __init__(self):
        # Order matters: more specific patterns must precede generic ones.
        self.regexes = [
            (Tokenizer.rgx_quoted_phrase, TokenTagEnum.QUOTED_PHRASE),
            (Tokenizer.rgx_url, TokenTagEnum.URL),
            (Tokenizer.rgx_email, TokenTagEnum.EMAIL),
            (Tokenizer.rgx_mention, TokenTagEnum.MENTION),
            (Tokenizer.rgx_hash_tag_l1, TokenTagEnum.HASHTAG),
            (Tokenizer.rgx_hash_tag_dv, TokenTagEnum.HASHTAG),
            (Tokenizer.rgx_emoji, TokenTagEnum.EMOJI),
            (Tokenizer.rgx_time, TokenTagEnum.TIME),
            (Tokenizer.rgx_ordinal_l1, TokenTagEnum.ORDINAL),
            (Tokenizer.rgx_number_l1, TokenTagEnum.NUMBER),
            (Tokenizer.rgx_number_dv, TokenTagEnum.NUMBER),
            (Tokenizer.rgx_currency, TokenTagEnum.CURRENCY),
            (Tokenizer.rgx_word_l1, TokenTagEnum.WORD),
            (Tokenizer.rgx_word_dv, TokenTagEnum.WORD),
            (Tokenizer.rgx_punctuation, TokenTagEnum.PUNCTUATION),
            (Tokenizer.rgx_symbol, TokenTagEnum.SYMBOL),
        ]

    def tokenize(self, text):
        """Tokenize *text*, returning a list of tagged Token objects."""
        return self._tokenize_recursive(text, self.regexes)

    def _tokenize_recursive(self, text, regexes):
        """Apply regexes[0]; recurse over unmatched substrings with the rest."""
        sentence = text.strip()
        final_tokens = []
        if len(regexes) <= 0:
            # No regex left, split on spaces and tag every token as **alien**
            for tkn in re.split(r"\s+", text):
                final_tokens.append(Token(tkn.strip(), TokenTagEnum.ALIEN))
            return final_tokens
        rgx = regexes[0]
        tokens = self._tokenize_text_unit(sentence, rgx)
        for token in tokens:
            if isinstance(token, str):
                # Unmatched gap: hand it to the lower-priority patterns.
                final_tokens.extend(self._tokenize_recursive(token, regexes[1:]))
            else:
                final_tokens.append(token)
        return final_tokens

    def _tokenize_text_unit(self, text, regex):
        """Split *text* by one (pattern, tag) pair.

        Returns a mixed list of Token objects (the matches) and plain
        strings (the unmatched text between matches), interleaved in
        original order.
        """
        matches = list(re.findall(regex[0], text))
        balance = list(re.split(regex[0], text))
        tokens = []
        tag = regex[1]
        k = 0
        for i in range(0, len(balance)):
            t = balance[i].strip()
            if len(t) > 0:
                tokens.append(t)
            if k < len(matches):
                if tag == TokenTagEnum.WORD:
                    # Possible contraction
                    aword = matches[k]
                    if aword.find("'") >= 0:
                        tokens = self._manage_contraction(aword, tokens)
                    else:
                        # No contractions
                        tokens.append(Token(aword, tag))
                else:
                    tokens.append(Token(matches[k], tag))
            k += 1
        return tokens

    def _manage_contraction(self, word, tokens):
        """Append the expansion of an apostrophe-word to *tokens*.

        Resolution order: contraction table lookup, singular possessive,
        plural possessive, and finally a literal split around the apostrophe.
        """
        ct = Tokenizer.contractions.get(word, None)
        if ct is None:
            # Possessive of singular and plural forms
            matches = re.match(Tokenizer.rgx_pos_singular, word)
            if matches is not None:
                tokens.append(Token(matches[1], TokenTagEnum.WORD))
                tokens.append(Token(matches[2], TokenTagEnum.WORD))
            else:
                matches = re.match(Tokenizer.rgx_pos_plural, word)
                if matches is not None:
                    tokens.append(Token(matches[1], TokenTagEnum.WORD))
                    tokens.append(Token(matches[2], TokenTagEnum.WORD))
                else:
                    # Unknown form: split literally around the apostrophe.
                    tokens.append(Token(word[0:word.find("'")], TokenTagEnum.WORD))
                    tokens.append(Token("'", TokenTagEnum.PUNCTUATION))
                    tokens.append(Token(word[word.find("'")+1:], TokenTagEnum.WORD))
        else:
            # Lookup
            tokens.append(Token(ct[0], TokenTagEnum.WORD))
            tokens.append(Token(ct[1], TokenTagEnum.WORD))
            if len(ct) == 3:
                tokens.append(Token(ct[2], TokenTagEnum.WORD))
        return tokens
|
import workers
from checks import health_task
from polyaxon.settings import K8SEventsCeleryTasks
@workers.app.task(name=K8SEventsCeleryTasks.K8S_EVENTS_HEALTH, ignore_result=False)
def k8s_events_health(x: int, y: int) -> int:
    """Celery health-check task: delegates to the shared health_task helper.

    ignore_result=False so the caller can read the result back as the
    liveness signal for the k8s-events worker.
    """
    return health_task.health_task(x, y)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from builtins import super, range, zip, round, map
from .base import (
DiTToHasTraits,
Float,
Unicode,
Any,
Int,
Bool,
List,
observe,
Instance,
)
from .position import Position
class PhaseLoad(DiTToHasTraits):
    """Per-phase representation of a load: fixed P/Q plus optional ZIP-model
    fractions (constant current / power / impedance portions)."""

    phase = Unicode(
        help="""The phase (A, B, C, N, s1, s2) of the load""", default_value=None
    )
    p = Float(
        help="""The active power of the load which is fixed. Positive values represent flow out of the node.""",
        default_value=None,
    )
    q = Float(
        help="""The reactive power of the load which is fixed. Positive values represent flow out of the node.""",
        default_value=None,
    )

    # Modification: Nicolas (August 2017)
    # OpenDSS has 8 different load models. Without this field there is no way to capture
    # this information in DiTTo ==> Only constant P&Q and Zipload would be considered
    # Note: use_zip can probably be removed since it is equivalent to model=8
    model = Int(help="""OpenDSS Load Model number.""", default_value=1)

    # TO REMOVE??
    use_zip = Int(
        help="""Describes whether the load is reprsented as a zipload or not. 1 represents zipload with fractions taken from the p and q values above. 0 represents a load defined by p & q alone.""",
        default_value=0,
    )

    # Modification: Nicolas Gensollen (December 2017)
    # Drop flag is used if we created objects in the reader that we do not want to output.
    # This is much faster than looping over objects to remove them in a pre/post-processing step
    drop = Bool(
        help="""Set to 1 if the object should be dropped in the writing process. Otherwise leave 0.""",
        default_value=False,
    )

    ppercentcurrent = Float(
        help="""This is the portion of active power load modeled as constant current. Active portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )
    # FIX: help text previously described the constant-impedance portion of the
    # *active* power; this trait is the constant-current portion of the
    # *reactive* power, matching its name.
    qpercentcurrent = Float(
        help="""This is the portion of reactive power load modeled as constant current. Reactive portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )
    # Consistency: added the explicit default_value=None used by all the other
    # ZIP fraction traits (ppercentpower/qpercentpower previously omitted it).
    ppercentpower = Float(
        help="""This is the portion of active power load modeled as constant power. Active portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )
    # FIX: help text previously said "constant current"; this trait is the
    # constant-power portion of the reactive power.
    qpercentpower = Float(
        help="""This is the portion of reactive power load modeled as constant power. Reactive portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )
    # FIX: help text was garbled and said "reactive power"; this trait is the
    # constant-impedance portion of the *active* power.
    ppercentimpedance = Float(
        help="""This is the portion of active power load modeled as constant impedance. Active portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )
    qpercentimpedance = Float(
        help="""This is the portion of reactive power load modeled as constant impedance. Reactive portions of current, power and impedance should all add to 1. Used for ZIP models.""",
        default_value=None,
    )

    def build(self, model):
        # Keep a reference to the parent system model; no traits to wire up.
        self._model = model
|
from django.shortcuts import render, redirect
from django.http import Http404, JsonResponse
from django.contrib.auth.decorators import login_required
from .models import Passenger, PassengerProfile
from .forms import NewPassenger, PassengerLogin
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
# Create your views here.
def passanger(request):
    # Render the passenger landing page. NOTE: the "passanger" spelling is
    # kept — it matches the template name and any URLconf referencing it.
    return render (request, 'passanger-home.html')
def new_passenger(request):
    '''
    View function to display a registration form when the user selects the passenger option

    On POST: validates the form, rejects already-registered phone numbers and
    redirects to the passenger profile on success. On GET: renders an empty form.
    '''
    title = 'Sign Up Passenger'
    if request.method == 'POST':
        form = NewPassenger(request.POST)
        # BUG FIX: the original tested `form.is_valid` (the bound method
        # object, always truthy) instead of calling it, so form validation
        # never actually ran.
        if form.is_valid():
            first_name = request.POST.get('first_name')
            last_name = request.POST.get('last_name')
            phone_number = request.POST.get('phone_number')
            new_passenger = Passenger(
                first_name=first_name, last_name=last_name, phone_number=phone_number)
            # Let the database perform the duplicate lookup instead of loading
            # and int-casting every passenger in Python (was O(n) per signup,
            # with an unreachable `break` after the return).
            if Passenger.objects.filter(phone_number=int(phone_number)).exists():
                message = 'The number is already registered'
                messages.error(
                    request, ('This number is already registered'))
                return render(request, 'registration/registration_form-passanger.html', {"title": title, "form": form, "message": message})
            new_passenger.save()
            # NOTE(review): the `passenger` profile view is commented out in
            # this module — confirm the redirect target exists.
            return redirect(passenger, new_passenger.id)
        else:
            messages.error(request, ('Please correct the error below.'))
    else:
        form = NewPassenger()
    return render(request, 'registration/registration_form-passanger.html', {"title": title, "form": form})
def passenger_login(request):
    '''
    View function to display login form for a passenger

    On POST: looks up the passenger by phone number and redirects to their
    profile; raises Http404 when no passenger matches. On GET: renders the form.
    '''
    title = "Sign In Passenger"
    if request.method == 'POST':
        form = PassengerLogin(request.POST)
        # BUG FIX: `form.is_valid` was referenced without calling it, which is
        # always truthy, so invalid submissions were processed as valid.
        if form.is_valid():
            phone_number = request.POST.get('phone_number')
            try:
                found_passenger = Passenger.objects.get(
                    phone_number=phone_number)
                # NOTE(review): the `passenger` profile view is commented out
                # in this module — confirm the redirect target exists.
                return redirect(passenger, found_passenger.id)
            except ObjectDoesNotExist:
                raise Http404()
        else:
            messages.error(request, ('Please correct the error below.'))
    else:
        form = PassengerLogin()
    return render(request, 'registration/login-passanger.html', {"title": title, "form": form})
# Passenger homepage is Profile page
# def passenger(request, id):
# '''
# View function to display an authenticated logged in passenger's profile
# '''
# passengers = Passenger.objects.all()
# try:
# passenger = Passenger.objects.get(id=id)
# if passenger in passengers:
# title = f'{passenger.first_name} {passenger.last_name}'
# passenger_profile = PassengerProfile.objects.get(
# passenger=passenger)
# return render(request, 'all-passengers/profile.html', {"title": title, "passenger": passenger, "passenger_profile": passenger_profile})
# else:
# return redirect(passenger_login)
# except ObjectDoesNotExist:
# return redirect(new_passenger)
# # raise Http404()
|
import threading
from queue import Queue
class A(threading.Thread):
    """Worker thread that inspects the shared queue/list, then appends 'a' to each."""

    def __init__(self, q, lis):
        super().__init__()
        self.q = q
        self.lis = lis

    def run(self):
        # Report the state of the shared containers before mutating them.
        print('self.qsize is:{}'.format(self.q.qsize()))
        print('lis is : {}'.format(self.lis))
        # Publish one item to each shared container.
        self.q.put('a')
        self.lis.append('a')
class B(threading.Thread):
    """Worker thread that only reports the state of the shared queue and list."""

    def __init__(self, q, lis):
        super().__init__()
        self.q = q
        self.lis = lis

    def run(self):
        # Read-only: show whatever previous threads left in the containers.
        print(self.q.qsize())
        print(self.lis)
def main():
    """Demonstrate that threads share the same queue and list objects."""
    shared_list = []
    shared_queue = Queue()
    # Run A to completion first so B observes its side effects.
    producer = A(shared_queue, shared_list)
    producer.start()
    producer.join()
    observer = B(shared_queue, shared_list)
    observer.start()
    observer.join()
    # "Main thread and child threads have all finished."
    print('主线程,子线程都结束')
if __name__ == '__main__':
main()
|
import requests
class Prowl:
    """Minimal client for the Prowl push-notification API."""

    def __init__(self, apikey):
        # Per-user API key issued by prowlapp.com.
        self.apikey = apikey

    def send_notification(self, message):
        """POST *message* as a 'Corona Zahlen' event; return the HTTP status code."""
        payload = {
            'apikey': self.apikey,
            'application': 'Corona Zahlen',
            'event': message,
        }
        response = requests.post(
            'https://api.prowlapp.com/publicapi/add',
            data=payload,
            headers={'Content-type': 'application/x-www-form-urlencoded'},
        )
        return response.status_code
|
# -*- coding: utf-8 -*-
"""Module for privacy policy-related views."""
|
# -*- coding: utf-8 -*-
"""
=========================================
Submaps and Cropping
=========================================

In this example we demonstrate how to get a submap of a map.
"""
##############################################################################
# Start by importing the necessary modules.
from __future__ import print_function, division

import astropy.units as u

import sunpy.map
import sunpy.data.sample

##############################################################################
# Sunpy sample data contains a number of suitable maps, where the sunpy.data.sample.NAME
# returns the location of the given FITS file.
swap_map = sunpy.map.Map(sunpy.data.sample.SWAP_LEVEL1_IMAGE)

##############################################################################
# This has resolution and ranges of:
print(swap_map.dimensions)
print(swap_map.data)
print(swap_map.meta)

##############################################################################
# To find out more specifics about this map and the instrument used, check its
# metadata:
print(swap_map.meta)

##############################################################################
# To crop the data you create a submap, specifying ranges in AstroPy Quantities:
rangex = u.Quantity([-900 * u.arcsec, 0 * u.arcsec])
rangey = u.Quantity([-900 * u.arcsec, -200 * u.arcsec])
swap_submap = swap_map.submap(rangex, rangey)
swap_submap.peek(draw_limb=True, draw_grid=True)
import numpy as np
from collections import defaultdict
import math
from functools import cmp_to_key
import random
import graph
import estimation
import rewiring
#Note: We use round to even.
def check_deg_vec(deg_vec):
    """Validate a degree vector: non-negative counts and an even degree sum.

    Prints a diagnostic and returns False on the first violation.
    """
    total = 0
    for degree, count in deg_vec.items():
        if count < 0:
            print("Error: The number of nodes with degree " + str(degree) + " is less than zero.")
            return False
        total += degree * count
    if total % 2 != 0:
        print("Error: The sum of degrees is not an even number.")
        return False
    return True
def check_jnt_deg_mat(jnt_deg_mat, deg_vec):
    """Validate a joint degree matrix against a degree vector.

    Checks even diagonal entries, symmetry, non-negativity, and that each
    row k sums to k * deg_vec[k]. Prints a diagnostic and returns False on
    the first violation.
    """
    for k, row in jnt_deg_mat.items():
        for l, count in row.items():
            if k == l and count % 2 != 0:
                print("Error: The number of edges between nodes with degree " + str(k) + " and nodes with degree " + str(l) + " is not an even number.")
                return False
            if count != jnt_deg_mat[l][k]:
                print("Error: The number of edges between nodes with degree " + str(k) + " and nodes with degree " + str(l) + " is not symmetrical.")
                return False
            if count < 0:
                print("Error: The number of edges between nodes with degree " + str(k) + " and nodes with degree " + str(l) + " is less than zero.")
                return False
        if sum(row.values()) != k * deg_vec[k]:
            print("Error: The sum of numbers of edges between nodes with degree " + str(k) + " and nodes with degree l for all l is not equal to " + str(k) + " times the number of nodes with degree " + str(k) + ".")
            return False
    return True
def select_random_key_with_smallest_value(dic):
    """Return a key attaining the smallest value in *dic*.

    If two or more keys share the smallest value, one of them is selected
    uniformly at random.

    Raises
    ------
    ValueError
        If *dic* is empty. (The previous version printed an error and called
        exit(); raising is safer for library code and lets callers handle it.)
    """
    if len(dic) == 0:
        raise ValueError("The size of a given object is zero.")
    min_value = min(dic.values())
    keys = [key for key in dic if dic[key] == min_value]
    return np.random.choice(keys)
def select_min_key_with_smallest_value(dic):
    """Return the smallest key among those attaining the smallest value in *dic*.

    Deterministic counterpart of select_random_key_with_smallest_value:
    ties on the value are broken by taking the minimum key.
    """
    if len(dic) == 0:
        print("Error: The size of a given object is zero.")
        exit()
    min_value = min(dic.values())
    return min(key for key in dic if dic[key] == min_value)
def initialize_target_degree_vector(est_n, est_dd):
    """Build the target degree vector from an estimated degree distribution.

    Each degree's node count is the rounded expected count est_dd[k] * est_n,
    floored at 1 so every estimated degree stays represented.
    """
    tgt_deg_vec = defaultdict(int)
    for degree, fraction in est_dd.items():
        tgt_deg_vec[degree] = max(round(fraction * est_n), 1)
    return tgt_deg_vec
def adjust_target_degree_vector(est_n, est_dd, tgt_deg_vec):
    """Make the degree sum of the target degree vector even.

    When the sum of degrees is odd, the count of one odd degree is
    incremented, chosen to minimally perturb the estimated distribution;
    if no odd-degree candidate exists, a degree-1 node is added instead.
    """
    if sum(k * tgt_deg_vec[k] for k in tgt_deg_vec) % 2 == 0:
        return tgt_deg_vec
    degree_candidates = {}
    for k in tgt_deg_vec:
        # Only odd degrees can flip the parity of the degree sum.
        if k % 2 == 0:
            continue
        x = est_dd[k] * est_n
        y = tgt_deg_vec[k]
        if x != 0:
            # Relative error increase caused by adding one node of degree k.
            delta_e = float(math.fabs(x - y - 1)) / x - float(math.fabs(x - y)) / x
        else:
            delta_e = float("inf")
        degree_candidates[k] = delta_e
    if len(degree_candidates) > 0:
        chosen = select_min_key_with_smallest_value(degree_candidates)
        tgt_deg_vec[chosen] += 1
    else:
        tgt_deg_vec[1] += 1
    return tgt_deg_vec
def cmp(a: list, b: list):
    """Comparator for [node, degree] pairs.

    Orders ascending by the second element; ties are broken by the first
    element in descending order (equal pairs also compare as -1).
    """
    if a[1] != b[1]:
        return -1 if a[1] < b[1] else 1
    return 1 if a[0] < b[0] else -1
def modify_target_degree_vector(subG: graph.Graph, est_n, est_dd, tgt_deg_vec):
    # Assign the target degree of each node in the subgraph.
    # In parallel with this assignment process, modify the target degree vector.
    # Returns [subG_deg_vec, tgt_deg_vec, tgt_node_deg].
    subG_deg_vec = defaultdict(int)
    tgt_node_deg = {}
    # Query nodes keep their observed subgraph degree as target degree.
    for v in subG.qry_nodes:
        subG_d = len(subG.nlist[v])
        tgt_node_deg[v] = subG_d
        subG_deg_vec[subG_d] += 1
    # The target vector must dominate the subgraph's degree counts.
    for d in subG_deg_vec:
        if tgt_deg_vec[d] < subG_deg_vec[d]:
            tgt_deg_vec[d] = subG_deg_vec[d]
    # Visible nodes are processed in comparator order (ascending subgraph
    # degree, ties by descending node id — see cmp()).
    visible_node_pairs = []
    for v in subG.vis_nodes:
        visible_node_pairs.append([v, len(subG.nlist[v])])
    visible_node_pairs.sort(key=cmp_to_key(cmp))
    for visible_node_pair in visible_node_pairs:
        v = visible_node_pair[0]
        subG_d = visible_node_pair[1]
        # Candidate degrees with remaining slack (tgt > subG), listed with
        # multiplicity so the random choice is weighted by available slots.
        degree_candidates = []
        for k in tgt_deg_vec:
            if k >= subG_d and tgt_deg_vec[k] > subG_deg_vec[k]:
                for i in range(0, tgt_deg_vec[k] - subG_deg_vec[k]):
                    degree_candidates.append(k)
        if len(degree_candidates) > 0:
            tgt_node_deg[v] = np.random.choice(list(degree_candidates))
        else:
            # No slack: pick the degree whose target count can grow with the
            # least relative error against the estimated distribution.
            degree_to_add_candidates = {}
            for k in est_dd:
                if k < subG_d:
                    continue
                x = est_n*est_dd[k]
                y = float(tgt_deg_vec[k])
                if x != 0:
                    delta_e = float(math.fabs(x-y-1))/x - float(math.fabs(x-y))/x
                else:
                    delta_e = float("inf")
                degree_to_add_candidates[k] = delta_e
            if len(degree_to_add_candidates) > 0:
                tgt_node_deg[v] = select_min_key_with_smallest_value(degree_to_add_candidates)
            else:
                tgt_node_deg[v] = subG_d
            # A new target slot was created for this degree.
            tgt_deg_vec[tgt_node_deg[v]] += 1
        # The assigned slot is now consumed by a subgraph node.
        # NOTE(review): indentation reconstructed — confirm this increment is
        # executed for both branches above (slack consumption), while the
        # tgt_deg_vec increment applies only to the no-slack branch.
        subG_deg_vec[tgt_node_deg[v]] += 1
    # Restore even parity of the degree sum after the modifications.
    tgt_deg_vec = adjust_target_degree_vector(est_n, est_dd, tgt_deg_vec)
    return [subG_deg_vec, tgt_deg_vec, tgt_node_deg]
def initialize_target_joint_degree_matrix(est_n, est_aved, est_jdd):
    """Build the target joint degree matrix from the estimated JDD.

    Each entry is the rounded expected edge count est_n * est_aved *
    est_jdd[k][l], floored at 1 off the diagonal. Diagonal entries must be
    even, so odd rounded values are nudged to the nearer even neighbour
    and floored at 2.
    """
    tgt_jnt_deg_mat = defaultdict(lambda: defaultdict(int))
    for k in est_jdd:
        for l in est_jdd[k]:
            if est_jdd[k][l] <= 0:
                continue
            expected = est_n * est_aved * est_jdd[k][l]
            x = round(expected)
            if k != l:
                tgt_jnt_deg_mat[k][l] = max(x, 1)
            elif x % 2 == 0:
                tgt_jnt_deg_mat[k][l] = max(x, 2)
            elif math.fabs(expected - x + 1) <= math.fabs(expected - x - 1):
                # Odd diagonal count: move to the closer even value.
                tgt_jnt_deg_mat[k][l] = max(x - 1, 2)
            else:
                tgt_jnt_deg_mat[k][l] = max(x + 1, 2)
    return tgt_jnt_deg_mat
def adjust_target_joint_degree_matrix(est_n, est_aved, est_jdd, tgt_deg_vec, min_jnt_deg_mat, tgt_jnt_deg_mat):
    # Adjust the target joint degree matrix so that for every degree k1 the
    # row sum of tgt_jnt_deg_mat[k1] equals k1 * tgt_deg_vec[k1], never
    # dropping any entry below min_jnt_deg_mat. Entries to grow/shrink are
    # picked to minimally perturb the estimated joint degree distribution.
    # Returns [tgt_jnt_deg_mat, tgt_deg_vec] (tgt_deg_vec may be enlarged).
    degree_k1_set = set(tgt_deg_vec.keys())
    if 1 not in degree_k1_set:
        degree_k1_set.add(1)
    # Process high degrees first; lower rows can still absorb corrections.
    degree_k1_set = sorted(list(degree_k1_set), reverse=True)
    for k1 in degree_k1_set:
        target_sum = k1*tgt_deg_vec[k1]
        present_sum = sum(tgt_jnt_deg_mat[k1].values())
        diff = target_sum - present_sum
        if diff == 0:
            continue
        # Only pair k1 with degrees <= k1 (the k2 > k1 side was handled when
        # that larger degree was processed).
        degree_k2_set = set([k2 for k2 in degree_k1_set if k2 <= k1])
        # For k1 == 1 an odd discrepancy can never be closed (diagonal moves
        # change the sum by 2), so add one more degree-1 node first.
        if k1 == 1 and abs(target_sum - present_sum) % 2 != 0:
            tgt_deg_vec[1] += 1
            target_sum += 1
        while target_sum != present_sum:
            if target_sum > present_sum:
                # Need more edges incident to degree k1.
                degree_k2_candidate = {}
                for k2 in degree_k2_set:
                    # A diagonal increment adds 2; skip it if only 1 is missing.
                    if present_sum == target_sum - 1 and k2 == k1:
                        continue
                    x = est_jdd[k1][k2]*est_n*est_aved
                    y = float(tgt_jnt_deg_mat[k1][k2])
                    if x == 0:
                        delta_e = float("inf")
                    else:
                        # Relative-error increase of adding 1 (or 2 on the diagonal).
                        if k2 != k1:
                            delta_e = float(math.fabs(x-y-1))/x - float(math.fabs(x-y))/x
                        else:
                            delta_e = float(math.fabs(x-y-2))/x - float(math.fabs(x-y))/x
                    degree_k2_candidate[k2] = delta_e
                k2 = select_random_key_with_smallest_value(degree_k2_candidate)
                tgt_jnt_deg_mat[k1][k2] += 1
                tgt_jnt_deg_mat[k2][k1] += 1
                if k1 != k2:
                    present_sum += 1
                else:
                    present_sum += 2
            else:
                # Too many edges: try to remove one without going below the
                # minimum matrix.
                degree_k2_candidate = {}
                for k2 in degree_k2_set:
                    if tgt_jnt_deg_mat[k1][k2] <= min_jnt_deg_mat[k1][k2]:
                        continue
                    # A diagonal decrement removes 2; skip if only 1 is excess.
                    if present_sum == target_sum + 1 and k2 == k1:
                        continue
                    x = est_jdd[k1][k2] * est_n * est_aved
                    y = float(tgt_jnt_deg_mat[k1][k2])
                    if x == 0:
                        delta_e = float("inf")
                    else:
                        if k2 != k1:
                            delta_e = float(math.fabs(x - y + 1))/x - float(math.fabs(x - y))/x
                        else:
                            delta_e = float(math.fabs(x - y + 2))/x - float(math.fabs(x - y))/x
                    degree_k2_candidate[k2] = delta_e
                if len(degree_k2_candidate) > 0:
                    k2 = select_random_key_with_smallest_value(degree_k2_candidate)
                    tgt_jnt_deg_mat[k1][k2] -= 1
                    tgt_jnt_deg_mat[k2][k1] -= 1
                    if k1 != k2:
                        present_sum -= 1
                    else:
                        present_sum -= 2
                else:
                    # Nothing removable: raise the target instead by adding
                    # nodes of degree k1 (or two degree-1 nodes).
                    if k1 > 1:
                        target_sum += k1
                        tgt_deg_vec[k1] += 1
                    else:
                        target_sum += 2
                        tgt_deg_vec[1] += 2
    return [tgt_jnt_deg_mat, tgt_deg_vec]
def modify_target_joint_degree_matrix(subG:graph.Graph, est_n, est_aved, est_jdd, tgt_node_deg, tgt_deg_vec, tgt_jnt_deg_mat):
degree_set = set(tgt_deg_vec.keys())
if 1 not in degree_set:
degree_set.add(1)
degree_set = set(sorted(list(degree_set)))
subG_jnt_deg_mat = defaultdict(lambda: defaultdict(int))
for v in subG.nodes:
k1 = tgt_node_deg[v]
for w in subG.nlist[v]:
k2 = tgt_node_deg[w]
subG_jnt_deg_mat[k1][k2] += 1
for k1 in subG_jnt_deg_mat:
for k2 in subG_jnt_deg_mat[k1]:
while subG_jnt_deg_mat[k1][k2] > tgt_jnt_deg_mat[k1][k2]:
tgt_jnt_deg_mat[k1][k2] += 1
tgt_jnt_deg_mat[k2][k1] += 1
degree_k3_candidates = {}
for k3 in degree_set:
if k3 == k1 or tgt_jnt_deg_mat[k2][k3] <= subG_jnt_deg_mat.get(k2, {}).get(k3, 0):
continue
x = est_jdd[k2][k3]*est_n*est_aved
y = tgt_jnt_deg_mat[k2][k3]
if x == 0:
delta_e = float("inf")
else:
if k2 != k3:
delta_e = float(math.fabs(x-y+1))/x - float(math.fabs(x-y))/x
else:
delta_e = float(math.fabs(x-y+2))/x - float(math.fabs(x-y))/x
degree_k3_candidates[k3] = delta_e
k3 = -1
if len(degree_k3_candidates) > 0:
k3 = select_random_key_with_smallest_value(degree_k3_candidates)
tgt_jnt_deg_mat[k2][k3] -= 1
tgt_jnt_deg_mat[k3][k2] -= 1
degree_k4_candidates = {}
for k4 in degree_set:
if k4 == k2 or tgt_jnt_deg_mat[k1][k4] <= subG_jnt_deg_mat.get(k1, {}).get(k4, 0):
continue
x = est_jdd[k1][k4] * est_n * est_aved
y = tgt_jnt_deg_mat[k1][k4]
if x == 0:
delta_e = float("inf")
else:
if k1 != k4:
delta_e = float(math.fabs(x - y + 1)) / x - float(math.fabs(x - y)) / x
else:
delta_e = float(math.fabs(x - y + 2)) / x - float(math.fabs(x - y)) / x
degree_k3_candidates[k4] = delta_e
if len(degree_k4_candidates) > 0:
k4 = select_random_key_with_smallest_value(degree_k4_candidates)
tgt_jnt_deg_mat[k4][k1] -= 1
tgt_jnt_deg_mat[k1][k4] -= 1
if k3 > 0:
tgt_jnt_deg_mat[k3][k4] += 1
tgt_jnt_deg_mat[k4][k3] += 1
[tgt_jnt_deg_mat,tgt_deg_vec] = adjust_target_joint_degree_matrix(est_n,est_aved,est_jdd,tgt_deg_vec,subG_jnt_deg_mat,tgt_jnt_deg_mat)
return [subG_jnt_deg_mat, tgt_jnt_deg_mat, tgt_deg_vec]
def graph_restoration_method(samplinglist, test=True):
    """Restore a graph from crawled sampling data.

    Builds the observed subgraph from `samplinglist`, estimates the original
    graph's size, degree distribution, joint degree distribution and
    degree-dependent clustering, constructs a matching target degree vector
    and joint degree matrix, completes the subgraph by random stub matching,
    and finally rewires toward the estimated clustering.

    Parameters
    ----------
    samplinglist : sequence of sampled-node records with `.index` (node id)
        and `.nlist` (neighbor ids) attributes — TODO confirm record type.
    test : when True, run internal consistency checks and abort on failure.

    Returns
    -------
    graph.Graph : the generated graph.
    """
    genG = graph.Graph()
    # (1) construct the subgraph
    node_index = {}
    edges_to_add = []
    # (1-1) index queried nodes
    i = 0
    for data in samplinglist:
        v = data.index
        if v not in node_index:
            node_index[v] = i
            i += 1
    genG.qry_nodes = set(range(0, i))
    # (1-2) index visible nodes
    marked = set()
    for data in samplinglist:
        v = data.index
        if v in marked:
            continue
        marked.add(v)
        for w in data.nlist:
            if w not in node_index:
                node_index[w] = i
                i += 1
            # Only add the edge from the first endpoint processed, so each
            # observed edge is recorded once.
            if w not in marked:
                edges_to_add.append([node_index[v], node_index[w]])
    genG.vis_nodes = set(range(len(genG.qry_nodes), i))
    # (1-3) Construct the subgraph
    genG.nodes = genG.qry_nodes | genG.vis_nodes
    for [v, w] in edges_to_add:
        graph.add_edge(genG, v, w)
    # (1-4) If there are no visible nodes, return the subgraph.
    if len(genG.vis_nodes) == 0:
        genG.N = len(genG.nodes)
        genG.M = 0
        genG.maxd = 0
        for v in genG.nodes:
            d = len(genG.nlist[v])
            genG.M += d
            if d > genG.maxd:
                genG.maxd = d
        genG.M = int(genG.M/2)  # each edge was counted from both endpoints
        return genG
    # (2) Construct the target degree vector
    est_n = estimation.size_estimator(samplinglist)
    est_dd = estimation.degree_distribution_estimator(samplinglist)
    tgt_deg_vec = initialize_target_degree_vector(est_n, est_dd)
    tgt_deg_vec = adjust_target_degree_vector(est_n, est_dd, tgt_deg_vec)
    [subG_deg_vec, tgt_deg_vec, tgt_node_deg] = modify_target_degree_vector(genG,est_n,est_dd,tgt_deg_vec)
    # (3) Construct the target joint degree matrix
    est_aved = estimation.average_degree_estimator(samplinglist)
    est_jdd = estimation.JDD_estimator_hybrid(samplinglist,est_n,est_aved)
    tgt_jnt_deg_mat = initialize_target_joint_degree_matrix(est_n,est_aved,est_jdd)
    min_jnt_deg_mat = defaultdict(lambda: defaultdict(int))
    [tgt_jnt_deg_mat, tgt_deg_vec] = adjust_target_joint_degree_matrix(est_n,est_aved,est_jdd,tgt_deg_vec,min_jnt_deg_mat,tgt_jnt_deg_mat)
    [subG_jnt_deg_mat, tgt_jnt_deg_mat, tgt_deg_vec] = modify_target_joint_degree_matrix(genG,est_n,est_aved,est_jdd,tgt_node_deg,tgt_deg_vec,tgt_jnt_deg_mat)
    if test:
        # Realizability checks: the targets must admit at least one graph.
        if not check_deg_vec(tgt_deg_vec):
            print("Error: Target degree vector does not satisfy realization conditions.")
            exit()
        if not check_jnt_deg_mat(tgt_jnt_deg_mat, tgt_deg_vec):
            print("Error: Target joint degree matrix does not satisfy realization conditions.")
            exit()
        for d in subG_deg_vec:
            if tgt_deg_vec[d] < subG_deg_vec[d]:
                print("Error: The target number of nodes with degree " + str(d) + "is smaller than the number of nodes with target degree " + str(d) + " in the subgraph.")
                exit()
        for k1 in subG_jnt_deg_mat:
            for k2 in subG_jnt_deg_mat[k1]:
                if tgt_jnt_deg_mat[k1][k2] < subG_jnt_deg_mat[k1][k2]:
                    print("Error: The target number of edges between nodes with degree " + str(k1) + " and nodes with degree " + str(k2) + " is smaller than the number of edges between nodes with target degree " + str(k1) + " and nodes with target degree " + str(k2) + " in the subgraph.")
                    exit()
    # (4) Construct a graph that preserves the target degree vector and the target joint degree matrix
    # (4-1) Determine the target number of nodes
    tgt_N = sum(list(tgt_deg_vec.values()))
    subG_N = len(genG.nodes)
    for v in range(subG_N, tgt_N):
        genG.nodes.add(v)
    # (4-2) Assign the target degree to each added node
    deg_seq = []
    for d in tgt_deg_vec:
        for i in range(0, tgt_deg_vec[d]-subG_deg_vec[d]):
            deg_seq.append(d)
    cur_deg_vec = defaultdict(int)
    for d in tgt_deg_vec:
        cur_deg_vec[d] = subG_deg_vec[d]
    random.shuffle(deg_seq)  # degrees are handed out uniformly at random
    for v in range(subG_N, tgt_N):
        d = deg_seq.pop()
        tgt_node_deg[v] = d
        cur_deg_vec[d] += 1
    if test:
        for d in tgt_deg_vec:
            if tgt_deg_vec[d] != cur_deg_vec[d]:
                print("Error: A generated graph does not preserve the target degree vector.")
                exit()
        for d in cur_deg_vec:
            if cur_deg_vec[d] != tgt_deg_vec[d]:
                print("Error: A generated graph does not preserve the target degree vector.")
                exit()
    # (4-3) Make stub list
    # Each node gets one stub per missing edge endpoint (target degree minus
    # degree already realized in the subgraph).
    stub_list = defaultdict(list)
    for v in genG.nodes:
        d = tgt_node_deg[v]
        subG_d = len(genG.nlist[v])
        for i in range(0, d-subG_d):
            stub_list[d].append(v)
    for d in stub_list:
        random.shuffle(stub_list[d])
    # (4-4) Connect each free stub of nodes with degree k1 and degree k2 uniformly at random
    cur_jnt_deg_mat = defaultdict(lambda: defaultdict(int))
    for k1 in tgt_jnt_deg_mat:
        for k2 in tgt_jnt_deg_mat[k1]:
            cur_jnt_deg_mat[k1][k2] = subG_jnt_deg_mat[k1][k2]
    for k1 in tgt_jnt_deg_mat:
        for k2 in tgt_jnt_deg_mat[k1]:
            while cur_jnt_deg_mat[k1][k2] != tgt_jnt_deg_mat[k1][k2]:
                u = stub_list[k1].pop()
                v = stub_list[k2].pop()
                graph.add_edge(genG, u, v)
                cur_jnt_deg_mat[k1][k2] += 1
                cur_jnt_deg_mat[k2][k1] += 1
    if test:
        for k1 in tgt_jnt_deg_mat:
            for k2 in tgt_jnt_deg_mat[k1]:
                if tgt_jnt_deg_mat[k1][k2] != cur_jnt_deg_mat[k1][k2]:
                    print("Error: A generated graph does not preserve the target joint degree matrix.")
                    exit()
        for k1 in cur_jnt_deg_mat:
            for k2 in cur_jnt_deg_mat[k1]:
                if tgt_jnt_deg_mat[k1][k2] != cur_jnt_deg_mat[k1][k2]:
                    print("Error: A generated graph does not preserve the target joint degree matrix.")
                    exit()
    genG.M = 0
    genG.maxd = 0
    for v in genG.nodes:
        d = len(genG.nlist[v])
        genG.M += d
        if d > genG.maxd:
            genG.maxd = d
    genG.M = int(genG.M/2)
    # (5) Targeting-rewiring process
    est_ddcc = estimation.degree_dependent_clustering_coefficient_estimator(samplinglist)
    # Only edges between two *added* (non-queried) nodes may be rewired, so
    # the observed subgraph is preserved exactly.
    rewirable_edges = []
    for v in range(len(genG.qry_nodes), len(genG.nodes)):
        for w in genG.nlist[v]:
            if w >= v and w >= len(genG.qry_nodes):
                rewirable_edges.append([v, w])
    if test:
        rewiring.check_update_num_tri(genG,rewirable_edges)
    genG = rewiring.targeting_rewiring_for_clustering(genG,est_ddcc,rewirable_edges)
    return genG
|
from kinematics import *
from generator import *
import blender_utils
import sympy
import sys
def main(argv):
    """Export `argv[1]` through Blender; list the exported object names, or
    generate C++ IK sources for the chain ending at `argv[2]` if given."""
    if len(argv) < 2:
        print("usage : " + argv[0] + " <blend_file> [<endpoint>]")
        exit()
    # Symbols used by the kinematics machinery.
    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    z = sympy.Symbol("z")
    blender_utils.call_blender_export(argv[1])
    if len(argv) == 2:
        # No endpoint given: just list every exported object.
        for entry in blender_utils.read_json("blender.out.json"):
            print(entry["name"])
    elif len(argv) == 3:
        endpoint = argv[2].replace("/", "_")
        chain = blender_utils.extract_chain(
            blender_utils.read_json("blender.out.json"), argv[2])
        chain.name = chain.name.replace("/", "_")
        project = CppProjectGenerator()
        add_ik_module(project, chain)
        # Write every generated file of the C++ project to disk.
        for path in project:
            with open(path, "w+") as out:
                out.write(str(project[path]))
|
import json
import pickle

# Build the drinks database and dialogue user-goal fixtures from
# data/drinks_dict.txt. The source file holds a Python-style dict literal,
# so single quotes are swapped for double quotes to make it valid JSON.
with open("data/drinks_dict.txt") as f:
    content = f.read()
data = json.loads(content.replace("'", '"').replace("\n", ''))
# Generation of database of drinks
drinks_db = {}
count = 0
# Generation of user goals
drinks_user_goals = []
# Fully open goal: the user requests every slot.
drinks_user_goals.append({'request_slots': {'DRINK': 'UNK', 'SIZE': 'UNK', 'TEMP': 'UNK'}, 'diaact': 'request', 'inform_slots': {}})
for drink_ind, drink in enumerate(data["DRINK"]):
    drinks_user_goals.append({'request_slots': {'SIZE': 'UNK', 'TEMP': 'UNK'}, 'diaact': 'request', 'inform_slots': {'DRINK': drink}})
    for temp_ind, temp in enumerate(data["TEMP"]):
        # data["Temps"] appears to be a per-drink '0'/'1' mask of valid
        # temperatures — TODO confirm against drinks_dict.txt.
        if data["Temps"][drink_ind][temp_ind] == '1':
            drinks_user_goals.append({'request_slots': {'SIZE': 'UNK', 'DRINK': 'UNK'}, 'diaact': 'request', 'inform_slots': {'TEMP': temp}})
            drinks_user_goals.append({'request_slots': {'SIZE': 'UNK'}, 'diaact': 'request', 'inform_slots': {'TEMP': temp, 'DRINK': drink}})
            for size_ind, size in enumerate(data["SIZE"]):
                # data["Sizes"] is the analogous valid-size mask per drink.
                if data["Sizes"][drink_ind][size_ind] == '1':
                    drinks_db[count] = {'DRINK': drink, 'SIZE': size, 'TEMP': temp}
                    count += 1
                    drinks_user_goals.append({'request_slots': {}, 'diaact': 'request', 'inform_slots': {'DRINK': drink,
                                                                                                        'SIZE': size, 'TEMP': temp}})
                    drinks_user_goals.append({'request_slots': {'TEMP': 'UNK'}, 'diaact': 'request', 'inform_slots': {'SIZE': size, 'DRINK': drink}})
# write to pickle files
with open('data/drinks_db.pkl', 'wb') as handle:
    pickle.dump(drinks_db, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('data/drinks_user_goals.pkl', 'wb') as handle2:
    pickle.dump(drinks_user_goals, handle2, protocol=pickle.HIGHEST_PROTOCOL)
with open('data/drinks_dict.pkl', 'wb') as handle3:
    pickle.dump(data, handle3, protocol=pickle.HIGHEST_PROTOCOL)
# write to text files
with open("data/drinks_db.txt", "w") as f:
    for key, value in drinks_db.items():
        print(key, value)
        f.write('{}: {}'.format(key, value) + "\n")
with open("data/drinks_user_goals.txt", 'w') as output:
    for row in drinks_user_goals:
        output.write(str(row) + '\n')
print("Nice job")
import urllib, json
import requests
from django.core.management.base import BaseCommand
from France.models import vaccinFrance, regionsFrance
class Command(BaseCommand):
    """Reload the vaccinFrance table from the data.gouv.fr open-data feed."""
    help = 'Reload regions data'

    def handle(self, *args, **kwargs):
        r = requests.get('https://www.data.gouv.fr/fr/datasets/r/16cb2df5-e9c7-46ec-9dbf-c902f834dab1')
        # BUG FIX: was `if not r and not r.json()`, which only reported an
        # error when the HTTP status was bad AND the body was empty, and
        # called .json() on an already-failed response. `or` short-circuits:
        # a bad status or an empty payload both abort without touching data.
        if not r or not r.json():
            self.stdout.write("Error. Not update")
        else:
            # Parse once instead of re-calling r.json() in the loop setup.
            rows = r.json()
            # Full refresh: wipe the table, then insert every row.
            vaccinFrance.objects.all().delete()
            for d in rows:
                v = vaccinFrance(date=d['date'],
                                 region= regionsFrance.objects.get(code=d['code']),
                                 totalVaccines=d['totalVaccines'])
                v.save()
            self.stdout.write("Done")
import os
from pathlib import Path
from jinja2 import Environment, FileSystemLoader, select_autoescape
from sanic import Sanic, response
class Springboard:
    """Serve a static frontend directory through Sanic, with every .html file
    exposed as a Jinja2-templated route (minus its extension)."""

    def __init__(self, sanic: Sanic, frontend_dir: Path = Path('./frontend'),
                 add_html_routes: bool = True):
        self.sanic = sanic
        self._frontend = frontend_dir
        # All files remain reachable as plain static assets.
        self.sanic.static('/', str(frontend_dir.absolute()))
        # Values passed to every template render; mutate to share state.
        self.template_dict = {}
        self.env = Environment(
            loader=FileSystemLoader(str(frontend_dir.absolute())),
            autoescape=select_autoescape(['html', 'xml'])
        )

        # noinspection PyUnusedLocal
        async def index(request):
            return await response.file(
                str((frontend_dir / 'index.html').absolute()))

        self.sanic.add_route(index, '/')
        if add_html_routes:
            self._step_and_add_frontend_route(self._frontend)

    def localhost(self, port=80):
        """
        Starts running the Sanic app at localhost
        :param port: port to host the server through
        """
        # BUG FIX: the port-80 URL used to print a dangling colon
        # ("http://localhost:"); omit the suffix entirely on port 80.
        print(
            f'Serving at http://localhost{"" if port == 80 else f":{port}"}')
        self.sanic.run(host='0.0.0.0', port=port)

    def _step_and_add_frontend_route(self, path: Path):
        """
        Recursive function that populates the app with paths that represent
        the html files in the frontend directory
        :param path: path to folder or file
        """
        for f in os.listdir(str(path.absolute())):
            full_path = path / f
            if full_path.is_dir() and not full_path.is_symlink():
                self._step_and_add_frontend_route(full_path)
            elif full_path.suffix.lower() == '.html':
                relative_path = full_path.relative_to(self._frontend)
                uri = relative_path.with_suffix('')
                # BUG FIX: get_template must receive a loader-relative,
                # slash-separated name — the absolute filesystem path used
                # before is not resolvable by FileSystemLoader.
                template_name = relative_path.as_posix()

                # noinspection PyUnusedLocal
                async def resource(request, template_name=template_name):
                    # BUG FIX: template_name is bound as a default argument;
                    # a plain closure late-binds the loop variable, making
                    # every route serve the template of the LAST iteration.
                    template = self.env.get_template(template_name)
                    return await response.html(
                        template.render(self.template_dict))

                self.sanic.add_route(resource, str(uri))
|
"""
Car类
"""
class Car(object):
    """Simple demo car: dimensions, brand, and maintenance-advice lookup."""

    # Brands the shop can service (VW, Toyota, GAC Honda, Volvo, Cadillac).
    description = ['大众','丰田','广本','沃尔沃','凯迪拉克']

    def __init__(self, l, w, h, brand):
        self.L = l          # length
        self.W = w          # width
        self.H = h          # height
        self.brand = brand  # brand label (Chinese)

    def modify_des(self):
        """Return the description list; prompt for one when it is None.

        Fixed `!= None` to the idiomatic `is not None` (identity test);
        behavior is unchanged.
        """
        if self.description is not None:
            return self.description
        else:
            print('请输入您的车辆描述')

    @staticmethod
    def basic_parameters():
        # Acknowledge that the basic parameters have been recorded.
        print('已完成车辆基本参数信息的录入!')

    @classmethod
    def upkeep(cls, desc):
        """Print maintenance advice for brand `desc`, if it is serviced."""
        if desc in cls.description:
            print('根据汽车保养的相关经验,{}品牌的车应于5000km/次的频率进行专业性保养'.format(desc))
        else:
            print('非常抱歉!{}品牌不在我们的保养范围内'.format(desc))
if __name__ == '__main__':
    # Demo: register a VW, record its parameters, and print its upkeep plan
    # when a description is available.
    car_1 = Car(3, 2, 1.5, '大众')
    car_1.basic_parameters()
    if car_1.modify_des():
        car_1.upkeep(car_1.brand)
    else:
        print('请正确填写相关的车辆信息')
|
from flask import json, request
from app.dao.money import get_acc_id, get_acc_descs, get_accsums, get_cat_id, get_cats, get_money_acc, \
get_money_item_row, get_money_items, post_money_acc, post_money_item
from app.dao.dashboard import get_user_data_content
from app.dao.income import get_income_items
from app.main.functions import money
from app.models import Income, IncomeAlloc, MoneyAcc, MoneyCat, MoneyItem
def collect_search_filter():
    """Read the search-filter fields posted by the money-items form.

    Missing or empty form fields fall back to the empty string.
    """
    fields = ('payer', 'memo', 'cleared', 'category')
    return {name: request.form.get(name) or '' for name in fields}
def mget_money_acc(acc_id):
    """Return the MoneyAcc row for acc_id, or a blank unsaved MoneyAcc
    (id 0, empty fields) when acc_id is 0 — the "new account" form case.

    (Removed the stale commented-out implementation that lingered below.)
    """
    if acc_id == 0:
        return MoneyAcc(id=0, acc_name='', bank_name='',
                        sort_code='', acc_num='', acc_desc='')
    return get_money_acc(acc_id)
def mget_moneydict(type="basic"):
# return options for multiple choice controls in money_item and money_items pages
acc_descs = get_acc_descs()
cats = get_cats()
cleareds = ["cleared", "uncleared"]
money_dict = {
"acc_descs": acc_descs,
"cats": cats,
"cleareds": cleareds,
}
if type == "plus_all":
money_dict["acc_descs"].insert(0, "all accounts")
money_dict["cats"].insert(0, "all categories")
money_dict["cleareds"].insert(0, "all cleareds")
return money_dict
def mget_money_items(acc_id):  # we assemble income items and money items into one display - tricky
    """Collect money items (and, unless filtering for uncleared, BACS income
    items) for one account or all accounts, applying any posted or saved
    search filters.

    Parameters
    ----------
    acc_id : money account id; 0 means "all accounts".

    Returns
    -------
    (accsums, moneyvals, transitems) : account totals, the filter values
    used (echoed back to the template), and the merged transaction rows.
    """
    uncleared = False
    money_filter = []
    income_filter = [Income.paytype_id != 1]  # we do not want cheque income displayed, as not cleared income
    moneyvals= {'acc_id': acc_id, 'acc_desc': 'all accounts'}
    if acc_id != 0:  # filter for money account id, otherwise items for all accounts
        money_filter.append(MoneyAcc.id == acc_id)
        income_filter.append(MoneyAcc.id == acc_id)
        # NOTE(review): one_or_none() may return None for an unknown acc_id,
        # which would make the next line raise — confirm callers validate ids.
        account = MoneyAcc.query.filter(MoneyAcc.id == acc_id).one_or_none()
        moneyvals['acc_desc'] = account.acc_desc
    # A non-zero search_id on a GET restores a previously saved search.
    search_id = request.args.get('search_id', 0, type=int)
    if request.method == "POST" or search_id != 0:
        if request.method == 'GET' and search_id != 0:
            # Saved search: the filter dict was stored as JSON.
            search = get_user_data_content(search_id)
            fdict = json.loads(search)
            fdict['acc_desc'] = 'all accounts'
        else:
            # Fresh POST: read the filter fields, defaulting to "all".
            fdict = {'payer': request.form.get("payer") or "all",
                     'memo': request.form.get("memo") or "all",
                     'acc_desc': request.form.get("acc_desc") or "all accounts",
                     'cleared': request.form.get("cleared") or "all",
                     'category': request.form.get("category") or "all categories"}
        payer = fdict.get("payer")
        if payer != "all":
            money_filter.append(MoneyItem.payer.ilike('%{}%'.format(payer)))
            income_filter.append(Income.payer.ilike('%{}%'.format(payer)))
            moneyvals['payer'] = payer
        memo = fdict.get("memo")
        if memo != "all":
            money_filter.append(MoneyItem.memo.ilike('%{}%'.format(memo)))
            # For income rows the memo search matches the rent code instead.
            income_filter.append(IncomeAlloc.rentcode.ilike('%{}%'.format(memo)))
            moneyvals['memo'] = memo
        acc_desc = fdict.get("acc_desc")
        if acc_desc != "all accounts":
            money_filter.append(MoneyAcc.acc_desc.ilike('%{}%'.format(acc_desc)))
            income_filter.append(MoneyAcc.acc_desc.ilike('%{}%'.format(acc_desc)))
            moneyvals['acc_desc'] = acc_desc
        clearedval = fdict.get("cleared")
        moneyvals['cleared'] = clearedval
        if clearedval == "cleared":
            money_filter.append(MoneyItem.cleared == 1)
        elif clearedval == "uncleared":
            # Income rows are always cleared, so they are dropped below.
            uncleared = True
            money_filter.append(MoneyItem.cleared == 0)
        catval = fdict.get("category")
        if catval != "all categories":
            money_filter.append(MoneyCat.cat_name == catval)
            if catval != "lulu bacs income":
                # Category filter excludes income rows (impossible id 0).
                income_filter.append(Income.id == 0)
            moneyvals['category'] = catval
    accsums, transitems = get_money_and_income_items(acc_id, money_filter, income_filter) if not uncleared else \
        get_money_items_only(acc_id, money_filter)
    return accsums, moneyvals, transitems
def get_money_and_income_items(acc_id, money_filter, income_filter):
    """Fetch filtered money items and income items, merge them into one list
    sorted by date descending, and return them with the account sums."""
    money_rows = get_money_items(money_filter)
    income_rows = get_income_items(income_filter)
    accsums = get_accsums(acc_id)
    for row in income_rows:
        # Dress income rows up so they render like money items.
        row.cat_name = 'BACS income'
        row.cleared = 'X'
    merged = sorted(money_rows + income_rows,
                    key=lambda r: r.date, reverse=True)
    return accsums, merged
def get_money_items_only(acc_id, money_filter):
    """Return (account sums, filtered money items) without merging income
    rows — used for the "uncleared" view, where income never appears."""
    items = get_money_items(money_filter)
    return get_accsums(acc_id), items
def mpost_money_acc(acc_id):
    """Create (acc_id == 0) or update a money account from the posted form
    fields and persist it; returns the saved account's id."""
    account = MoneyAcc() if acc_id == 0 else get_money_acc(acc_id)
    # Copy each form field straight onto the model attribute of the same name.
    for field in ("bank_name", "acc_name", "sort_code", "acc_num", "acc_desc"):
        setattr(account, field, request.form.get(field))
    return post_money_acc(account)
def mpost_money_banking(acc_id, amount, today):
    """Record a 'cheques paid in' money item on account acc_id.

    The item is created uncleared under the fixed cheque-income category.
    """
    item = MoneyItem()
    item.date = today
    item.payer = 'cheques paid in'
    item.amount = amount
    item.memo = 'lulu cheque income'
    item.cat_id = 35   # fixed category id for cheque income
    item.cleared = 0   # remains uncleared until the banking clears
    item.acc_id = acc_id
    post_money_item(item)
def mpost_money_item(mode, money_item_id):
    """Create ('new'/'clone' mode) or update an existing money item from the
    posted form, persist it, and return (account id, saved item id)."""
    if mode in ('clone', 'new'):
        item = MoneyItem()
    else:
        item = get_money_item_row(money_item_id)
    item.date = request.form.get("paydate")
    item.amount = request.form.get("amount")
    item.payer = request.form.get("payer")
    item.num = request.form.get("number")
    item.memo = request.form.get("memo")
    item.cleared = 1 if request.form.get("cleared") == "cleared" else 0
    # Category and account arrive as display names; resolve them to ids.
    item.cat_id = get_cat_id(request.form.get("category"))
    acc_id = get_acc_id(request.form.get("acc_desc"))
    item.acc_id = acc_id
    saved_id = post_money_item(item)
    return acc_id, saved_id
def mpost_transfer():
    """Post an inter-account transfer as a matched pair of money items
    (category 53, cleared): a debit on acc_from and a credit on acc_to.
    Returns (credit item id, debit item id)."""
    t_date = request.form.get('t_date')
    payer = request.form.get('payer')
    amount = money(request.form.get("amount"))
    acc_from = request.form.get("acc_from")
    acc_to = request.form.get("acc_to")

    def build_leg(sign, memo, acc_desc):
        # One side of the transfer; sign is -1 for the debit, +1 for credit.
        leg = MoneyItem()
        leg.date = t_date
        leg.amount = sign * amount
        leg.payer = payer
        leg.memo = memo
        leg.cat_id = 53   # fixed "transfer" category id
        leg.cleared = 1
        leg.acc_id = get_acc_id(acc_desc)
        return leg

    item_out = build_leg(-1, "transfer to " + acc_to, acc_from)
    item_in = build_leg(1, "transfer from " + acc_from, acc_to)
    item_in_id = post_money_item(item_in)
    item_out_id = post_money_item(item_out)
    return item_in_id, item_out_id
import zmq

# Interactive REQ client: forwards typed commands to a serial-port bridge
# listening on tcp://localhost:9779 and prints each reply.
context = zmq.Context()
# Socket to talk to server
print("Connecting to hello world server…")
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:9779")
out_command = ""
while out_command.lower() != "exit":
    out_command = input("Command to serial port: ")
    # NOTE(review): "exit" itself is still sent (and a reply awaited) before
    # the loop condition stops it — presumably intentional so the remote side
    # is told to shut down; confirm.
    socket.send_string(out_command)
    # Get the reply.
    message = socket.recv()
    print("Received response for: %s [ %s ]" % (out_command.encode('ascii'), message.decode('ascii')))
|
import sys
import traceback
import threading
import logging
from util import init_logger, tohexs, s2b, uunq
from dataparser import RoughParser
import select
import socket
import time
import abc
import event
import json
from device.devicect10 import SendBufferTimer
from action import ActionPing, RV_DATA_WAIT
_LOGGER = init_logger(__name__, level=logging.DEBUG)
if sys.version_info < (3, 0):
import SocketServer
import SimpleHTTPServer
else:
long = int
import socketserver as SocketServer
import http.server as SimpleHTTPServer
class TCPServerHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.

    handle() runs a non-blocking select loop: inbound bytes are fed to a
    RoughParser, and outbound bytes are pulled from the owning TCPServer's
    per-client write queue (server.s.dowrite).
    """

    # def __init__(self):
    #     super(ServerHandler,self).__init__()
    #     self.stopped = True

    def stop(self):
        """Flag the handler loop to exit and close the client socket."""
        self.stopped = True
        if self.request is not None:
            try:
                self.request.close()
                self.request = None
            except: # noqa: E722
                _LOGGER.warning(f"{traceback.format_exc()}")

    def handle(self):
        """Per-connection loop: parse incoming frames, flush queued writes.

        Exits when the server stops, the peer disconnects, a parser-requested
        disconnect deadline passes, or any exception occurs.
        """
        self.stopped = False
        keyv = '{}:{}'.format(*self.client_address)
        threading.currentThread().name = ("TCPServerHandler")
        _LOGGER.info(keyv + " connected")
        self.request.setblocking(0)  # the select() below drives all I/O
        olddata = b''                # unconsumed tail from previous recv
        serv = self.server.s         # owning TCPServer instance
        serv.setclienthandler(self.client_address, self)
        wlist = []                   # select write-set; non-empty while flushing
        remain = b''                 # bytes still to be sent
        disconnectat = 0             # epoch deadline set by the parser (0 = none)
        parser = RoughParser()
        while not serv.stopped and not self.stopped:
            try:
                ready = select.select([self.request], wlist, [], 0.5)
                if disconnectat > 0 and time.time() >= disconnectat:
                    break
                if ready[0]:
                    data = self.request.recv(4096)
                    if len(data) > 0:
                        _LOGGER.info(
                            "RTCP [" + keyv + "/" + str(len(data)) + "] <-" + tohexs(data))
                        data = olddata + data
                        # Consume as many complete frames as the buffer holds.
                        while True:
                            dictout = parser.parse(
                                serv.getclientinfo(self.client_address), data)
                            rv = dictout['idxout']  # bytes consumed (or sentinel)
                            if 'disconnecttimer' in dictout:
                                disconnectat = dictout['disconnecttimer']
                            if 'reply' in dictout:
                                # Parser produced an immediate reply to queue.
                                remain += dictout['reply']
                                del dictout['reply']
                            if rv and rv > 0:
                                tp = dictout['type']
                                if tp == b"mfz" or tp == b"cry":
                                    serv.setclientinfo(
                                        self.client_address, dictout)
                                data = data[rv:]
                                if len(data):
                                    continue
                            elif rv == RoughParser.DISCARD_BUFFER:
                                data = b''
                            break
                        olddata = data  # keep any partial frame for next recv
                    else:
                        # recv() returning 0 bytes means the peer closed.
                        raise Exception("Readline failed: connection closed?")
                if ready[1] or len(wlist) == 0:
                    if len(remain) == 0:
                        remain = serv.dowrite(self.client_address)
                    if len(remain) > 0:
                        _LOGGER.info("Sending packet to %s:%d" %
                                     self.client_address)
                        nb = self.request.send(remain)
                        _LOGGER.info("Sent")
                        # if tp=="cry":
                        #    _LOGGER.info("STCP ["+keyv+"/"+str(len(remain))+"/"+str(nb)+"] <-"+remain.encode('hex'))
                        remain = remain[nb:]  # keep unsent tail for next round
                        wlist = [self.request]
                    else:
                        wlist = []
            except: # noqa: E722
                _LOGGER.warning(f"{traceback.format_exc()}")
                break
        _LOGGER.info(keyv + " DISCONNECTED")
        serv.unsetclientinfo(self.client_address)
        _LOGGER.info(keyv + " DELETED")
        self.stop()
class EthSender(object):
    """Abstract interface for objects that deliver a raw packet to an address.

    NOTE(review): `__metaclass__` only activates ABCMeta under Python 2; on
    Python 3 this class is a plain concrete base. The file supports both
    interpreters (see the sys.version_info guard above), so it is left as-is.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        pass

    @abc.abstractmethod
    def stop(self):
        """Release any resources held by the sender."""
        pass

    @abc.abstractmethod
    def send_packet(self, addr, packet):
        """Send `packet` to `addr`; return bytes handled, or -1 on error."""
        return -1
class TCPClient(EthSender):
    """EthSender that delivers each packet over a short-lived TCP connection."""

    def __init__(self, timeo):
        super(TCPClient, self).__init__()
        self.timeout = timeo  # per-connection timeout in seconds

    def stop(self):
        # Nothing persistent to tear down: one socket is opened per packet.
        pass

    def send_packet(self, addr, packet):
        """Connect to addr, send the whole packet and return len(packet);
        return -1 on any failure.

        BUG FIX: the socket was leaked when connect()/sendall() raised —
        it is now always closed in a finally block.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect the socket to the port where the server is listening
        try:
            sock.settimeout(self.timeout)
            sock.connect(addr)
            sock.sendall(bytearray(packet))
            return len(packet)
        except: # noqa: E722
            _LOGGER.warning(f"{traceback.format_exc()}")
            return -1
        finally:
            sock.close()
class HTTPServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP front-end for the action system.

    do_GET() turns the request path into an 'ExtInsertAction' event, then
    busy-waits (up to ~30 s) for the resulting action to complete; the
    response status/value is filled in by schedule_response/write_response
    callbacks fired from the event manager.
    """

    # Response state machine values for resp_status.
    RESP_UNINIT = -1
    RESP_WAIT = 0
    RESP_OK = 1

    def __init__(self, request, client_address, server):
        self.resp_status = HTTPServerHandler.RESP_UNINIT
        self.resp_val = {}
        # Subscribe before the base class starts processing the request.
        event.EventManager.on('ActionParsed', self.schedule_response)
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
            self, request, client_address, server)

    def setup(self):
        SimpleHTTPServer.SimpleHTTPRequestHandler.setup(self)
        self.request.settimeout(60)  # avoid hanging dead client sockets

    def log(self, msg):
        """Log msg tagged with this handler's class and client address."""
        _LOGGER.info(
            f"[{self.__class__.__name__}] ({self.client_address[0]}:{self.client_address[1]}) -> {msg}")

    def schedule_response(self, randid, action, **kwargs):
        """ActionParsed callback.

        The client's ephemeral port is used as the request correlation id;
        on a parse failure (action is None) the response is finalized at
        once as empty (rendered as 403 by write_response_base).
        """
        self.log("action parsed")
        if action is not None:
            if randid == self.client_address[1]:
                self.server.s.schedule_action(randid, self)
                self.log("action scheduled")
        else:
            self.resp_val = {'action': None, 'retval': None}
            self.resp_status = HTTPServerHandler.RESP_OK
            self.log("schedule_response resp OK")

    def write_response_base(self, obj):
        """Serialize obj as the JSON HTTP response (200 with an action,
        403 otherwise)."""
        self.protocol_version = 'HTTP/1.1'
        if 'action' in obj and obj['action'] is not None:
            self.send_response(200, 'OK')
        else:
            self.send_response(403, 'Forbidden')
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(s2b(json.dumps(obj)))

    def write_response(self, device, action, retval):
        """Called by HTTPServer when the correlated action finishes."""
        self.resp_val = {'action': action, 'retval': retval}
        self.resp_status = HTTPServerHandler.RESP_OK
        self.log("write_response resp OK")

    def do_GET(self):
        """Fire the request path as an action and wait for its completion."""
        start_wait = time.time()
        self.log(uunq(self.path[1:]))
        self.resp_status = HTTPServerHandler.RESP_WAIT
        event.EventManager.fire(eventname='ExtInsertAction',
                                cmdline=str(self.client_address[1]) + " " +
                                uunq(self.path[1:]), action=None)
        # Poll until a callback marks the response ready, the server stops,
        # or the ~30 s timeout expires (then an empty body -> 403).
        while self.resp_status == HTTPServerHandler.RESP_WAIT and not self.server.s.stopped:
            time.sleep(0.2)
            if time.time() - start_wait > 30:
                self.resp_val = {}
                break
        self.log("write response NOW")
        self.write_response_base(self.resp_val)
        # Write the response
        # self.path = '/'
        # return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
class HTTPServer(threading.Thread):
    """Thread that runs a ThreadingTCPServer of HTTPServerHandler instances
    and routes finished actions back to the waiting HTTP handlers.

    `actions` maps a request correlation id (the client's ephemeral port,
    as a string) to the handler awaiting that action; `cond` guards it.
    """

    def __init__(self, tcpport):
        super(HTTPServer, self).__init__()
        self.port = tcpport
        self.server = None       # the underlying socketserver instance
        self.stopped = True
        self.actions = {}        # randid (str) -> HTTPServerHandler
        self.cond = threading.Condition()
        self.name = ("HTTPServer")

    def stop(self):
        """Shut the socket server down and drop all pending handlers."""
        if self.server is not None:
            self.cond.acquire()
            self.stopped = True
            self.actions = {}
            self.cond.release()
            self.server.shutdown()
            # self.server.socket.shutdown(SHUT_RDWR)
            # self.server.socket.close()
            self.server = None
        else:
            self.stopped = True

    def schedule_action(self, randid, client):
        """Register `client` as the handler waiting for action `randid`."""
        if not self.stopped:
            self.cond.acquire()
            self.actions[str(randid)] = client
            self.cond.release()

    def run(self):
        event.EventManager.on('ActionDone', self.handle_action_done)
        self.stopped = False
        # Retry binding every 5 s (e.g. port still in TIME_WAIT).
        while not self.stopped:
            try:
                self.server = SocketServer.ThreadingTCPServer(
                    ("0.0.0.0", self.port), HTTPServerHandler)
                break
            except: # noqa: E722
                _LOGGER.warning(f"{traceback.format_exc()}")
                time.sleep(5)
        if self.server is not None:
            self.server.s = self  # back-reference used by the handlers
            self.server.serve_forever()

    def handle_action_done(self, device, action, retval, **kwargs):
        """ActionDone callback: hand the result to the waiting handler."""
        if not self.stopped:
            s = str(action.randomid)
            client = None
            _LOGGER.info("Searching http client")
            self.cond.acquire()
            if s in self.actions:
                client = self.actions[s]
                del self.actions[s]
            self.cond.release()
            if client is not None:
                client.log("Client found")
                client.write_response(device, action, retval)
class TCPServer(threading.Thread):
    """Thread running a ThreadingTCPServer of TCPServerHandler connections.

    Keeps, per connected client ("host:port" key):
      - clientinfo: the parser state dict (type, mac, device, ...),
      - clienthandler: the TCPServerHandler serving the connection,
      - towrite: a queue of outgoing payloads (bytes/str or SendBufferTimer).
    All three are guarded by `cond`. A repeating timer fakes an ActionPing
    every 60 s to keep traffic flowing.
    """

    def __init__(self, tcpport):
        super(TCPServer, self).__init__()
        SocketServer.TCPServer.allow_reuse_address = True
        self.port = tcpport
        self.server = None
        self.stopped = True
        self.towrite = {}        # keyv -> list of pending payloads
        self.cond = threading.Condition()
        self.clientinfo = {}     # keyv -> parser state dict
        self.clienthandler = {}  # keyv -> TCPServerHandler
        self.timer = None        # the ping timer
        self.name = ("TCPServer")

    def stop(self):
        """Shut down the socket server, timer, and all per-client state."""
        if self.server is not None:
            self.cond.acquire()
            self.stopped = True
            self.towrite = {}
            self.cond.release()
            self.clientinfo = {}
            self.clienthandler = {}
            self.server.shutdown()
            # self.server.socket.shutdown(SHUT_RDWR)
            # self.server.socket.close()
            self.server = None
            if self.timer is not None:
                self.timer.cancel()
                self.timer = None
        else:
            self.stopped = True

    def setclientinfo(self, addr, dictout):
        """Replace the parser-state dict for client addr."""
        keyv = '{}:{}'.format(*addr)
        self.cond.acquire()
        self.clientinfo[keyv] = dictout
        self.cond.release()

    def setclienthandler(self, addr, handler):
        """Register the connection handler serving client addr."""
        keyv = '{}:{}'.format(*addr)
        self.cond.acquire()
        self.clienthandler[keyv] = handler
        self.cond.release()

    def getclientinfo(self, addr):
        """Return the state dict for addr (default: just {'addr': addr})."""
        self.cond.acquire()
        keyv = '{}:{}'.format(*addr)
        if keyv in self.clientinfo:
            ci = self.clientinfo[keyv]
        else:
            ci = {'addr': addr}
        self.cond.release()
        return ci

    def unsetclientinfo(self, addr):
        """Drop all state for addr, aborting its queued SendBufferTimers."""
        self.cond.acquire()
        keyv = '{}:{}'.format(*addr)
        # _LOGGER.info("02_unsetting %s" %keyv)
        if keyv in self.towrite:
            # _LOGGER.info("02_found in towrite")
            for x in self.towrite[keyv]:
                if isinstance(x, SendBufferTimer):
                    x.set_finished(None)
                    # _LOGGER.info("02_setfinish")
            del self.towrite[keyv]
        if keyv in self.clientinfo:
            # _LOGGER.info("02_found in clientinfo")
            del self.clientinfo[keyv]
        if keyv in self.clienthandler:
            # _LOGGER.info("02_found in clienthandler")
            self.clienthandler[keyv].stop()
            del self.clienthandler[keyv]
        self.cond.release()

    def handle_action_done(self, device, action, retval, **kwargs):
        """ActionDone callback: broadcast the result to 'mfz' clients and
        publish MQTT notifications; a finished ping re-arms the timer."""
        if isinstance(action, ActionPing) and self.timer is not None:
            self.timer_ping_init()
        threading.currentThread().name = ("handle_action_done")
        strout = json.dumps({'action': action, 'retval': retval}) + '\n'
        if device is not None and action is not None:
            lst = action.mqtt_publish_onfinish(retval)
            lst.extend(device.mqtt_publish_onfinish(action, retval))
            device.mqtt_publish_all(lst)
        self.schedulewrite(strout)

    def timer_ping_init(self):
        """(Re)arm the 60 s timer that injects a synthetic ActionPing."""
        if self.timer is not None:
            self.timer.cancel()
        self.timer = threading.Timer(
            60, self.handle_action_done, (None, ActionPing(), 1,))
        self.timer.name = ("timerping")
        self.timer.daemon = True
        self.timer.start()

    def run(self):
        event.EventManager.on('ActionDone', self.handle_action_done)
        self.stopped = False
        self.timer_ping_init()
        # Retry binding every 5 s until it succeeds or we are stopped.
        while not self.stopped:
            try:
                self.server = SocketServer.ThreadingTCPServer(
                    ("0.0.0.0", self.port), TCPServerHandler)
                break
            except: # noqa: E722
                _LOGGER.warning(f"{traceback.format_exc()}")
                time.sleep(5)
        if self.server is not None:
            self.server.s = self  # back-reference used by the handlers
            self.server.serve_forever()

    def dowrite(self, addr):
        """Return the next byte payload to send to addr (b'' when idle).

        Plain bytes/str entries are popped and returned. SendBufferTimer
        entries are stateful: waiting-for-reply blocks the queue, finished
        ones are dropped (a failure arms an immediate disconnect), and
        fresh/retryable ones are scheduled and their packet returned.
        """
        snd = b''
        self.cond.acquire()
        keyv = '{}:{}'.format(*addr)
        if not self.stopped and keyv in self.clientinfo and keyv in self.towrite:
            while len(snd) == 0 and len(self.towrite[keyv]) > 0:
                snd = self.towrite[keyv][0]
                if isinstance(snd, (bytes, str)):
                    snd = s2b(self.towrite[keyv].pop(0))
                    # _LOGGER.info("01_1")
                elif snd.timer is not None:  # dobbiamo aspettare la risposta
                    snd = b''
                    # _LOGGER.info("01_2")
                    break
                elif snd.has_succeeded() or snd.has_failed():
                    if "sender" in self.clientinfo[keyv]:
                        del self.clientinfo[keyv]['sender']
                    if snd.has_failed():
                        # Failed delivery: disconnect this client now.
                        self.clientinfo[keyv]['disconnecttimer'] = time.time()
                    self.towrite[keyv].pop(0)
                    snd = b''
                    # _LOGGER.info("01_3")
                else:  # dobbiamo ancora spedire il pacchetto o c'e gia stato un timeout ma dobbiamo fare altri tentativi
                    snd.clientinfo = self.clientinfo[keyv]
                    self.clientinfo[keyv]['sender'] = snd
                    snd = snd.schedule()
                    # _LOGGER.info("01_4")
        self.cond.release()
        return snd

    def innerschedule(self, keyv, w):
        """Append payload(s) w to client keyv's queue (caller holds cond)."""
        if keyv not in self.towrite:
            self.towrite[keyv] = []
        if isinstance(w, list):
            self.towrite[keyv].extend(w)
        else:
            self.towrite[keyv].append(w)

    def get_connected_clients(self):
        """Map "host:port" -> device for every client with a bound device."""
        lst = dict()
        for _, v in self.clientinfo.items():
            if 'device' in v:
                keyv = '{}:{}'.format(*(v['hp']))
                lst[keyv] = v['device']
        return lst

    def schedulewrite(self, w):
        """Queue w for delivery; return True when it reached some client.

        Plain payloads are broadcast to every 'mfz' client. A
        SendBufferTimer goes to its own address, falling back to a lookup
        by MAC when that address is no longer connected.
        """
        exitv = False
        if not self.stopped and self.server is not None:
            self.cond.acquire()
            if not isinstance(w, SendBufferTimer):
                for keyv, v in self.clientinfo.items():
                    if v['type'] == b'mfz':
                        self.innerschedule(keyv, w)
                        exitv = True
            else:
                keyv = '{}:{}'.format(*(w.addr))
                if keyv not in self.clientinfo:
                    for keyv, v in self.clientinfo.items():
                        if 'mac' in v and v['mac'] == w.mac:
                            self.innerschedule(keyv, w)
                            exitv = True
                            break
                else:
                    exitv = True
                    self.innerschedule(keyv, w)
            self.cond.release()
        return exitv
class EthBuffCont(object):
    """Value pair holding a received payload and the address it came from."""

    def __init__(self, ad, d):
        self.addr = ad  # (host, port) source address
        self.data = d   # raw payload
class ListenerTh(threading.Thread, EthSender):
    """UDP listener thread that feeds every received datagram to a
    RoughParser; doubles as an EthSender via the same bound socket."""

    def send_packet(self, addr, packet):
        """Send packet to addr via the listening socket; -1 on error."""
        try:
            return self.socket.sendto(bytearray(packet), addr)
        except: # noqa: E722
            _LOGGER.warning(f"{traceback.format_exc()}")
            return -1

    def __init__(self, port, *args, **kwargs):
        super(ListenerTh, self).__init__(*args, **kwargs)
        self.port = port
        # Set while the thread is NOT running; stop() waits on it.
        self.stopped_ev = threading.Event()
        self.stopped_ev.set()
        self.preparse = RoughParser()
        self.socket = None
        self.stopped = True
        self.name = ("ListenerTh")

    def stop(self):
        """Unblock recvfrom with a self-addressed byte, then close."""
        if self.socket:
            self.stopped = True
            self.socket.sendto(bytearray(b'a'), ('127.0.0.1', self.port))
            self.stopped_ev.wait()
            self.socket.close()
            self.socket = None

    def run(self):
        """ Listen on socket. """
        self.stopped_ev.clear()
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            for opt in [socket.SO_BROADCAST, socket.SO_REUSEADDR]:
                self.socket.setsockopt(socket.SOL_SOCKET, opt, 1)
            self.socket.bind(('', self.port))
            self.stopped = False
            while not self.stopped:
                try:
                    _LOGGER.info('enterrecv')
                    data, addr = self.socket.recvfrom(1024)
                    _LOGGER.info('1) recv %d (%s:%d) ' % (0 if not data else len(
                        data), 'unkn' if not addr else addr[0], 0 if not addr else addr[1]))
                    if data is not None and len(data):
                        # '@'-prefixed payloads get a newline appended —
                        # presumably a line-oriented command form; confirm
                        # against RoughParser.
                        self.preparse.parse(
                            addr, data if data[0:1] != b'@' else data + b'\n')['idxout']
                    _LOGGER.info('exitrecv')
                except: # noqa: E722
                    _LOGGER.warning(f"{traceback.format_exc()}")
                    break
        except: # noqa: E722
            _LOGGER.warning(f"{traceback.format_exc()}")
        self.stopped_ev.set()  # signal stop() that the loop has exited
class UdpManager(object):
    """Coordinates UDP request/response traffic for discovered devices.

    A ListenerTh receives datagrams and hands them to add_to_buffer()
    (via the 'RawDataReceived' event); _udp_transact() sends a payload and
    waits on a Condition for the matching response to appear in the buffer.
    """

    def __init__(self, options):
        """Store connection options; call configure() to start listening."""
        self.port = options.port
        self.retry = options.retry              # send attempts per transaction
        self.timeout = options.timeout          # default wait per attempt (s)
        self.broadcast_address = options.broadcast
        self.remote = options.remote            # True -> relay through TCPClient
        self.listener = None
        self.sender = None
        self.buffer = {}       # keyv -> EthBuffCont of the latest datagram
        self.buffer_l = None   # threading.Condition guarding self.buffer

    def add_to_buffer(self, key, hp, data, **kwargs):
        """Store a received datagram and wake threads waiting in _udp_transact."""
        with self.buffer_l:
            self.buffer[key] = EthBuffCont(hp, data)
            # notify_all(): notifyAll() is a deprecated camelCase alias,
            # removed in modern Python releases.
            self.buffer_l.notify_all()

    def _udp_transact(self, hp, payload, handler, action, keyfind, timeout=-1, **kwargs):
        """ Complete a UDP transaction.
        UDP is stateless and not guaranteed, so we have to
        take some mitigation steps:
        - Send payload multiple times.
        - Wait for awhile to receive response.
        :param hp: (host, port) target pair.
        :param payload: Payload to send.
        :param handler: Response handler; None means fire-and-forget
            (returns 5 after the first send).
        :param action: Opaque value forwarded to the handler.
        :param keyfind: Callable (hp, payload) -> buffer key for matching
            the response to this request.
        :param timeout: Timeout in seconds; None/negative uses self.timeout.
        :returns: The handler's return value, None on error/shutdown, or 5
            when no handler was supplied.
        """
        u = self
        keyv = keyfind(hp, payload)
        host = hp[0]
        # x.y.z.255 targets (or no host) are treated as broadcasts.
        broadcast = host is None or (len(host) > 4 and host[-4:] == '.255')
        with u.buffer_l:
            # Drop stale responses so only replies to THIS request match.
            if broadcast:
                u.buffer.clear()
            elif keyv in u.buffer:
                del u.buffer[keyv]
        if timeout is None or timeout < 0:
            timeout = u.timeout
        if broadcast or u.remote:
            host = u.broadcast_address
        retval = None
        hp2 = (host, u.port if not u.remote or hp[1] <= 0 else hp[1])
        for dd in range(u.retry):
            if len(payload) > 0 and retval != RV_DATA_WAIT:
                try:
                    self.sender.send_packet(hp2, payload)
                    _LOGGER.info(f"S [{hp[0]}:{hp[1]}] -> {tohexs(payload)}")
                except:  # noqa: E722
                    _LOGGER.warning(f"{traceback.format_exc()}")
                    return None
            if handler is None:
                # Fire-and-forget: nothing to wait for.
                return 5
            elif broadcast:
                # Broadcast: let responses accumulate, then handle them all.
                time.sleep(timeout)
                break
            else:
                with u.buffer_l:
                    buffcont = u.buffer.get(keyv, None)
                    if buffcont is None:
                        now = time.time()
                        once = False
                        # Wait at least once even if timeout already elapsed.
                        while time.time() < now + timeout or not once:
                            u.buffer_l.wait(timeout)
                            once = True
                            buffcont = u.buffer.get(keyv, None)
                            if buffcont is not None or u.listener is None:
                                break
                if u.listener is None:
                    # Manager was stopped while we were waiting.
                    return None
                elif buffcont:
                    retval = handler(buffcont.addr, action,
                                     buffcont.data, **kwargs)
                    # Return as soon as a definitive response is received.
                    if retval is not None and retval != RV_DATA_WAIT:
                        break
                    else:
                        with u.buffer_l:
                            # pop() instead of del: the lock was dropped
                            # around the handler call, so the key may be gone.
                            u.buffer.pop(keyv, None)
                        if retval == RV_DATA_WAIT:
                            # NOTE(review): decrementing a range() loop
                            # variable has no effect on the iteration count,
                            # so a DATA_WAIT still consumes a retry slot.
                            dd -= 1
                else:
                    retval = None
        if broadcast:
            with u.buffer_l:
                retval = handler(None, action, u.buffer, **kwargs)
        return retval

    def configure(self):
        """Create the condition, start the listener thread and pick a sender.

        Idempotent: does nothing if already configured (buffer_l is set).
        """
        if self.buffer_l is None:
            self.buffer_l = threading.Condition()
            self.listener = ListenerTh(self.port)
            self.listener.start()
            # In remote mode payloads are relayed over TCP instead of being
            # sent from the listener's own UDP socket.
            self.sender = self.listener if not self.remote else TCPClient(
                self.timeout)
            event.EventManager.on('RawDataReceived', self.add_to_buffer)

    def stop(self):
        """Stop the listener thread and the sender (safe to call twice)."""
        if self.listener is not None:
            _LOGGER.info("Stopping Listener Thread")
            self.listener.stop()
            _LOGGER.info("Listener Thread Stopped")
            self.listener = None
        if self.sender is not None:
            # When sender IS the listener, its stop() is a no-op the second
            # time because the socket is already None.
            _LOGGER.info("Stopping Sender")
            self.sender.stop()
            _LOGGER.info("Sender Stopped")
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/common/segments.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.common import criteria_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_criteria__pb2
from google.ads.google_ads.v3.proto.enums import ad_network_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_ad__network__type__pb2
from google.ads.google_ads.v3.proto.enums import click_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_click__type__pb2
from google.ads.google_ads.v3.proto.enums import conversion_action_category_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__action__category__pb2
from google.ads.google_ads.v3.proto.enums import conversion_attribution_event_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__attribution__event__type__pb2
from google.ads.google_ads.v3.proto.enums import conversion_lag_bucket_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__lag__bucket__pb2
from google.ads.google_ads.v3.proto.enums import conversion_or_adjustment_lag_bucket_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__or__adjustment__lag__bucket__pb2
from google.ads.google_ads.v3.proto.enums import day_of_week_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_day__of__week__pb2
from google.ads.google_ads.v3.proto.enums import device_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_device__pb2
from google.ads.google_ads.v3.proto.enums import external_conversion_source_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_external__conversion__source__pb2
from google.ads.google_ads.v3.proto.enums import hotel_date_selection_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__date__selection__type__pb2
from google.ads.google_ads.v3.proto.enums import hotel_price_bucket_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__price__bucket__pb2
from google.ads.google_ads.v3.proto.enums import hotel_rate_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__rate__type__pb2
from google.ads.google_ads.v3.proto.enums import month_of_year_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_month__of__year__pb2
from google.ads.google_ads.v3.proto.enums import placeholder_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placeholder__type__pb2
from google.ads.google_ads.v3.proto.enums import product_channel_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__pb2
from google.ads.google_ads.v3.proto.enums import product_channel_exclusivity_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__exclusivity__pb2
from google.ads.google_ads.v3.proto.enums import product_condition_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__condition__pb2
from google.ads.google_ads.v3.proto.enums import search_engine_results_page_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__engine__results__page__type__pb2
from google.ads.google_ads.v3.proto.enums import search_term_match_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__term__match__type__pb2
from google.ads.google_ads.v3.proto.enums import slot_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_slot__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/common/segments.proto',
package='google.ads.googleads.v3.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v3.commonB\rSegmentsProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V3.Common\312\002\036Google\\Ads\\GoogleAds\\V3\\Common\352\002\"Google::Ads::GoogleAds::V3::Common'),
serialized_pb=_b('\n3google/ads/googleads_v3/proto/common/segments.proto\x12\x1egoogle.ads.googleads.v3.common\x1a\x33google/ads/googleads_v3/proto/common/criteria.proto\x1a\x39google/ads/googleads_v3/proto/enums/ad_network_type.proto\x1a\x34google/ads/googleads_v3/proto/enums/click_type.proto\x1a\x44google/ads/googleads_v3/proto/enums/conversion_action_category.proto\x1aKgoogle/ads/googleads_v3/proto/enums/conversion_attribution_event_type.proto\x1a?google/ads/googleads_v3/proto/enums/conversion_lag_bucket.proto\x1aMgoogle/ads/googleads_v3/proto/enums/conversion_or_adjustment_lag_bucket.proto\x1a\x35google/ads/googleads_v3/proto/enums/day_of_week.proto\x1a\x30google/ads/googleads_v3/proto/enums/device.proto\x1a\x44google/ads/googleads_v3/proto/enums/external_conversion_source.proto\x1a\x43google/ads/googleads_v3/proto/enums/hotel_date_selection_type.proto\x1a<google/ads/googleads_v3/proto/enums/hotel_price_bucket.proto\x1a\x39google/ads/googleads_v3/proto/enums/hotel_rate_type.proto\x1a\x37google/ads/googleads_v3/proto/enums/month_of_year.proto\x1a:google/ads/googleads_v3/proto/enums/placeholder_type.proto\x1a\x39google/ads/googleads_v3/proto/enums/product_channel.proto\x1a\x45google/ads/googleads_v3/proto/enums/product_channel_exclusivity.proto\x1a;google/ads/googleads_v3/proto/enums/product_condition.proto\x1aIgoogle/ads/googleads_v3/proto/enums/search_engine_results_page_type.proto\x1a@google/ads/googleads_v3/proto/enums/search_term_match_type.proto\x1a.google/ads/googleads_v3/proto/enums/slot.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\xb1*\n\x08Segments\x12W\n\x0f\x61\x64_network_type\x18\x03 \x01(\x0e\x32>.google.ads.googleads.v3.enums.AdNetworkTypeEnum.AdNetworkType\x12J\n\nclick_type\x18\x1a \x01(\x0e\x32\x36.google.ads.googleads.v3.enums.ClickTypeEnum.ClickType\x12\x37\n\x11\x63onversion_action\x18\x34 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12x\n\x1a\x63onversion_action_category\x18\x35 
\x01(\x0e\x32T.google.ads.googleads.v3.enums.ConversionActionCategoryEnum.ConversionActionCategory\x12<\n\x16\x63onversion_action_name\x18\x36 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x15\x63onversion_adjustment\x18\x1b \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x8b\x01\n!conversion_attribution_event_type\x18\x02 \x01(\x0e\x32`.google.ads.googleads.v3.enums.ConversionAttributionEventTypeEnum.ConversionAttributionEventType\x12i\n\x15\x63onversion_lag_bucket\x18\x32 \x01(\x0e\x32J.google.ads.googleads.v3.enums.ConversionLagBucketEnum.ConversionLagBucket\x12\x8f\x01\n#conversion_or_adjustment_lag_bucket\x18\x33 \x01(\x0e\x32\x62.google.ads.googleads.v3.enums.ConversionOrAdjustmentLagBucketEnum.ConversionOrAdjustmentLagBucket\x12*\n\x04\x64\x61te\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12K\n\x0b\x64\x61y_of_week\x18\x05 \x01(\x0e\x32\x36.google.ads.googleads.v3.enums.DayOfWeekEnum.DayOfWeek\x12@\n\x06\x64\x65vice\x18\x01 \x01(\x0e\x32\x30.google.ads.googleads.v3.enums.DeviceEnum.Device\x12x\n\x1a\x65xternal_conversion_source\x18\x37 \x01(\x0e\x32T.google.ads.googleads.v3.enums.ExternalConversionSourceEnum.ExternalConversionSource\x12\x38\n\x12geo_target_airport\x18\x41 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11geo_target_canton\x18L \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fgeo_target_city\x18> \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x12geo_target_country\x18M \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11geo_target_county\x18\x44 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x13geo_target_district\x18\x45 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10geo_target_metro\x18? 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12G\n!geo_target_most_specific_location\x18H \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12<\n\x16geo_target_postal_code\x18G \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x13geo_target_province\x18K \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x11geo_target_region\x18@ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10geo_target_state\x18\x43 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12>\n\x19hotel_booking_window_days\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x34\n\x0fhotel_center_id\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x39\n\x13hotel_check_in_date\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12Z\n\x1ahotel_check_in_day_of_week\x18\t \x01(\x0e\x32\x36.google.ads.googleads.v3.enums.DayOfWeekEnum.DayOfWeek\x12\x30\n\nhotel_city\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0bhotel_class\x18\x0b \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x33\n\rhotel_country\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12s\n\x19hotel_date_selection_type\x18\r \x01(\x0e\x32P.google.ads.googleads.v3.enums.HotelDateSelectionTypeEnum.HotelDateSelectionType\x12\x39\n\x14hotel_length_of_stay\x18\x0e \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x38\n\x12hotel_rate_rule_id\x18I \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12W\n\x0fhotel_rate_type\x18J \x01(\x0e\x32>.google.ads.googleads.v3.enums.HotelRateTypeEnum.HotelRateType\x12`\n\x12hotel_price_bucket\x18N \x01(\x0e\x32\x44.google.ads.googleads.v3.enums.HotelPriceBucketEnum.HotelPriceBucket\x12\x31\n\x0bhotel_state\x18\x0f \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x04hour\x18\x10 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x41\n\x1dinteraction_on_this_extension\x18\x31 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x07keyword\x18= \x01(\x0b\x32\'.google.ads.googleads.v3.common.Keyword\x12+\n\x05month\x18\x11 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12Q\n\rmonth_of_year\x18\x12 \x01(\x0e\x32:.google.ads.googleads.v3.enums.MonthOfYearEnum.MonthOfYear\x12\x36\n\x10partner_hotel_id\x18\x13 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\\\n\x10placeholder_type\x18\x14 \x01(\x0e\x32\x42.google.ads.googleads.v3.enums.PlaceholderTypeEnum.PlaceholderType\x12;\n\x15product_aggregator_id\x18\x1c \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x45\n\x1fproduct_bidding_category_level1\x18\x38 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x45\n\x1fproduct_bidding_category_level2\x18\x39 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x45\n\x1fproduct_bidding_category_level3\x18: \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x45\n\x1fproduct_bidding_category_level4\x18; \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x45\n\x1fproduct_bidding_category_level5\x18< \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rproduct_brand\x18\x1d \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12Y\n\x0fproduct_channel\x18\x1e \x01(\x0e\x32@.google.ads.googleads.v3.enums.ProductChannelEnum.ProductChannel\x12{\n\x1bproduct_channel_exclusivity\x18\x1f \x01(\x0e\x32V.google.ads.googleads.v3.enums.ProductChannelExclusivityEnum.ProductChannelExclusivity\x12_\n\x11product_condition\x18 \x01(\x0e\x32\x44.google.ads.googleads.v3.enums.ProductConditionEnum.ProductCondition\x12\x35\n\x0fproduct_country\x18! 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12?\n\x19product_custom_attribute0\x18\" \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12?\n\x19product_custom_attribute1\x18# \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12?\n\x19product_custom_attribute2\x18$ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12?\n\x19product_custom_attribute3\x18% \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12?\n\x19product_custom_attribute4\x18& \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_item_id\x18\' \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10product_language\x18( \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x13product_merchant_id\x18) \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x36\n\x10product_store_id\x18* \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rproduct_title\x18+ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_type_l1\x18, \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_type_l2\x18- \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_type_l3\x18. 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_type_l4\x18/ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fproduct_type_l5\x18\x30 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07quarter\x18\x15 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x83\x01\n\x1fsearch_engine_results_page_type\x18\x46 \x01(\x0e\x32Z.google.ads.googleads.v3.enums.SearchEngineResultsPageTypeEnum.SearchEngineResultsPageType\x12j\n\x16search_term_match_type\x18\x16 \x01(\x0e\x32J.google.ads.googleads.v3.enums.SearchTermMatchTypeEnum.SearchTermMatchType\x12:\n\x04slot\x18\x17 \x01(\x0e\x32,.google.ads.googleads.v3.enums.SlotEnum.Slot\x12-\n\x07webpage\x18\x42 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04week\x18\x18 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x04year\x18\x19 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"~\n\x07Keyword\x12\x38\n\x12\x61\x64_group_criterion\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x04info\x18\x02 \x01(\x0b\x32+.google.ads.googleads.v3.common.KeywordInfoB\xe8\x01\n\"com.google.ads.googleads.v3.commonB\rSegmentsProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V3.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V3\\Common\xea\x02\"Google::Ads::GoogleAds::V3::Commonb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_ad__network__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_click__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__action__category__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__attribution__event__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__lag__bucket__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__or__adjustment__lag__bucket__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_day__of__week__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_device__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_external__conversion__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__date__selection__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__price__bucket__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__rate__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_month__of__year__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placeholder__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__exclusivity__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__condition__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__engine__results__page__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__term__match__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_slot__pb2.DESCRIPTO
R,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SEGMENTS = _descriptor.Descriptor(
name='Segments',
full_name='google.ads.googleads.v3.common.Segments',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ad_network_type', full_name='google.ads.googleads.v3.common.Segments.ad_network_type', index=0,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='click_type', full_name='google.ads.googleads.v3.common.Segments.click_type', index=1,
number=26, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_action', full_name='google.ads.googleads.v3.common.Segments.conversion_action', index=2,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_action_category', full_name='google.ads.googleads.v3.common.Segments.conversion_action_category', index=3,
number=53, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_action_name', full_name='google.ads.googleads.v3.common.Segments.conversion_action_name', index=4,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_adjustment', full_name='google.ads.googleads.v3.common.Segments.conversion_adjustment', index=5,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_attribution_event_type', full_name='google.ads.googleads.v3.common.Segments.conversion_attribution_event_type', index=6,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_lag_bucket', full_name='google.ads.googleads.v3.common.Segments.conversion_lag_bucket', index=7,
number=50, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conversion_or_adjustment_lag_bucket', full_name='google.ads.googleads.v3.common.Segments.conversion_or_adjustment_lag_bucket', index=8,
number=51, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date', full_name='google.ads.googleads.v3.common.Segments.date', index=9,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='day_of_week', full_name='google.ads.googleads.v3.common.Segments.day_of_week', index=10,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device', full_name='google.ads.googleads.v3.common.Segments.device', index=11,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='external_conversion_source', full_name='google.ads.googleads.v3.common.Segments.external_conversion_source', index=12,
number=55, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_airport', full_name='google.ads.googleads.v3.common.Segments.geo_target_airport', index=13,
number=65, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_canton', full_name='google.ads.googleads.v3.common.Segments.geo_target_canton', index=14,
number=76, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_city', full_name='google.ads.googleads.v3.common.Segments.geo_target_city', index=15,
number=62, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_country', full_name='google.ads.googleads.v3.common.Segments.geo_target_country', index=16,
number=77, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_county', full_name='google.ads.googleads.v3.common.Segments.geo_target_county', index=17,
number=68, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_district', full_name='google.ads.googleads.v3.common.Segments.geo_target_district', index=18,
number=69, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_metro', full_name='google.ads.googleads.v3.common.Segments.geo_target_metro', index=19,
number=63, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_most_specific_location', full_name='google.ads.googleads.v3.common.Segments.geo_target_most_specific_location', index=20,
number=72, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_postal_code', full_name='google.ads.googleads.v3.common.Segments.geo_target_postal_code', index=21,
number=71, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_province', full_name='google.ads.googleads.v3.common.Segments.geo_target_province', index=22,
number=75, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_region', full_name='google.ads.googleads.v3.common.Segments.geo_target_region', index=23,
number=64, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_target_state', full_name='google.ads.googleads.v3.common.Segments.geo_target_state', index=24,
number=67, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_booking_window_days', full_name='google.ads.googleads.v3.common.Segments.hotel_booking_window_days', index=25,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_center_id', full_name='google.ads.googleads.v3.common.Segments.hotel_center_id', index=26,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_check_in_date', full_name='google.ads.googleads.v3.common.Segments.hotel_check_in_date', index=27,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_check_in_day_of_week', full_name='google.ads.googleads.v3.common.Segments.hotel_check_in_day_of_week', index=28,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_city', full_name='google.ads.googleads.v3.common.Segments.hotel_city', index=29,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_class', full_name='google.ads.googleads.v3.common.Segments.hotel_class', index=30,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_country', full_name='google.ads.googleads.v3.common.Segments.hotel_country', index=31,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_date_selection_type', full_name='google.ads.googleads.v3.common.Segments.hotel_date_selection_type', index=32,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_length_of_stay', full_name='google.ads.googleads.v3.common.Segments.hotel_length_of_stay', index=33,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_rate_rule_id', full_name='google.ads.googleads.v3.common.Segments.hotel_rate_rule_id', index=34,
number=73, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_rate_type', full_name='google.ads.googleads.v3.common.Segments.hotel_rate_type', index=35,
number=74, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_price_bucket', full_name='google.ads.googleads.v3.common.Segments.hotel_price_bucket', index=36,
number=78, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hotel_state', full_name='google.ads.googleads.v3.common.Segments.hotel_state', index=37,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hour', full_name='google.ads.googleads.v3.common.Segments.hour', index=38,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interaction_on_this_extension', full_name='google.ads.googleads.v3.common.Segments.interaction_on_this_extension', index=39,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keyword', full_name='google.ads.googleads.v3.common.Segments.keyword', index=40,
number=61, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='month', full_name='google.ads.googleads.v3.common.Segments.month', index=41,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='month_of_year', full_name='google.ads.googleads.v3.common.Segments.month_of_year', index=42,
number=18, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partner_hotel_id', full_name='google.ads.googleads.v3.common.Segments.partner_hotel_id', index=43,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='placeholder_type', full_name='google.ads.googleads.v3.common.Segments.placeholder_type', index=44,
number=20, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_aggregator_id', full_name='google.ads.googleads.v3.common.Segments.product_aggregator_id', index=45,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_bidding_category_level1', full_name='google.ads.googleads.v3.common.Segments.product_bidding_category_level1', index=46,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_bidding_category_level2', full_name='google.ads.googleads.v3.common.Segments.product_bidding_category_level2', index=47,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_bidding_category_level3', full_name='google.ads.googleads.v3.common.Segments.product_bidding_category_level3', index=48,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_bidding_category_level4', full_name='google.ads.googleads.v3.common.Segments.product_bidding_category_level4', index=49,
number=59, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_bidding_category_level5', full_name='google.ads.googleads.v3.common.Segments.product_bidding_category_level5', index=50,
number=60, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_brand', full_name='google.ads.googleads.v3.common.Segments.product_brand', index=51,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_channel', full_name='google.ads.googleads.v3.common.Segments.product_channel', index=52,
number=30, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_channel_exclusivity', full_name='google.ads.googleads.v3.common.Segments.product_channel_exclusivity', index=53,
number=31, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_condition', full_name='google.ads.googleads.v3.common.Segments.product_condition', index=54,
number=32, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_country', full_name='google.ads.googleads.v3.common.Segments.product_country', index=55,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_custom_attribute0', full_name='google.ads.googleads.v3.common.Segments.product_custom_attribute0', index=56,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_custom_attribute1', full_name='google.ads.googleads.v3.common.Segments.product_custom_attribute1', index=57,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_custom_attribute2', full_name='google.ads.googleads.v3.common.Segments.product_custom_attribute2', index=58,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_custom_attribute3', full_name='google.ads.googleads.v3.common.Segments.product_custom_attribute3', index=59,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_custom_attribute4', full_name='google.ads.googleads.v3.common.Segments.product_custom_attribute4', index=60,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_item_id', full_name='google.ads.googleads.v3.common.Segments.product_item_id', index=61,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_language', full_name='google.ads.googleads.v3.common.Segments.product_language', index=62,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_merchant_id', full_name='google.ads.googleads.v3.common.Segments.product_merchant_id', index=63,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_store_id', full_name='google.ads.googleads.v3.common.Segments.product_store_id', index=64,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_title', full_name='google.ads.googleads.v3.common.Segments.product_title', index=65,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_type_l1', full_name='google.ads.googleads.v3.common.Segments.product_type_l1', index=66,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_type_l2', full_name='google.ads.googleads.v3.common.Segments.product_type_l2', index=67,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_type_l3', full_name='google.ads.googleads.v3.common.Segments.product_type_l3', index=68,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_type_l4', full_name='google.ads.googleads.v3.common.Segments.product_type_l4', index=69,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product_type_l5', full_name='google.ads.googleads.v3.common.Segments.product_type_l5', index=70,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quarter', full_name='google.ads.googleads.v3.common.Segments.quarter', index=71,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='search_engine_results_page_type', full_name='google.ads.googleads.v3.common.Segments.search_engine_results_page_type', index=72,
number=70, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='search_term_match_type', full_name='google.ads.googleads.v3.common.Segments.search_term_match_type', index=73,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slot', full_name='google.ads.googleads.v3.common.Segments.slot', index=74,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webpage', full_name='google.ads.googleads.v3.common.Segments.webpage', index=75,
number=66, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='week', full_name='google.ads.googleads.v3.common.Segments.week', index=76,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='year', full_name='google.ads.googleads.v3.common.Segments.year', index=77,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1469,
serialized_end=6894,
)
# Descriptor for the Keyword message (a keyword criterion segment).
# Both fields are submessages (type=11): field numbers 1 and 2; their
# message_type links are resolved later, after all descriptors exist.
# Generated by the protocol buffer compiler — do not edit by hand.
_KEYWORD = _descriptor.Descriptor(
  name='Keyword',
  full_name='google.ads.googleads.v3.common.Keyword',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ad_group_criterion', full_name='google.ads.googleads.v3.common.Keyword.ad_group_criterion', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='info', full_name='google.ads.googleads.v3.common.Keyword.info', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte span of this message's definition inside the serialized .proto
  # descriptor embedded in DESCRIPTOR; must not be edited.
  serialized_start=6896,
  serialized_end=7022,
)
# Post-construction linking: bind each Segments field to the enum or wrapper
# message type it refers to. These references live in other generated modules
# and cannot be set while the FieldDescriptors above are being constructed.
# Generated by the protocol buffer compiler — do not edit by hand.
_SEGMENTS.fields_by_name['ad_network_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_ad__network__type__pb2._ADNETWORKTYPEENUM_ADNETWORKTYPE
_SEGMENTS.fields_by_name['click_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_click__type__pb2._CLICKTYPEENUM_CLICKTYPE
_SEGMENTS.fields_by_name['conversion_action'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['conversion_action_category'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__action__category__pb2._CONVERSIONACTIONCATEGORYENUM_CONVERSIONACTIONCATEGORY
_SEGMENTS.fields_by_name['conversion_action_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['conversion_adjustment'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_SEGMENTS.fields_by_name['conversion_attribution_event_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__attribution__event__type__pb2._CONVERSIONATTRIBUTIONEVENTTYPEENUM_CONVERSIONATTRIBUTIONEVENTTYPE
_SEGMENTS.fields_by_name['conversion_lag_bucket'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__lag__bucket__pb2._CONVERSIONLAGBUCKETENUM_CONVERSIONLAGBUCKET
_SEGMENTS.fields_by_name['conversion_or_adjustment_lag_bucket'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_conversion__or__adjustment__lag__bucket__pb2._CONVERSIONORADJUSTMENTLAGBUCKETENUM_CONVERSIONORADJUSTMENTLAGBUCKET
_SEGMENTS.fields_by_name['date'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['day_of_week'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_day__of__week__pb2._DAYOFWEEKENUM_DAYOFWEEK
_SEGMENTS.fields_by_name['device'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_device__pb2._DEVICEENUM_DEVICE
_SEGMENTS.fields_by_name['external_conversion_source'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_external__conversion__source__pb2._EXTERNALCONVERSIONSOURCEENUM_EXTERNALCONVERSIONSOURCE
# All geo_target_* segments are wrapped strings (resource names of geo target constants).
_SEGMENTS.fields_by_name['geo_target_airport'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_canton'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_city'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_country'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_county'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_district'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_metro'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_most_specific_location'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_postal_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_province'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_region'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['geo_target_state'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hotel_booking_window_days'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_SEGMENTS.fields_by_name['hotel_center_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_SEGMENTS.fields_by_name['hotel_check_in_date'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hotel_check_in_day_of_week'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_day__of__week__pb2._DAYOFWEEKENUM_DAYOFWEEK
_SEGMENTS.fields_by_name['hotel_city'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hotel_class'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_SEGMENTS.fields_by_name['hotel_country'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hotel_date_selection_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__date__selection__type__pb2._HOTELDATESELECTIONTYPEENUM_HOTELDATESELECTIONTYPE
_SEGMENTS.fields_by_name['hotel_length_of_stay'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_SEGMENTS.fields_by_name['hotel_rate_rule_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hotel_rate_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__rate__type__pb2._HOTELRATETYPEENUM_HOTELRATETYPE
_SEGMENTS.fields_by_name['hotel_price_bucket'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_hotel__price__bucket__pb2._HOTELPRICEBUCKETENUM_HOTELPRICEBUCKET
_SEGMENTS.fields_by_name['hotel_state'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['hour'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_SEGMENTS.fields_by_name['interaction_on_this_extension'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
# The keyword segment is the only field typed by a message defined in this file.
_SEGMENTS.fields_by_name['keyword'].message_type = _KEYWORD
_SEGMENTS.fields_by_name['month'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['month_of_year'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_month__of__year__pb2._MONTHOFYEARENUM_MONTHOFYEAR
_SEGMENTS.fields_by_name['partner_hotel_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['placeholder_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_placeholder__type__pb2._PLACEHOLDERTYPEENUM_PLACEHOLDERTYPE
_SEGMENTS.fields_by_name['product_aggregator_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_SEGMENTS.fields_by_name['product_bidding_category_level1'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_bidding_category_level2'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_bidding_category_level3'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_bidding_category_level4'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_bidding_category_level5'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_brand'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_channel'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__pb2._PRODUCTCHANNELENUM_PRODUCTCHANNEL
_SEGMENTS.fields_by_name['product_channel_exclusivity'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__channel__exclusivity__pb2._PRODUCTCHANNELEXCLUSIVITYENUM_PRODUCTCHANNELEXCLUSIVITY
_SEGMENTS.fields_by_name['product_condition'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_product__condition__pb2._PRODUCTCONDITIONENUM_PRODUCTCONDITION
_SEGMENTS.fields_by_name['product_country'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_custom_attribute0'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_custom_attribute1'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_custom_attribute2'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_custom_attribute3'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_custom_attribute4'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_item_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_language'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_merchant_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT64VALUE
_SEGMENTS.fields_by_name['product_store_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_title'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_type_l1'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_type_l2'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_type_l3'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_type_l4'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['product_type_l5'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['quarter'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['search_engine_results_page_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__engine__results__page__type__pb2._SEARCHENGINERESULTSPAGETYPEENUM_SEARCHENGINERESULTSPAGETYPE
_SEGMENTS.fields_by_name['search_term_match_type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_search__term__match__type__pb2._SEARCHTERMMATCHTYPEENUM_SEARCHTERMMATCHTYPE
_SEGMENTS.fields_by_name['slot'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_slot__pb2._SLOTENUM_SLOT
_SEGMENTS.fields_by_name['webpage'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['week'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_SEGMENTS.fields_by_name['year'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
# Keyword's two submessage fields: a wrapped-string resource name and the
# KeywordInfo message from the common criteria proto.
_KEYWORD.fields_by_name['ad_group_criterion'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_KEYWORD.fields_by_name['info'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_criteria__pb2._KEYWORDINFO
# Register both top-level message descriptors with this file's descriptor and
# with the default symbol database so they can be looked up by full name.
DESCRIPTOR.message_types_by_name['Segments'] = _SEGMENTS
DESCRIPTOR.message_types_by_name['Keyword'] = _KEYWORD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Segments = _reflection.GeneratedProtocolMessageType('Segments', (_message.Message,), dict(
DESCRIPTOR = _SEGMENTS,
__module__ = 'google.ads.googleads_v3.proto.common.segments_pb2'
,
__doc__ = """Segment only fields.
Attributes:
ad_network_type:
Ad network type.
click_type:
Click type.
conversion_action:
Resource name of the conversion action.
conversion_action_category:
Conversion action category.
conversion_action_name:
Conversion action name.
conversion_adjustment:
This segments your conversion columns by the original
conversion and conversion value vs. the delta if conversions
were adjusted. False row has the data as originally stated;
While true row has the delta between data now and the data as
originally stated. Summing the two together results post-
adjustment data.
conversion_attribution_event_type:
Conversion attribution event type.
conversion_lag_bucket:
An enum value representing the number of days between the
impression and the conversion.
conversion_or_adjustment_lag_bucket:
An enum value representing the number of days between the
impression and the conversion or between the impression and
adjustments to the conversion.
date:
Date to which metrics apply. yyyy-MM-dd format, e.g.,
2018-04-17.
day_of_week:
Day of the week, e.g., MONDAY.
device:
Device to which metrics apply.
external_conversion_source:
External conversion source.
geo_target_airport:
Resource name of the geo target constant that represents an
airport.
geo_target_canton:
Resource name of the geo target constant that represents a
canton.
geo_target_city:
Resource name of the geo target constant that represents a
city.
geo_target_country:
Resource name of the geo target constant that represents a
country.
geo_target_county:
Resource name of the geo target constant that represents a
county.
geo_target_district:
Resource name of the geo target constant that represents a
district.
geo_target_metro:
Resource name of the geo target constant that represents a
metro.
geo_target_most_specific_location:
Resource name of the geo target constant that represents the
most specific location.
geo_target_postal_code:
Resource name of the geo target constant that represents a
postal code.
geo_target_province:
Resource name of the geo target constant that represents a
province.
geo_target_region:
Resource name of the geo target constant that represents a
region.
geo_target_state:
Resource name of the geo target constant that represents a
state.
hotel_booking_window_days:
Hotel booking window in days.
hotel_center_id:
Hotel center ID.
hotel_check_in_date:
Hotel check-in date. Formatted as yyyy-MM-dd.
hotel_check_in_day_of_week:
Hotel check-in day of week.
hotel_city:
Hotel city.
hotel_class:
Hotel class.
hotel_country:
Hotel country.
hotel_date_selection_type:
Hotel date selection type.
hotel_length_of_stay:
Hotel length of stay.
hotel_rate_rule_id:
Hotel rate rule ID.
hotel_rate_type:
Hotel rate type.
hotel_price_bucket:
Hotel price bucket.
hotel_state:
Hotel state.
hour:
Hour of day as a number between 0 and 23, inclusive.
interaction_on_this_extension:
Only used with feed item metrics. Indicates whether the
interaction metrics occurred on the feed item itself or a
different extension or ad unit.
keyword:
Keyword criterion.
month:
Month as represented by the date of the first day of a month.
Formatted as yyyy-MM-dd.
month_of_year:
Month of the year, e.g., January.
partner_hotel_id:
Partner hotel ID.
placeholder_type:
Placeholder type. This is only used with feed item metrics.
product_aggregator_id:
Aggregator ID of the product.
product_bidding_category_level1:
Bidding category (level 1) of the product.
product_bidding_category_level2:
Bidding category (level 2) of the product.
product_bidding_category_level3:
Bidding category (level 3) of the product.
product_bidding_category_level4:
Bidding category (level 4) of the product.
product_bidding_category_level5:
Bidding category (level 5) of the product.
product_brand:
Brand of the product.
product_channel:
Channel of the product.
product_channel_exclusivity:
Channel exclusivity of the product.
product_condition:
Condition of the product.
product_country:
Resource name of the geo target constant for the country of
sale of the product.
product_custom_attribute0:
Custom attribute 0 of the product.
product_custom_attribute1:
Custom attribute 1 of the product.
product_custom_attribute2:
Custom attribute 2 of the product.
product_custom_attribute3:
Custom attribute 3 of the product.
product_custom_attribute4:
Custom attribute 4 of the product.
product_item_id:
Item ID of the product.
product_language:
Resource name of the language constant for the language of the
product.
product_merchant_id:
Merchant ID of the product.
product_store_id:
Store ID of the product.
product_title:
Title of the product.
product_type_l1:
Type (level 1) of the product.
product_type_l2:
Type (level 2) of the product.
product_type_l3:
Type (level 3) of the product.
product_type_l4:
Type (level 4) of the product.
product_type_l5:
Type (level 5) of the product.
quarter:
Quarter as represented by the date of the first day of a
quarter. Uses the calendar year for quarters, e.g., the second
quarter of 2018 starts on 2018-04-01. Formatted as yyyy-MM-dd.
search_engine_results_page_type:
Type of the search engine results page.
search_term_match_type:
Match type of the keyword that triggered the ad, including
variants.
slot:
Position of the ad.
webpage:
Resource name of the ad group criterion that represents
webpage criterion.
week:
Week as defined as Monday through Sunday, and represented by
the date of Monday. Formatted as yyyy-MM-dd.
year:
Year, formatted as yyyy.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.common.Segments)
))
_sym_db.RegisterMessage(Segments)
Keyword = _reflection.GeneratedProtocolMessageType('Keyword', (_message.Message,), dict(
DESCRIPTOR = _KEYWORD,
__module__ = 'google.ads.googleads_v3.proto.common.segments_pb2'
,
__doc__ = """A Keyword criterion segment.
Attributes:
ad_group_criterion:
The AdGroupCriterion resource name.
info:
Keyword info.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.common.Keyword)
))
_sym_db.RegisterMessage(Keyword)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from .mse import mse
from .rmse import rmse |
#!/usr/bin/env python
"""
Wrapper for the Simulated Annealing Solver provided by GSL.
The simulated annealing algorithm takes random walks through the problem
space, looking for points with low energies; in these random walks, the
probability of taking a step is determined by the Boltzmann distribution,
p = e^{-(E_{i+1} - E_i)/(kT)}
if E_{i+1} > E_i, and p = 1 when E_{i+1} <= E_i.
In other words, a step will occur if the new energy is lower. If the new
energy is higher, the transition can still occur, and its likelihood is
proportional to the temperature T and inversely proportional to the energy
difference E_{i+1} - E_i.
The temperature T is initially set to a high value, and a random walk is
carried out at that temperature. Then the temperature is lowered very
slightly according to a "cooling schedule", for example: T -> T/mu_T where
mu_T is slightly greater than 1.
The slight probability of taking a step that gives higher energy is what
allows simulated annealing to frequently get out of local minima.
This wrapper does not follow the GSL interface as closely as the other wrappers
in this package. Instead it expects an object describing the problem with the
required methods. NumericEnsemble illustrates the necessary methods.
The function solve does the real job.
Have a look in the examples directory for the pythonic version of the simple
problem as described in the GSL reference document.
"""
# Author: Pierre Schnizer
# Date : 2003
import copy
import _siman
# The real solver: re-export of the C extension's simulated-annealing driver.
# It expects a problem object implementing the NumericEnsemble interface below.
solve = _siman.solve
class NumericEnsemble:
    """
    A base class implementation to support the use of numeric arrays as
    configurations.  You must overload the following methods in a derived
    class:

        EFunc
        Step
        Metric
        Clone

    If you want the solver to print its status to stdout, also add a
    Print method.
    """
    def __init__(self):
        # Current configuration; subclasses usually store a numeric array here.
        self._data = None

    def SetData(self, data):
        """Store *data* as the current configuration."""
        self._data = data

    def GetData(self):
        """Return the current configuration."""
        return self._data

    def EFunc(self):
        """
        Calculate the energy of the current state.

        Output:
            energy .... a Python float of the current energy
        """
        # Bug fix: the original stub did `return energy` with `energy`
        # undefined, which raised a confusing NameError at call time.
        # Raise NotImplementedError to make the abstract contract explicit.
        raise NotImplementedError("EFunc must be overloaded in a subclass")

    def Step(self, rng, step_size):
        """
        Take a step.

        Input:
            rng       ... a pygsl.rng instance
            step_size ... a Python float for the step size to be taken
        """
        return None

    def Metric(self, other):
        """
        Calculate the distance between this object and the other.

        Input:
            other ... an instance of the same type
        Output:
            length ... a Python float for the distance between this
                       instance and the other.
        """
        # Bug fix: the original stub returned an undefined name `length`.
        raise NotImplementedError("Metric must be overloaded in a subclass")

    def Clone(self):
        """
        Make a clone of the current object.  Please be careful how you step
        and clone so that your objects are really independent!

        Output:
            clone ... an identical clone of this object.
        """
        clone = self.__class__()
        # Shallow copy is enough for flat numeric containers; override for
        # nested data.
        clone.SetData(copy.copy(self._data))
        return clone

    def Print(self):
        """
        Print the current state of the ensemble (no-op by default).
        """

    def __del__(self):
        # Not necessary, just an illustration of cleanup.
        del self._data
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 10:51:18 2018
@author: olmer.garciab
"""
# implementing class example
# Transition matrix of the Markov reward process: P[i][j] is the probability
# of moving from state i to state j (rows sum to 1; state 6 is absorbing).
P=[[0,0.5,0,0,0.,0.5,0],
   [0,0,0.8,0,0.0,0,0.2],
   [0,0.,0,0.6,0.4,0,0],
   [0,0.,0,0,0.,0,1],
   [0.2,0.4,0.4,0,0,0,0],
   [0.1,0.,0,0,0.,0.9,0],
   [0,0.,0,0,0.,0,1]]
# Immediate reward received on leaving each state.
R=[-2,-2,-2,10,1,-1,0]
# total dicount reward
def G_t(S, R, gamma):
    """Total discounted return of the state sequence S.

    Sums R[s] * gamma**t over the visited states s at time steps t = 0, 1, ...
    """
    return sum(R[state] * gamma ** step for step, state in enumerate(S))
#g = lambda y: sum( f(y) for f in (lambda x: x**i for i in range(n)) )
# Example: discounted return of one sampled state chain S_1 with discount 0.5.
gamma=0.5
S_1=[0,1,2,3,6]
print(G_t(S_1,R,gamma))
#dynamic programming
#based in #https://harderchoices.com/2018/02/26/dynamic-programming-in-python-reinforcement-learning/
def iterative_value_function(N, theta=0.0001, gamma=0.9):
    """Iterative value evaluation for the N-state Markov reward process.

    Reads the module-level transition matrix ``P`` and reward vector ``R``.
    Sweeps Bellman backups (in place, Gauss-Seidel style) until the largest
    per-state change drops below ``theta``; returns values rounded to 2
    decimals.
    NOTE(review): with gamma=1 termination relies on the chain reaching the
    zero-reward absorbing state -- confirm before reusing on other chains.
    """
    V_s =R.copy() # 1. start from the immediate rewards
    probablitiy_map = P # 2. transition probabilities (module global)
    delta = 100 # 3. sentinel so the first loop test passes
    while not delta < theta: # 4. iterate until converged
        delta = 0 # 5.
        for state in range(0,N): # 6.
            v = V_s[state] # 7. value before this backup
            total =R[state] # 8. immediate reward
            for state_prime in range(0,N):
                total += probablitiy_map[state][state_prime] * (gamma * V_s[state_prime])
                #print(total)
            V_s[state] =total # 9. in-place update (used by later states)
            delta = max(delta, abs(v - V_s[state])) # 10. track largest change
        #print(delta)
    V_s=[round(v,2) for v in V_s]
    return V_s # 11.
# Evaluate the chain for several discount factors.
N=len(R)
print('gamma',0.9,iterative_value_function(N,gamma=0.9))
print('gamma',1,iterative_value_function(N,gamma=1))
print('gamma',0,iterative_value_function(N,gamma=0))
#vectorial way: closed-form solution of the Bellman equation,
# v = (I - gamma*P)^-1 R, for comparison against the iterative result.
import numpy as np
from numpy.linalg import inv
gamma=0.9
P=np.array(P)
R=np.array(R).reshape((-1,1))
v=np.matmul(inv(np.eye(N)-gamma*P),R)
print(v)
|
#!/usr/bin/env python3
import argparse
import binascii
import socket
import struct
import threading
import os
import sys
import aiohttp
from aiohttp import web
# Configuration
PORT_BLOBS = 9021  # TCP port for the raw file-dump protocol (server_blobs)
PORT_DEBUG = 9022  # TCP port for the byte-stream debug log (server_debug)
# Context
# Target path selected by the most recent websocket TEXT frame.
current_file = None
# Sockets
def server_blobs():
    """Accept TCP connections on PORT_BLOBS and save received files.

    Wire format, repeated per file: u64 (struct 'Q') path length, the UTF-8
    path bytes, u64 data length, the data bytes.  Files are written beneath
    the local dump/ directory.  Runs forever, one client at a time.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('', PORT_BLOBS))
    except socket.error as msg:
        print('blobs-server: Bind failed: %s\n' % msg)
        sys.exit()
    s.listen(5)
    while True:
        c, addr = s.accept()
        print('blobs-server: Client connected: %s:%s' % addr)
        while True:
            # File path (length-prefixed); an empty read means disconnect.
            path_size = c.recv(8)
            if not path_size: break
            path_size = struct.unpack('Q', path_size)[0]
            if not path_size: break
            path = c.recv(path_size, socket.MSG_WAITALL)
            path = os.path.normpath(
                os.path.join('dump', path.decode('utf-8')))
            # Security fix: the path comes from the network -- reject any
            # name that escapes the dump/ directory (e.g. via "../").
            if not (path == 'dump' or path.startswith('dump' + os.sep)):
                print('blobs-server: Rejected unsafe path: %s' % path)
                break
            # File data (length-prefixed)
            data_size = c.recv(8)
            if not data_size: break
            data_size = struct.unpack('Q', data_size)[0]
            if not data_size: break
            data = c.recv(data_size, socket.MSG_WAITALL)
            # Save file, creating intermediate directories as needed
            path_dir = os.path.dirname(path)
            if path_dir and not os.path.exists(path_dir):
                os.makedirs(path_dir, exist_ok=True)
            with open(path, 'wb') as f:
                f.write(data)
        print('blobs-server: Client disconnected: %s:%s' % addr)
        c.close()
    s.close()
def server_debug():
    """Accept TCP connections on PORT_DEBUG and mirror every received byte
    to stdout.  Runs forever, one client at a time."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        listener.bind(('', PORT_DEBUG))
    except socket.error as msg:
        print('debug-server: Bind failed: %s\n' % msg)
        sys.exit()
    listener.listen(5)
    while True:
        conn, addr = listener.accept()
        print('debug-server: Client connected: %s:%s' % addr)
        while True:
            # TODO: There's surely a better way, but whatever
            chunk = conn.recv(1)
            if not chunk:
                break
            sys.stdout.buffer.write(chunk)
            sys.stdout.flush()
        print('debug-server: Client disconnected: %s:%s' % addr)
        conn.close()
    listener.close()
# Website
async def handle_index(request):
    """Serve the dumper front-end page (static index.html)."""
    response = web.FileResponse('./index.html')
    return response
async def handle_websocket(request):
    """Receive dump files over a websocket.

    Protocol: a TEXT frame selects the target path (relative to dump/);
    each subsequent BINARY frame is written to the selected path.
    """
    # Bug fix: without this declaration the assignment below created a
    # function-local variable, so the module-level context was never set and
    # a BINARY frame arriving before any TEXT frame raised UnboundLocalError.
    global current_file
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    async for msg in ws:
        if msg.type == aiohttp.WSMsgType.TEXT:
            candidate = os.path.normpath(
                os.path.join('dump', msg.data))
            # Security: the name comes from the client -- only accept paths
            # that stay inside the dump/ directory.
            if candidate == 'dump' or candidate.startswith('dump' + os.sep):
                current_file = candidate
        elif msg.type == aiohttp.WSMsgType.BINARY and current_file:
            os.makedirs(os.path.dirname(current_file), exist_ok=True)
            with open(current_file, 'wb') as f:
                f.write(msg.data)
        elif msg.type == aiohttp.WSMsgType.ERROR:
            print('WS connection closed with exception %s' % ws.exception())
    print('WS connection closed')
    return ws
def main():
    """Parse arguments, start the dump/debug TCP servers, then serve HTTP/WS."""
    # Handle arguments
    parser = argparse.ArgumentParser(
        description='Create server for Orbital dumper.')
    parser.add_argument('-p', '--port', type=int, default=80, required=False,
                        help='Port for HTTP/WS server')
    args = parser.parse_args()
    # Create sockets -- each TCP server runs its own accept loop forever
    t_blobs = threading.Thread(target=server_blobs)
    t_debug = threading.Thread(target=server_debug)
    t_blobs.start()
    t_debug.start()
    # Create webserver (blocks here serving the page and websocket endpoint)
    app = web.Application()
    app.router.add_get('/', handle_index)
    app.router.add_get('/ws', handle_websocket)
    app.router.add_static('/', path='.')
    web.run_app(app, port=args.port)
if __name__ == '__main__':
    main()
|
## THIS IS A MODIFIED EXAMPLE FROM THE CIRQ WEBSITE ##
import cirq
import numpy as np
import matplotlib.pylab as plt
## define a custom gate in Cirq
class MyGate1(cirq.SingleQubitGate):
    """Custom single-qubit gate with the (exactly representable) rotation
    matrix [[3/5, 4/5], [-4/5, 3/5]]; drawn as 'λ' in circuit diagrams."""

    def _unitary_(self):
        # Build the matrix from its two distinct entries.
        c, s = 3 / 5, 4 / 5
        return np.array([[c, s], [-s, c]])

    def __str__(self):
        return 'λ'
## use the custom gate on a single named qubit
a = cirq.NamedQubit('a')
gate1 = MyGate1()
circuit1 = cirq.Circuit(gate1(a))
print(circuit1)
## simulate the circuit and print the final state vector
simulator = cirq.Simulator()
result1 = simulator.simulate(circuit1)
print(result1)
## Expected output:
##a: ───λ───
##measurements: (no measurements)
##output vector: 0.6|0⟩ - 0.8|1⟩
|
# Demonstrate enumerate(): pair each element of y with its index and print.
x = [4, 4, 4, 1, 2, 3, 3, 4, 2, 4]
y = ["4", "4", "4", "1", "2", "3", "3", "4", "2", "4"]
z = enumerate(y)
for index, value in z:
    print(index, value)
import uvicorn
from fastapi import FastAPI, File, UploadFile
from starlette.responses import StreamingResponse
from inference_api import get_FINED_edge, read_imagefile
import cv2
import numpy as np
import io
app = FastAPI()
#route: homepage, used as a simple liveness check
@app.get('/')
def index():
    """Return a static JSON payload confirming the service is up."""
    return {"Data" : "Homepage Test"}
@app.post("/predict/image")
async def predict_api(file: UploadFile = File(...)):
    """Run FINED edge detection on an uploaded image.

    Accepts jpg/jpeg/png uploads and streams the edge map back as a PNG.
    Returns a plain error string for unsupported extensions or undecodable
    payloads.
    """
    extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
    if not extension:
        return "Image must be jpg or png format!"
    contents = await file.read()
    # Bug fix: np.fromstring is deprecated for binary input and emits a
    # DeprecationWarning; np.frombuffer is the supported equivalent.
    nparr = np.frombuffer(contents, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Robustness: imdecode returns None for corrupt/non-image payloads.
    if img is None:
        return "Image must be jpg or png format!"
    prediction = get_FINED_edge(img)
    res, im_png = cv2.imencode(".png", prediction)
    return StreamingResponse(io.BytesIO(im_png.tobytes()), media_type="image/png")
#return {"Data" : "OK"}
# Launch a local development server when executed directly.
if __name__ == '__main__':
    uvicorn.run(app,host="0.0.0.0",port=8000)
|
import serial
import time
# Open the USB serial device at 115200 baud and stream ../cmd.txt to it,
# one line per write, pausing briefly between writes.
ser = serial.Serial(
    port='/dev/ttyUSB0',
    baudrate=115200
)
try:
    with open('../cmd.txt') as f:
        line = f.readline()
        while line:
            ser.write(line.encode())
            line = f.readline()
            time.sleep(0.1)
except PermissionError:
    print("Check your permission!")
|
import sys
import math
import sys
import os
import csv
import numpy as np
class naive_bayes():
    """Naive Bayes classifier over a training set of (attributes, labels).

    For each class it estimates the prior from label frequencies and the
    per-attribute likelihoods (categorical counts with optional m-estimate
    smoothing, or a Gaussian density for numerical attributes), multiplies
    them into an unnormalised posterior, and predicts the class with the
    highest posterior (maximum a posteriori).  Label values are assumed to
    be castable to int.
    """
    ########## SET OF FUNTIONS FOR THE NAIVE BAYES CLASSIFIER ############
    #The naive bayes classifier scans the training set to find the posterior
    #probabilities of a class given an instance. Then, the classifier assigns
    #to the instance the class with the highest posterior probability.
    ####### FUNCTION TO CALCULATE THE PRIOR PROBABILITIES OF EACH CLASS ########
    #The following function calculates the prior probabilities of a class/label.
    #We calculate the prior by counting the number of times a label appears and
    #dividing this number by the total number of instances.
    def calculation_prior_probabilities(self,labels):
        """Return a (2 x n_classes) matrix: row 0 label names, row 1 priors."""
        #first, find out the name of the unique labels and their frequencies
        unique_labels, frequency_labels_vector=\
            np.unique(labels,return_counts=True)
        #Change the format of the returned vector to perform divisions later.
        #NOTE(review): astype returns a new array and the result is discarded,
        #so this line is a no-op; harmless because the division below casts
        #through float() anyway.
        frequency_labels_vector.astype(float)
        #Calculate the number of instances 'N' to then find the probabilities
        #using the frequencies of each label
        number_labels=len(labels)
        #Calculate the number of unique labels
        number_unique_labels=len(unique_labels)
        #Divide the frequencies by the total number of labels to get the prior.
        prior_probabilities_vector=frequency_labels_vector/float(number_labels)
        #Create a matrix to display the results properly and keep track
        #of the labels. The matrix dimensions are: (2 x number of unique labels)
        #In each column we store the label identifier/name and its prior.
        #The first row contains the name of the labels and the second the priors
        matrix_of_priors=np.concatenate((
            unique_labels.reshape((1,number_unique_labels)),
            prior_probabilities_vector.reshape((1,number_unique_labels))
            ), axis=0)
        #return the generated matrix
        return matrix_of_priors
    ###################### END OF THE FUNCTION ###################
    ######## FUNCTION TO CALCULATE THE LIKEHOOD PROBABILITIES ########
    ######## OF A GIVEN VALUE OF AN ATTRIBUTE FOR A SINGLE ATTRIBUTE #####
    #The function returns the likehood of an attribute value. We will use this
    #function in combination with other functions to calculate the likehood of
    #all the attribute values in an instance.
    def likehood_values_in_single_attribute(self,attributes,labels,\
        attribute_index,value_look_for, m_estimate= True):
        """Return a (2 x n_classes) matrix: row 0 label names, row 1 the
        likelihood P(value | class) of ``value_look_for`` in attribute column
        ``attribute_index`` (m-estimate smoothed when ``m_estimate``)."""
        #Make sure the inputs are in an array format.
        labels=np.array(labels)
        #Identify basic characteristics of the inputs.
        number_instances = attributes.shape[0]
        number_attributes = attributes.shape[1]
        #Find the values/names of the unique labels and their frequencies.
        label_values, frequency_labels = np.unique(labels, return_counts=True)
        #Retrieve the attribute column where the value we are querying belongs.
        list_attribute_of_analysis=attributes[:,attribute_index]
        #Find unique attribute values in the dataset and their frequencies.
        #'attribute_domain' stores the unique values in the attribute domain.
        #'attribute_domain_counter' registers the frequency of each unique value.
        attribute_domain , attribute_domain_counter= \
            np.unique(list_attribute_of_analysis, return_counts=True)
        #We create a numpy array to handle the values in a nicer way.
        #Dimensions: (number of unique attribute values x float)
        #NOTE(review): attribute_domain_matrix is never read afterwards --
        #candidate for removal.
        attribute_domain_matrix=\
            np.array((np.transpose(attribute_domain),\
            np.transpose(attribute_domain_counter)), dtype=float)
        #Sometimes there is not enough data and the likehood returns 0.
        #To solve this problem we can smooth probabilities with the m-estimate
        #or laplace smoothing. The m-estimate requires the number of different
        #values that the attribute can take. Then using this we get
        #the 'prior estimate' of the attribute value. We will use this later
        k_number_values_attribute_take= float(len(attribute_domain))
        attribute_prior_estimate= float(float(1)/k_number_values_attribute_take)
        #Next step, we create a boolean index of the value we are looking for.
        #With the boolean index we will be able to acquire the labels assigned
        #to the value we are looking for.
        boolean_vector_attribute_domain =\
            [value_look_for == instance_attribute for instance_attribute in\
            list_attribute_of_analysis]
        #Transform the boolean list into an array to handle it better.
        boolean_vector_attribute_domain=np.array(boolean_vector_attribute_domain)
        #Using the boolean index, generate an array with the labels that
        #correspond to the attribute values we are analyzing.
        labels_of_observed_attribute=labels[boolean_vector_attribute_domain]
        #Now, we create a vector to store the number of times each label is
        #assigned to the value whose posterior probability we want.
        #Dimension: (number of labels x 1)
        attribute_domain_class_counter_vector=\
            np.zeros((len(label_values),1), dtype=float)
        #We loop all values the labels can acquire and we count how many of
        #these labels are assigned to the value we want.
        for label_index in range(len(label_values)):
            #Get the label we want to count with the index
            label_analysis=label_values[label_index]
            #Obtain an array where the labels are the same as the label we are
            #parsing
            vector_labels_equal_parsed_label=\
                np.where(labels_of_observed_attribute==label_analysis)[0]
            #Count the times that the specific label appears when the attribute
            #acquires the value we are looking for.
            counter_label_in_attribute=len(vector_labels_equal_parsed_label)
            #Implementation of the m-estimate methodology: smooths the likehood
            #probabilities to avoid problems in the likehood multiplication.
            #Hence, we avoid problems when one of the likehoods is 0.
            #Here we add to the counter an estimated probability multiplied by
            #the number of attributes of the input
            if m_estimate == True:
                counter_label_in_attribute=\
                    counter_label_in_attribute+\
                    attribute_prior_estimate*number_attributes
            #Replace the value in the count matrix
            attribute_domain_class_counter_vector[label_index]=\
                counter_label_in_attribute
        #Get the likehood vector: We divide the number of times that the given
        #attribute value is labelled as a specific class by the class frequency.
        #Hence, we obtain the likehoods of the given value per every class.
        #If we are smoothing with the m-estimate we have to add the number of
        #attributes that each instance has to the denominator.
        if m_estimate == True:
            frequency_labels=frequency_labels+number_attributes
        #Calculate the likehoods
        likehoods_attribute_per_class=\
            np.divide(np.transpose(attribute_domain_class_counter_vector),\
            frequency_labels)
        #To keep track of the labels, we create a matrix that indicates which
        #label belongs to each likehood.
        likehoods_matrix=\
            np.concatenate((label_values.reshape((1,len(label_values)))\
            ,likehoods_attribute_per_class), axis=0)
        #return the matrix we have created
        return likehoods_matrix
    ###################### END OF THE FUNCTION ######################
    ######## FUNCTION TO CALCULATE THE LIKEHOOD PROBABILITIES ########
    ## GIVEN A NORMAL DISTRIBUTION AND THE MEAN AND STD OF THE DISTRIBUTION ##
    def pdf(self, value_look, mean, std):
        """Gaussian density of ``value_look`` under N(mean, std**2).

        NOTE(review): divides by ``std`` -- a zero-variance attribute/class
        pair raises ZeroDivisionError; confirm inputs always vary.
        """
        value_look = float(value_look - mean) / std
        return math.exp(-value_look*value_look/2.0) / math.sqrt(2.0*math.pi) /std
    ###################### END OF THE FUNCTION ######################
    ######## FUNCTION TO CALCULATE THE LIKEHOOD PROBABILITIES ########
    ######## OF NUMERICAL DATA ########
    def likehood_values_in_single_attribute_numerical(self,attributes,labels,\
        attribute_index,value_look_for):
        """Return a (2 x n_classes) matrix of Gaussian likelihoods of
        ``value_look_for`` for the numerical attribute ``attribute_index``."""
        #Make sure the inputs are in an array format.
        labels=np.array(labels)
        #Identify basic characteristics of the inputs.
        number_instances = attributes.shape[0]
        number_attributes = attributes.shape[1]
        #Find the values/names of the unique labels and their frequencies.
        label_values, frequency_labels = np.unique(labels, return_counts=True)
        #Retrieve the attribute column where the value we are querying belongs.
        list_attribute_of_analysis=attributes[:,attribute_index]
        #Now, we create a vector to store the likelihood of the value for each
        #label. Dimension: (number of labels x 1)
        attribute_likehood_to_class=\
            np.zeros((len(label_values),1), dtype=float)
        #We loop all values the labels can acquire and compute the density of
        #the queried value under each per-class Gaussian.
        for label_index in range(len(label_values)):
            #Get the label we want to analyse with the index
            label_analysis=label_values[label_index]
            boolean_vector_label_analysis=\
                np.array([label_analysis == instance_label for instance_label in\
                labels])
            #Obtain an array where the labels are the same as the label we are
            #parsing
            vector_attributes_with_labels_equal_to_parsed_label=\
                list_attribute_of_analysis[boolean_vector_label_analysis]
            #Get the mean and the std of the instances labelled with the label
            #we are analysing
            mean_numeric_attribute_labels=\
                vector_attributes_with_labels_equal_to_parsed_label.mean()
            std_numeric_attribute_labels=\
                vector_attributes_with_labels_equal_to_parsed_label.std()
            #Based on the std and the mean we get the likehood of our numerical
            #value for the label, assuming a gaussian distribution
            likehood_value=self.pdf(value_look_for, \
                mean_numeric_attribute_labels, std_numeric_attribute_labels)
            attribute_likehood_to_class[label_index]=likehood_value
        likehood_matrix=\
            np.concatenate((label_values.reshape((1,len(label_values)))\
            ,np.transpose(attribute_likehood_to_class)),axis=0)
        return likehood_matrix
    ###################### END OF THE FUNCTION ######################
    ######## FUNCTION TO CALCULATE THE LIKEHOOD PROBABILITIES ########
    ############## OF A GIVEN INSTANCE FOR ALL THE ATTRIBUTES #######
    #This function uses 'likehood_values_in_single_attribute' (categorical) or
    #its numerical variant to return the likehoods of all the attribute values
    #of a given instance.
    def find_likehood_probabilities_of_instance(self, instance, attributes,\
        labels, numerical_attribute_indication_matrix):
        """Return an (n_attributes x n_classes) matrix of per-attribute
        likelihoods for ``instance``; a 1 in
        ``numerical_attribute_indication_matrix`` marks a numerical attribute.
        """
        #Make sure the inputs are in the right format
        instance=np.array(instance)
        attributes=np.array(attributes)
        #First, we get basic information about the inputs.
        number_attributes=instance.shape[0]
        number_instances=attributes.shape[0]
        label_values, frequency_labels = np.unique(labels, return_counts=True)
        number_labels=len(label_values)
        #Create a matrix to store the likehoods for every label for every
        #attribute in the instance we want to classify.
        #Dimension:(dimension of the feature vector x number different labels)
        instance_likehood_matrix=\
            np.zeros((number_attributes,number_labels),dtype=float)
        #Loop all the attribute values in the instance we want to classify.
        for attribute_values_index in range(number_attributes):
            #Get the value of the attribute we are parsing
            attribute_value_analysis=instance[attribute_values_index]
            numeric_indication=\
                numerical_attribute_indication_matrix[attribute_values_index]
            if numeric_indication ==1:
                #Numerical attribute: Gaussian likelihood ([1] drops the label row)
                likehood_attribute_value_analysis=\
                    self.likehood_values_in_single_attribute_numerical(attributes,labels,\
                    attribute_values_index,attribute_value_analysis)[1]
            else:
                #Get the likehood of the attribute for each class
                likehood_attribute_value_analysis=\
                    self.likehood_values_in_single_attribute(attributes,labels,\
                    attribute_values_index,attribute_value_analysis)[1]
            #Put the resulting vector into the matrix that stores the likehoods.
            instance_likehood_matrix[attribute_values_index]=\
                likehood_attribute_value_analysis
        return instance_likehood_matrix
    ###################### END OF THE FUNCTION ######################
    #### FUNCTION TO CALCULATE THE POSTERIOR PROBABILITIES FOR EVERY CLASS ####
    ######### FOR A GIVEN INSTANCE FOR ALL THE ATTRIBUTES ###########
    #The function calculates the posterior probabilities of a given instance
    #for every class. Following naive bayes theory we multiply all the
    #likehoods of the single attribute values of the instance (attributes
    #assumed independent) to get the instance likehood per class, then
    #multiply by the class prior to get the posterior of the class.
    def posterior_each_class_claculation (self,likehood_matrix, prior_vector):
        """Return a (2 x n_classes) matrix: row 0 label names, row 1 the
        unnormalised posteriors (likelihood product times prior)."""
        #Multiply all the elements of each column of the matrix with all the
        #likehoods for every class for every attribute of the instance.
        #The likehoods are in the columns
        product_likehood=np.prod(likehood_matrix , axis=0)
        #Element-wise multiply the prior probabilities with the likehood
        #per class vector to get the posterior vector.
        posterior_vector_each_class=np.multiply(product_likehood,prior_vector[1])
        #Create a matrix with the name of the labels and the posteriors.
        posterior_matrix_each_class=np.concatenate\
            ((prior_vector[0].reshape((1,len(prior_vector[0]))),\
            posterior_vector_each_class.reshape((1,len(posterior_vector_each_class)))),axis=0)
        #Return the new matrix
        return posterior_matrix_each_class
    ###################### END OF THE FUNCTION ######################
    ########## FUNCTION TO PERFORM THE MAXIMUM A POSTERIORI ESTIMATE ##########
    #The input is a matrix with all the posterior probabilities of an instance
    #and an indication of the label of every posterior.
    def maximum_aposteriori_estimate(self,posterior_matrix_each_class):
        """Return (as int) the label with the largest posterior."""
        #Get the index of the maximum posterior probability
        index_maximum_posterior=np.argmax(posterior_matrix_each_class[1])
        #Using the index obtain the class label
        class_maximum_posterior=\
            int(posterior_matrix_each_class[0][index_maximum_posterior])
        ##return the class
        return class_maximum_posterior
    ###################### END OF THE FUNCTION ######################
    ########## FUNCTION TO PERFORM THE NAIVE BAYES CLASSIFIER ################
    ####### COMPILATION OF NAIVE BAYES FUNCTIONS TO CLASSIFY #######
    #Here we compile all the previously created functions to simplify the
    #classification of multiple instances.
    def naive_bayes_classifier_instance(self,attributes,labels, instance_matrix,\
        numerical_indications):
        """Classify a single instance (1-D feature vector); return a (1 x 1)
        int vector with the predicted label."""
        #Make sure the input is in the required format
        instance_matrix=np.array(instance_matrix).reshape((1,len(instance_matrix)))
        #Basic information of the input we are using
        number_instances_to_classifify=instance_matrix.shape[0]
        #Create a vector to store the classifications.
        classfication_vector=\
            np.zeros((1,number_instances_to_classifify),dtype=int)
        #Loop through all the instances that we want to predict.
        for instance_to_predict_index in range(number_instances_to_classifify):
            #Get the instance we want to predict from the instance matrix
            instance_to_predict=instance_matrix[instance_to_predict_index,:]
            #Calculate the label priors
            priors=self.calculation_prior_probabilities(labels)
            #Know whether the attribute is numerical or categorical based on
            #the array we created.
            #NOTE(review): numerical_indicator is computed but never used, and
            #it indexes by instance rather than by attribute; the full
            #per-attribute flag array is what gets passed on below --
            #candidate for removal.
            numerical_indicator=numerical_indications[instance_to_predict_index]
            #Get the likehoods of the instance per each class
            likehood_instance_each_class=\
                self.find_likehood_probabilities_of_instance(instance_to_predict,\
                attributes,labels,numerical_indications)
            #Calculate the posteriors of each class for the given instance.
            posterior_each_class=\
                self.posterior_each_class_claculation(likehood_instance_each_class,priors)
            #Classify by assigning the label with the highest posterior.
            classification_instance=\
                self.maximum_aposteriori_estimate(posterior_each_class)
            #Put the result of the prediction into the vector that stores the
            #predictions.
            classfication_vector[0,instance_to_predict_index]=\
                classification_instance
        #return the vector with the predictions
        return classfication_vector
    ###################### END OF THE FUNCTION ######################
    def naive_bayes_classifier(self,attributes,labels, instance_matrix,\
        numerical_indications):
        """Classify every row of ``instance_matrix``; return an
        (n_instances x 1) int matrix of predicted labels."""
        #Create an array with the same number of instances as the matrix we
        #are classifying to store the results.
        classification_matrix=np.zeros((len(instance_matrix),1), dtype=int)
        #Use the naive bayes classifier for each instance of the dataset
        for instance_index in range(len(instance_matrix)):
            #get the instance we classify
            instance_to_classify=np.array(instance_matrix[instance_index,:])
            #classify the instance
            classification=self.naive_bayes_classifier_instance(attributes,labels,\
                instance_to_classify,numerical_indications)
            #Place the classification into the result matrix
            classification_matrix[instance_index,0]=classification
        return classification_matrix
############### END OF THE NAIVE BAYES FUNCTIONS #################
|
import requests
import sys
import re
def get_merged_pull_reqs_since_last_release(token):
    """
    Get all the merged pull requests since the last release.

    :param token: a GitHub API token.
    :return: a list of PR dicts, newest first, up to (but excluding) the
        most recent "Release v..." PR.
    """
    stopPattern = r"^(r|R)elease v"
    pull_reqs = []
    found_last_release = False
    page = 1
    print("Getting PRs since last release.")
    while not found_last_release:
        data = get_merged_pull_reqs(token, page)
        # Bug fix: if the API runs out of pages before a release PR is
        # found, the loop previously spun forever requesting empty pages.
        if not data:
            break
        # assume the release PR is not on this page; take everything
        last_release_index = len(data)
        for i in range(len(data)):
            if re.search(stopPattern, data[i]["title"]):
                found_last_release = True
                last_release_index = i
                break
        pull_reqs.extend(data[:last_release_index])
        page += 1
    # should contain all the PRs since last release
    return pull_reqs
def get_merged_pull_reqs(token, page):
    """
    Get the merged pull requests based on page. There are
    100 results per page. See https://docs.github.com/en/rest/reference/pulls
    for more details on the parameters.
    :param token, a GitHub API token.
    :param page, the page number.
    """
    api_url = "https://api.github.com/repos/devicons/devicon/pulls"
    request_headers = {
        "Authorization": f"token {token}"
    }
    request_params = {
        "accept": "application/vnd.github.v3+json",
        "state": "closed",
        "per_page": 100,
        "page": page,
    }
    print(f"Querying the GitHub API for requests page #{page}")
    response = requests.get(api_url, headers=request_headers, params=request_params)
    # requests.Response is falsy for 4xx/5xx status codes.
    if not response:
        print(f"Can't query the GitHub API. Status code is {response.status_code}. Message is {response.text}")
        sys.exit(1)
    # Closed PRs include rejected ones; keep only those actually merged.
    return [pull_req for pull_req in response.json()
            if pull_req["merged_at"] is not None]
def is_feature_icon(pull_req_data):
    """
    Check whether the pullData is a feature:icon PR.
    :param pull_req_data - the data on a specific pull request from GitHub.
    :return true if the pullData has a label named "feature:icon"
    """
    return any(label["name"] == "feature:icon"
               for label in pull_req_data["labels"])
def find_all_authors(pull_req_data, token):
    """
    Find all the authors of a PR based on its commits.
    :param pull_req_data - the data on a specific pull request from GitHub.
    :param token - a GitHub API token.
    """
    auth_headers = {
        "Authorization": f"token {token}"
    }
    response = requests.get(pull_req_data["commits_url"], headers=auth_headers)
    if not response:
        print(f"Can't query the GitHub API. Status code is {response.status_code}")
        print("Response is: ", response.text)
        return
    authors = set()  # want unique authors only
    for commit in response.json():
        try:
            # this contains proper referenceable github name
            authors.add(commit["author"]["login"])
        except TypeError:
            # special case: commit has no linked GitHub account
            authors.add(commit["commit"]["author"]["name"])
            print(f"This URL didn't have an `author` attribute: {pull_req_data['commits_url']}")
    return ", ".join("@" + author for author in authors)
|
#!/usr/bin/env python
# Standard packages
import sys
import argparse
# Third-party packages
from toil.job import Job
# Package methods
from ddb import configuration
from ddb_ngsflow import pipeline
from ddb_ngsflow.utils import utilities
if __name__ == "__main__":
    # Command-line interface: sample sheet + settings file, plus Toil's own
    # runner options (job store, restart, etc.).
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
    parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # args.logLevel = "INFO"
    sys.stdout.write("Parsing configuration data\n")
    config = configuration.configure_runtime(args.configuration)
    sys.stdout.write("Parsing sample data\n")
    samples = configuration.configure_samples(args.samples_file, config)
    # Root job fans out one coverage-scan child job per sample.
    root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs)
    sys.stdout.write("Processing samples:\n")
    for sample in samples:
        sys.stdout.write("{}\n".format(sample))
    sample_results = dict()
    for sample in samples:
        scan_job = Job.wrapJobFn(utilities.read_coverage, config, sample, "{}.diagnosetargets.vcf".format(sample),
                                 cores=1, memory="2G")
        # rv() is the job's promised return value, resolved once it finishes.
        sample_results[sample] = scan_job.rv()
        root_job.addChild(scan_job)
    # Summary runs as a follow-on, i.e. after all per-sample scans complete.
    summarize_job = Job.wrapJobFn(utilities.generate_coverage_summary, config, sample_results)
    root_job.addFollowOn(summarize_job)
    # Start workflow execution
    Job.Runner.startToil(root_job, args)
|
# Simple checkout: read the purchase total, ask for the payment method, and
# print the final price (discounts for upfront payment, 20% interest when
# paying in 3 or more installments).  ANSI escape codes colour the output.
print('\033[1;34m{:=^40}\033[m'.format(' LOJA OLHOS DA CARA '))
v = float(input('\033[1;33mQual o valor das compras? R$\033[m'))
print('\033[36m-=-'*15)
op = int(input('''\033[1;33mQual a sua condição de pagamento?\033[m
\033[37m[1]-Á vista dinheiro/cheque (10% Desconto)\033[m
\033[36m[2]-Á vista no cartão (5% Desconto)\033[m
\033[37m[3]-Em 2x no cartão (Preço normal)\033[m
\033[36m[4]-3x ou mais no cartão (20% Juros)\033[m
=>'''))
print('\033[36m-=-'*15)
# Work out the price for the chosen payment method.
if op == 1:
    print('\033[32mSua compra de R${} com 10% de desconto será {:.2f}\033[m'.format(v, v - (0.1 * v)))  # 10% discount
elif op == 2:
    # Bug fix: the 5% discount was computed as 0.5 * v (i.e. 50% off);
    # the correct factor is 0.05.
    print('\033[32mSua compra de R${} com 5% de desconto será {:.2f}\033[m'.format(v, v - (0.05 * v)))  # 5% discount
elif op == 3:
    print('\033[32mVocê irá pagar , então, R${}\033[m'.format(v))
elif op == 4:
    parcelas = int(input('\033[1;33mEm quantas parcelas você irá pagar? \033[m'))
    vJuros = v + (0.20 * v)
    parcelasJuros = vJuros / parcelas
    print('''\033[32mSua compra será parcelada em {}x de R${:.2f} com juros
Sua compra de R${} vai custar R${:.2f} no final\033[m'''.format(parcelas, parcelasJuros, v, vJuros))
else:
    print('\033[1;31mOpção inválida de pagamento, tente novamente\033[m')
|
# Generated by Django 3.2.11 on 2022-01-13 19:01
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the direct course and user foreign keys from Assignment.

    NOTE(review): this assumes 0009_populate_enrollments migrated every
    existing relationship into enrollment records -- confirm before running
    against production data.
    """
    dependencies = [
        ("assignments", "0009_populate_enrollments"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="assignment",
            name="course",
        ),
        migrations.RemoveField(
            model_name="assignment",
            name="user",
        ),
    ]
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
"""
Copyright 2015 Austin Ankney, Ming Fang, Wenjun Wang and Yao Zhou
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file defines the concrete control flow logic
"""
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from enum import Enum
"""
Note: This file also contains enumerations for `Type`, `Action`, `State`
To avoid conflict:
    the previous database model `Type` has been renamed to `Types`
    the previous database model `Action` has been renamed to `Actions`
"""
class Type(Enum):
    """Enumeration of supported entity types; 10 marks an unknown type."""
    News = 1
    Restaurant = 2
    Movie = 3
    Unknown = 10
class Action(Enum):
    """
    Enum for actions.

    Note:
        Wherever the code refers to ``aid``, it means a member of this enum.
        To get an int representation, use ``aid.value``.
        To get a str representation, use ``aid.name``.
    """
    NextRandomComment = 1
    NextOppositeComment = 2
    NextPositiveComment = 3
    NextNegativeComment = 4
    NextRandomEntity = 5
    SentimentStats = 6
    EntitySelection = 7
    TypeSelection = 8
    Greeting = 9
    UnknownAction = 10
    EntityConfirmation = 11
    # Summary actions occupy the 21+ range, mirroring the comment actions.
    NextSummary = 21
    NextOppositeSummary = 22
    NextPositiveSummary = 23
    NextNegativeSummary = 24
class State(Enum):
    """
    Enum for states.

    Wherever the code refers to ``sid``, it means a member of this enum.
    To get the actual int representation, use ``sid.value``.
    To get a str representation, use ``sid.name``.
    """
    SystemInitiative = 0
    TypeSelected = 1
    EntitySelected = 2
    CommentSelected = 3
    RangeSelected = 4
class Step(Enum):
    """
    Sub-state used while in the ``RangeSelected`` state.
    """
    RangeInitiative = 1
    TypeSelected = 2
class API(Enum):
    """Enumeration of API call kinds (initialize, query, close)."""
    Init = 1
    Query = 2
    Close = 3
class Comment(models.Model):
    """Row of the ``comment`` table: one rated, sentiment-scored comment."""
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax;
    # these unmanaged models will not load on Django >= 2.0 as written.
    cid = models.BigIntegerField(primary_key=True)
    eid = models.ForeignKey('Entity', db_column='eid')
    body = models.TextField()
    rating = models.FloatField()
    author = models.TextField()
    title = models.TextField()
    time = models.DateField()
    sentiment = models.IntegerField()
    class Meta:
        managed = False
        db_table = 'comment'
class Entity(models.Model):
    """Row of the ``entity`` table: a commentable item of some ``Types``."""
    eid = models.BigIntegerField(primary_key=True)
    # NOTE(review): a TextField named ``id`` alongside the ``eid`` primary key
    # is unusual and shadows Django's conventional pk name -- confirm intent.
    id = models.TextField()
    source = models.TextField()
    description = models.TextField()
    url = models.TextField()
    # NOTE(review): pre-Django-2.0 ForeignKey (no on_delete).
    tid = models.ForeignKey('Types', db_column='tid')
    name = models.TextField()
    class Meta:
        managed = False
        db_table = 'entity'
class Types(models.Model):
    """Row of the ``type`` table (renamed from ``Type`` to avoid clashing
    with the ``Type`` enum above)."""
    tid = models.AutoField(primary_key=True)
    name = models.TextField()
    class Meta:
        managed = False
        db_table = 'type'
class Evaluation(models.Model):
    """Row of the ``evaluation`` table: a user's score for one summarization
    method applied to one comment of an entity."""
    evid = models.IntegerField(primary_key=True)
    # NOTE(review): pre-Django-2.0 ForeignKeys (no on_delete).
    userid = models.ForeignKey(User, db_column='userid')
    eid = models.ForeignKey(Entity, db_column='eid')
    mid = models.ForeignKey('Method', db_column='mid')
    cid = models.ForeignKey(Comment, db_column='cid')
    score = models.IntegerField()
    class Meta:
        managed = False
        db_table = 'evaluation'
class Method(models.Model):
    """Row of the ``method`` table: a named summarization method."""
    mid = models.AutoField(primary_key=True)
    name = models.TextField()
    class Meta:
        managed = False
        db_table = 'method'
class MiniEntity(models.Model):
    """Row of the ``mini_entity`` table: a bare entity id reference."""
    eid = models.IntegerField(primary_key=True)
    class Meta:
        managed = False
        db_table = 'mini_entity'
class Summary(models.Model):
    """Row of the ``summary`` table keyed by (comment, method).

    NOTE(review): this class is shadowed by a second ``Summary`` definition
    later in this module; at import time only the later one is visible.
    Confirm which schema is current and remove the other.
    """
    # NOTE(review): pre-Django-2.0 ForeignKeys (no on_delete); also no
    # explicit primary key despite the unique_together constraint.
    cid = models.ForeignKey(Comment, db_column='cid')
    mid = models.ForeignKey(Method, db_column='mid')
    body = models.TextField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'summary'
        unique_together = (('cid', 'mid'),)
class Actions(models.Model):
    """Row of the ``action`` table (renamed from ``Action`` to avoid clashing
    with the ``Action`` enum above)."""
    aid = models.AutoField(primary_key=True)
    name = models.TextField()
    method = models.TextField()
    class Meta:
        managed = False
        db_table = 'action'
class History(models.Model):
    """Row of the ``history`` table: one query/response exchange with the
    action taken, optional desired action, entity and user feedback."""
    hid = models.BigIntegerField(primary_key=True)
    # Plain UUID, not a FK to auth.User -- presumably an anonymous session id;
    # TODO confirm against the writer of this table.
    userid = models.UUIDField()
    query = models.TextField()
    response = models.TextField()
    time = models.DateTimeField()
    # NOTE(review): pre-Django-2.0 ForeignKeys (no on_delete).
    aid = models.ForeignKey(Actions, db_column='aid', blank=True, null=True)
    desired_aid = models.IntegerField(blank=True, null=True)
    eid = models.ForeignKey(Entity, db_column='eid', blank=True, null=True)
    feedback = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'history'
class Summary(models.Model):
    """Row of the ``summary`` table: a ranked summary body for a comment.

    NOTE(review): this redefines ``Summary`` declared earlier in this module
    (same ``db_table``, different fields); the earlier class is shadowed.
    Confirm which schema matches the live database and delete the other.
    """
    sbid = models.BigIntegerField(primary_key=True)
    # NOTE(review): pre-Django-2.0 ForeignKeys (no on_delete).
    cid = models.ForeignKey(Comment, db_column='cid')
    rank = models.IntegerField()
    mid = models.ForeignKey(Method, db_column='mid')
    body = models.TextField()
    class Meta:
        managed = False
        db_table = 'summary'
|
import tkinter
from tkinter import messagebox

# Fix: the original imported the Python 3 module ``tkinter`` but then used
# the Python 2 names ``Tkinter`` and ``tkMessageBox``, raising NameError /
# ModuleNotFoundError at runtime. Use Python 3 names throughout.
top = tkinter.Tk()

def helloCallBack():
    """Show a greeting dialog when the button is pressed."""
    messagebox.showinfo("Hello Python", "Hello World")

B = tkinter.Button(top, text="Hello", command=helloCallBack)
B.pack()
top.mainloop()
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on, resolved by the recipe engine.
DEPS = [
  'ios',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/raw_io',
]
def RunSteps(api):
  """Run the full iOS pipeline: checkout, read the bot's build config,
  build, upload artifacts and launch swarming tests."""
  api.ios.checkout()
  api.ios.read_build_config()
  api.ios.build()
  api.ios.upload()
  api.ios.test_swarming()
def GenTests(api):
  """Simulation test cases: a fully-featured basic run, goma builds
  (regular and canary), and a goma build whose compile step fails."""
  # Full config: env vars, gn args, three test bundles (one xctest),
  # and two upload artifacts.
  yield (
    api.test('basic')
    + api.platform('mac', 64)
    + api.properties(
      buildername='ios',
      buildnumber='0',
      mastername='chromium.fake',
      bot_id='fake-vm',
      path_config='kitchen',
    )
    + api.ios.make_test_build_config({
      'xcode version': 'fake xcode version',
      'env': {
        'fake env var 1': 'fake env value 1',
        'fake env var 2': 'fake env value 2',
      },
      'gn_args': [
        'is_debug=true',
        'target_cpu="x86"',
      ],
      'bucket': 'fake-bucket-1',
      'tests': [
        {
          'app': 'fake tests 1',
          'device type': 'fake device',
          'os': '8.0',
        },
        {
          'app': 'fake tests 2',
          'device type': 'fake device',
          'os': '7.1',
        },
        {
          'app': 'fake_eg_test_host',
          'device type': 'fake device 3',
          'os': '9.3',
          'xctest': True,
        },
      ],
      'upload': [
        {
          'artifact': 'fake tests 1.app',
          'compress': True,
        },
        {
          'artifact': 'fake tests 2.app',
          'bucket': 'fake-bucket-2',
        },
      ],
    })
    + api.step_data(
      'bootstrap swarming.swarming.py --version',
      stdout=api.raw_io.output_text('1.2.3'),
    )
  )
  # Release goma build with no tests configured.
  yield (
    api.test('goma')
    + api.platform('mac', 64)
    + api.properties(
      buildername='ios',
      buildnumber='0',
      mastername='chromium.fake',
      bot_id='fake-vm',
      path_config='kitchen',
    )
    + api.ios.make_test_build_config({
      'xcode version': 'fake xcode version',
      'gn_args': [
        'is_debug=false',
        'target_cpu="arm"',
        'use_goma=true',
      ],
      'tests': [
      ],
    })
    + api.step_data(
      'bootstrap swarming.swarming.py --version',
      stdout=api.raw_io.output_text('1.2.3'),
    )
  )
  # Same as 'goma' but exercising the goma canary path.
  yield (
    api.test('goma_canary')
    + api.platform('mac', 64)
    + api.properties(
      buildername='ios',
      buildnumber='0',
      mastername='chromium.fake',
      bot_id='fake-vm',
      path_config='kitchen',
    )
    + api.ios.make_test_build_config({
      'xcode version': 'fake xcode version',
      'gn_args': [
        'is_debug=false',
        'target_cpu="arm"',
        'use_goma=true',
      ],
      'use_goma_canary': True,
      'tests': [
      ],
    })
    + api.step_data(
      'bootstrap swarming.swarming.py --version',
      stdout=api.raw_io.output_text('1.2.3'),
    )
  )
  # Compile step returns retcode 1 to cover the failure path.
  yield (
    api.test('goma_compilation_failure')
    + api.platform('mac', 64)
    + api.properties(
      buildername='ios',
      buildnumber='0',
      mastername='chromium.fake',
      bot_id='fake-vm',
    )
    + api.ios.make_test_build_config({
      'xcode version': '6.1.1',
      'gn_args': [
        'is_debug=false',
        'target_cpu="arm"',
        'use_goma=true',
      ],
      'tests': [
        {
          'app': 'fake test',
          'device type': 'fake device',
          'os': '8.1',
        },
      ],
    })
    + api.step_data(
      'compile',
      retcode=1,
      stdout=api.raw_io.output_text('1.2.3'),
    )
  )
|
"""This module configures logging for Stibium"""
import logging
# Single shared record format: timestamp-logger-level-message.
logging.basicConfig(format='%(asctime)s-%(name)s-%(levelname)s-%(message)s')
log = logging.getLogger('stibium')
log.setLevel(logging.DEBUG)  # FIXME, should be defined at bot instantiation
# Reduce the verbosity of the separate 'client' logger.
_fblog = logging.getLogger('client')
_fblog.setLevel(logging.WARNING)  # don't ask
|
#
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from django.utils.translation import ugettext_lazy as _
import horizon
# Load the api rest services into Horizon
import starlingx_dashboard.api.rest # noqa: F401 pylint: disable=unused-import
class DCAdmin(horizon.Dashboard):
    """Horizon dashboard for Distributed Cloud administration."""
    name = _("Distributed Cloud Admin")
    slug = "dc_admin"
    default_panel = 'cloud_overview'
    # Must be admin and in the dcmanager's service region to manage
    # distributed cloud
    permissions = ('openstack.roles.admin',
                   'openstack.services.dcmanager',)
    def allowed(self, context):
        # Only visible from the SystemController region; beyond that,
        # defer to the base class's permission checks.
        in_system_controller = (
            context['request'].user.services_region == 'SystemController')
        return in_system_controller and super(DCAdmin, self).allowed(context)
horizon.register(DCAdmin)
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import os
import sys
import time
import threading
from tornado import ioloop
from notebook.notebookapp import NotebookApp, flags, aliases
from traitlets import Bool, Unicode
from jupyterlab_launcher import LabConfig, add_handlers
from selenium import webdriver
from .commands import get_app_dir
here = os.path.dirname(__file__)
test_flags = dict(flags)
test_flags['core-mode'] = (
{'TestApp': {'core_mode': True}},
"Start the app in core mode."
)
test_aliases = dict(aliases)
test_aliases['app-dir'] = 'TestApp.app_dir'
class TestApp(NotebookApp):
    """Notebook app that serves JupyterLab assets and drives a Selenium
    smoke test against itself, exiting with the test's result code."""
    default_url = Unicode('/lab')
    open_browser = Bool(False)
    base_url = '/foo'
    flags = test_flags
    aliases = test_aliases
    core_mode = Bool(False, config=True,
        help="Whether to start the app in core mode")
    app_dir = Unicode('', config=True,
        help="The app directory to build in")
    def start(self):
        # Pick the assets dir by priority: core build, explicit app dir,
        # then the default installed app dir.
        self.io_loop = ioloop.IOLoop.current()
        config = LabConfig()
        if self.core_mode:
            config.assets_dir = os.path.join(here, 'build')
        elif self.app_dir:
            config.assets_dir = os.path.join(self.app_dir, 'static')
        else:
            config.assets_dir = os.path.join(get_app_dir(), 'static')
        print('****Testing assets dir %s' % config.assets_dir)
        config.settings_dir = ''
        add_handlers(self.web_app, config)
        # Give the server a moment to come up before starting the browser.
        self.io_loop.call_later(1, self._run_selenium)
        super(TestApp, self).start()
    def _run_selenium(self):
        # Selenium blocks, so run it off the IO loop thread.
        thread = threading.Thread(target=run_selenium,
            args=(self.display_url, self._selenium_finished))
        thread.start()
    def _selenium_finished(self, result):
        # Marshal the exit back onto the IO loop thread.
        self.io_loop.add_callback(lambda: sys.exit(result))
def run_selenium(url, callback):
    """Run the selenium smoke test and invoke *callback* with an exit code.

    Loads *url* in Firefox and polls for up to ~10 seconds for an element
    with id ``main`` to appear. Calls ``callback(0)`` on success and
    ``callback(1)`` on timeout.
    """
    print('Starting Firefox Driver')
    driver = webdriver.Firefox()
    print('Navigating to page:', url)
    driver.get(url)
    completed = False
    # Start a poll loop.
    t0 = time.time()
    while time.time() - t0 < 10:
        # Fix: find_element_by_id raises NoSuchElementException while the
        # page is still loading, which aborted the poll loop on the first
        # iteration. find_elements_by_id returns an empty list instead.
        if driver.find_elements_by_id('main'):
            completed = True
            break
        # Avoid hogging the main thread.
        time.sleep(0.5)
    driver.quit()
    # Return the exit code.
    if not completed:
        callback(1)
    else:
        # On success, clean up the log file Firefox's driver leaves behind.
        if os.path.exists('./geckodriver.log'):
            os.remove('./geckodriver.log')
        callback(0)
# Script entry point: launch the test app (starts the server and the
# selenium run, exiting with the test result).
if __name__ == '__main__':
    TestApp.launch_instance()
|
# Resolve imports both when used as a package module and when run directly
# as a script from inside the template directory.
if __name__ != '__main__':
    from template.Rect import Rect
    from template.init import *
else:
    from Rect import Rect
    from init import *
# Shared S3 handle (boto3 comes in via the star import above).
s3 = boto3.resource('s3')
# fileName = "invoice.pdf"
# bucketName = "poc-cloudformation-bucket"
# function load predefined template co-ordinate from template.json file
def load_template(templateName):
    """Load the predefined coordinate template ``template/<name>.json``.

    Side effect: stores the template's image resolution on the ``Rect``
    class (``Rect.img_dim``) for later coordinate conversions.
    """
    with open('./template/{0}.json'.format(get_file_name_without_extension(templateName)), 'r') as temp:
        template = json.load(temp)
        # Assigning image resolution to static variable of class Rect
        Rect.img_dim = template['resolution']
    return template
# function load textract block object.These objects represent lines of text or textual words that are detected on a
# document page from a json file which are formatted on the basis our requirement. And the path of this file is defined
# as a key value pair in "input_format_mapping.json"
def load_input_format(fileName):
    """Load the pre-formatted Textract blocks for *fileName*.

    ``input_format_mapping.json`` maps a document's base name to the path
    of its formatted block file; that file's JSON content is returned.
    """
    input_format = None
    with open('./input_format_mapping.json', 'r') as f:
        content = json.load(f)
        # Look up where this document's formatted blocks were written.
        fileLoc = content[get_file_name_without_extension(fileName)]
    with open(fileLoc) as f:
        input_format = json.loads(f.read())
    return input_format
# This function convert the document(pdf) that is store in s3 bucket into image
def convert_pdf_to_image(bucketName, fileName):
    """Download the PDF *fileName* from the S3 bucket and return its pages
    as a list of images (via pdf2image's ``convert_from_bytes``)."""
    obj = s3.Object(bucketName, fileName)
    parse = obj.get()['Body'].read()
    images = convert_from_bytes(parse)
    return images
# Function convert the bounding box co-ordinate from the ration of overall document page into pixels of (x0,y0),(x1,y1)
def create_element_shape(input_format, images):
    """Convert normalized bounding boxes into absolute pixel rectangles.

    Each element's ``geometry`` carries Left/Top/Width/Height as fractions
    of the page; the first page image supplies the pixel dimensions.
    Returns a list of ``[(x0, y0), (x1, y1)]`` corner pairs.
    """
    page_w = images[0].size[0]
    page_h = images[0].size[1]
    rectangles = []
    for element in input_format:
        box = element['geometry']
        left = box['Left'] * page_w
        top = box['Top'] * page_h
        right = left + box['Width'] * page_w
        bottom = top + box['Height'] * page_h
        rectangles.append([(left, top), (right, bottom)])
    return rectangles
# function draw rectangle over the image using the bounding box coordinate
def draw_bounding_box(image, shapes, color):
    """Outline each rectangle in *shapes* on *image* in *color* (in place)."""
    boxed_image = ImageDraw.Draw(image)
    for shape in shapes:
        boxed_image.rectangle(shape, outline=color)
# function group element on the basis of the template defined and save the result as json file inside template/group_element/
def grouping_element(input_formats, templates, fileName):
    """Group detected elements into the template-defined regions and write
    the result to ``template/group_element/<file>.json``.

    Returns the relative path of the written JSON file.

    NOTE(review): ``group_elem = template`` aliases the template dict, so
    the caller's ``templates`` list is mutated in place (an ``elements``
    key is added to every template) -- confirm callers tolerate this.
    """
    group_elements = []
    for template in templates:
        group_elem = None
        group_elem = template
        elements = []
        for input_format in input_formats:
            # Converting bounding box into x0,y0,x1,y1 co-ordinate
            group = Rect(template, create='No')
            elem = Rect(input_format)
            if(group.check_element_inside_group(elem)):
                elements.append(input_format)
        group_elem["elements"] = elements
        group_elements.append(group_elem)
    if not path.isdir("template/group_element"):
        os.mkdir("template/group_element")
    with open('./template/group_element/{0}.json'.format(get_file_name_without_extension(fileName)), 'w+') as f:
        f.write(json.dumps(group_elements))
    return 'template/group_element/{0}.json'.format(get_file_name_without_extension(fileName))
# CLI entry point: expects bucket name, file name and template name as
# positional arguments; renders detected elements (red) and template
# groups (green) on the first page image.
if __name__ == '__main__':
    print('In template.py')
    try:
        bucketName = sys.argv[1]
        fileName = sys.argv[2]
        templateName = sys.argv[3]
        # NOTE(review): s3_obj is constructed but never used.
        s3_obj = {"Bucket": bucketName, "Name": fileName}
        templates = load_template(templateName)
        input_format = load_input_format(fileName)
        group_elem_path = grouping_element(
            input_format, templates['group'], fileName)
        images = convert_pdf_to_image(bucketName, fileName)
        shapes = create_element_shape(input_format, images)
        draw_bounding_box(images[0], shapes, 'red')
        draw_bounding_box(images[0], list(map(
            lambda a: [(a['x0'], a['y0']), (a['x1'], a['y1'])], templates['group'])), 'green')
        images[0].show()
        print('Response of Group Element Path {0}'.format(group_elem_path))
    except IndexError:
        # Raised by the sys.argv lookups when arguments are missing.
        print('Please provide S3 "Bucket Name" and "File Name" while executing the program.')
|
# -*- coding: utf-8 -*-
import factory
from koalixcrm.crm.models import ReportingPeriod
from koalixcrm.crm.factories.factory_project import StandardProjectFactory
from koalixcrm.crm.factories.factory_reporting_period_status import ReportingReportingPeriodStatusFactory
class StandardReportingPeriodFactory(factory.django.DjangoModelFactory):
    """Factory producing a standard ``ReportingPeriod`` spanning one year."""
    class Meta:
        model = ReportingPeriod
        # Reuse an existing row with the same title instead of duplicating.
        django_get_or_create = ('title',)

    project = factory.SubFactory(StandardProjectFactory)
    title = "This is a test project"
    begin = '2018-06-15'
    end = '2019-06-15'
    status = factory.SubFactory(ReportingReportingPeriodStatusFactory)
|
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
import os
import json
import shutil
import fnmatch
import tempfile
import subprocess
import sibis
def export_spiral_files(xnat, resource_location, to_directory, stroop=(None, None, None), verbose=None):
    """Export spiral fMRI files (and optional Stroop data) from XNAT.

    :param xnat: XNAT server interface object.
    :param resource_location: 'EID/RESOURCE_ID/FILENAME' path of spiral.tar.gz.
    :param to_directory: destination directory (files go under 'native/').
    :param stroop: (experiment, resource, file) triple; all-None disables.
    :param verbose: print progress messages when truthy.
    :returns: True when anything was created or updated, False otherwise.
    """
    if verbose:
        print("Exporting spiral files...")
    result = False  # Nothing updated or created yet
    # resource location contains results dict with path building elements
    # NCANDA_E01696/27630/spiral.tar.gz
    spiral_nifti_out = os.path.join(to_directory, 'native', 'bold4D.nii.gz')
    # Only unpack/convert when the NIfTI is not already in place.
    if not os.path.exists(spiral_nifti_out):
        tmpdir = tempfile.mkdtemp()
        result = do_export_spiral_files(xnat, resource_location, to_directory, spiral_nifti_out, tmpdir)
        shutil.rmtree(tmpdir)
    # Do we have information on Stroop files?
    if stroop[0]:
        stroop_file_out = os.path.join(to_directory, 'native', 'stroop.txt')
        # Stroop file does not exist yet, so create it
        if not os.path.exists( stroop_file_out ):
            tmpdir = tempfile.mkdtemp()
            stroop_file_tmp = xnat.select.experiment(stroop[0]).resource(stroop[1]).file(stroop[2]).get_copy(os.path.join( tmpdir, stroop[2]))
            # Sanitize (strip identifying fields) before placing the file.
            from sanitize_eprime import copy_sanitize
            copy_sanitize(stroop_file_tmp, stroop_file_out)
            shutil.rmtree(tmpdir)
            result = True
    if verbose:
        if result:
            print("...done!")
        else:
            print("...nothing exported!")
    return result
def do_export_spiral_files(xnat, resource_location, to_directory, spiral_nifti_out, tmpdir, verbose=None):
    # Do the actual export using a temporary directory that is managed by the caller
    # (simplifies its removal regardless of exit taken)
    #
    # Downloads the spiral tarball from XNAT, expects exactly one E*P*.7
    # spiral file and at most one P*.physio file inside, converts the
    # spiral file to NIfTI and gzips the physio file. Returns True on
    # success, False after logging any failure via sibis.logging.
    [xnat_eid, resource_id, resource_file_bname] = resource_location.split('/')
    tmp_file_path = xnat.select.experiment(xnat_eid).resource(resource_id).file(resource_file_bname).get_copy(os.path.join(tmpdir, "pfiles.tar.gz"))
    errcode, stdout, stderr = untar_to_dir(tmp_file_path, tmpdir)
    if errcode != 0:
        error="ERROR: Unable to un-tar resource file. File is likely corrupt."
        sibis.logging(xnat_eid,error,
                      tempfile_path=tmp_file_path,
                      resource_location=resource_location)
        if verbose:
            print "StdErr:\n{}".format(stderr)
            print "StdOut:\n{}".format(stdout)
        return False
    # Exactly one spiral E-file must be present.
    spiral_E_files = glob_for_files_recursive(tmpdir, pattern="E*P*.7")
    if len(spiral_E_files) > 1:
        error = "ERROR: more than one E file found"
        sibis.logging(xnat_eid,error,
                      spiral_e_files = ', '.join(spiral_E_files))
        return False
    # At most one physio file may accompany it.
    physio_files = glob_for_files_recursive(tmpdir, pattern="P*.physio")
    if len(physio_files) > 1:
        error = 'More than one physio file found.'
        sibis.logging(xnat_eid,error,
                      tmp_file_path=tmp_file_path,
                      physio_files=physio_files)
        return False
    if len(spiral_E_files) == 1:
        # Make directory first
        spiral_dir_out = os.path.join(to_directory, 'native')
        if not os.path.exists(spiral_dir_out):
            os.makedirs(spiral_dir_out)
        # Now try to make the NIfTI
        errcode, stdout, stderr = make_nifti_from_spiral(spiral_E_files[0], spiral_nifti_out)
        if not os.path.exists(spiral_nifti_out):
            error="Unable to make NIfTI from resource file, please try running makenifti manually"
            sibis.logging(xnat_eid, error,
                          spiral_file=spiral_E_files[0])
            if verbose:
                print "StdErr:\n{}".format(stderr)
                print "StdOut:\n{}".format(stdout)
            return False
    else:
        error = "ERROR: no spiral data file found"
        sibis.logging(xnat_eid,error,
                      resource_location=resource_location)
        return False
    if len(physio_files) == 1:
        # Copy the physio file alongside the NIfTI and compress it.
        spiral_physio_out = os.path.join(to_directory, 'native', 'physio')
        shutil.copyfile(physio_files[0], spiral_physio_out)
        gzip(spiral_physio_out)
    return True
def make_nifti_from_spiral(spiral_file, outfile):
    """Build a gzipped NIfTI from a spiral E-file via the makenifti tool.

    *outfile* is expected to end in '.nii.gz'; makenifti is handed the
    path without that suffix and the resulting '.nii' is gzipped.
    Returns (returncode, stdout, stderr) from the makenifti invocation.
    """
    base_path = outfile[:-7]
    errcode, stdout, stderr = call_shell_program("makenifti -s 0 %s %s" % (spiral_file, base_path))
    plain_nifti = outfile[:-3]
    if os.path.exists(plain_nifti):
        gzip(plain_nifti)
    return errcode, stdout, stderr
def call_shell_program(cmd):
    """Run *cmd* through the shell; return (returncode, stdout, stderr)."""
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    return proc.returncode, stdout_data, stderr_data
def untar_to_dir(tarfile, out_dir):
    """Extract the gzipped tarball *tarfile* into *out_dir* using the
    system tar tool; return (returncode, stdout, stderr)."""
    cmd = "tar -xzf %(tarfile)s --directory=%(out_dir)s" % {
        'tarfile': tarfile,
        'out_dir': out_dir,
    }
    return call_shell_program(cmd)
def glob_for_files_recursive(root_dir, pattern):
    """Recursively collect paths under *root_dir* whose file name matches
    the bash-style *pattern* (fnmatch rules)."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(root_dir, topdown=False):
        for fname in filenames:
            if fnmatch.fnmatch(fname, pattern):
                matches.append(os.path.join(dirpath, fname))
    return matches
def gzip(infile):
    """Compress *infile* in place with maximum compression (it becomes
    '<infile>.gz'); relies on the system gzip binary."""
    cmd = 'gzip -9 %s' % infile
    call_shell_program(cmd)
|
# Sliding-window minimum-missing-value: reads n, m and a sequence from
# stdin; tracks value counts over a window of m elements and the smallest
# value whose count has dropped to zero.
n,m,*a=map(int,open(0).read().split())
c=[0]*-~n  # counts for values 0..n (-~n == n + 1)
for i in a[:m]:c[i]+=1
q=c.index(0)  # first zero-count value in the initial window
# Slide: p leaves, x enters; (p,q)[c[p]>0:] is (q,) while p remains present,
# else (p,q) -- so q = min(p, q) only when p's count hits zero.
for p,x in zip(a,a[m:]):c[p]-=1;c[x]+=1;q=min((p,q)[c[p]>0:])
print(q)
# a simple attempt at a color matching, stack choosing card game
# joadavis Oct 20, 2016
# Another attempt at a game in one .py file
# No AI for this version, just two players taking turns
# objects - cards, buttons [draw, place, take, help], game session (to track turns)
# need to display scores and player labels
# display a "who won" message at the end
import pygame
import random
GAME_WHITE = (250, 250, 250)
GAME_BLACK = (0, 0, 0)
GAME_GREEN = (0,55,0)
GAME_SPLASH = (25, 80, 25)
class GameSession(object):
    # Placeholder for per-game state (e.g. turn tracking); not implemented.
    pass
class SomeButton(pygame.sprite.Sprite):
    """A clickable button sprite; its face is drawn once at construction."""
    label = ""
    def __init__(self, x, y):
        super().__init__()
        # image setup
        self.image = pygame.Surface([40,20])
        self.init_draw(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def init_draw(self, screen):
        # NOTE(review): fills a 50x50 area on a 40x20 surface; pygame clips
        # the overflow, but the sizes look inconsistent -- confirm intent.
        pygame.draw.rect(screen, GAME_BLACK, [0, 0, 50, 50])
class SplashBox(pygame.sprite.Sprite):
    """Welcome/instructions dialog shown until the player clicks it."""
    welcome_message = ["Welcome to Fort Collorins.","",
                       "This is a two player card game.",
                       "On your turn, either draw a new card and place it on a pile,", "  or choose a pile to add to your stacks.",
                       "Play until there are less than 15 cards left in deck.",
                       "Only your three largest stacks are scored for you,", "  the rest count against your score.",
                       "", "Click this dialog to begin." ]
    rect = [0,0,1,1]
    def __init__(self, x, y):
        super().__init__()
        print("splash init")
        # image setup
        self.image = pygame.Surface([400,250])
        self.init_draw(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def update(self):
        print("Splash")
        pass
    def init_draw(self, screen):
        # now using sprite, so coords relative within sprite image (screen)
        # upper left corner x and y then width and height (downward)
        pygame.draw.rect(screen, GAME_SPLASH, self.rect)
        infont = pygame.font.Font(None, 18)
        # Render the welcome text line by line, 18px apart.
        for msg_id in range(len(self.welcome_message)):
            text = infont.render(self.welcome_message[msg_id], True, GAME_WHITE)
            screen.blit(text, [30, 30 + msg_id * 18])
class Card(pygame.sprite.Sprite):
    """A single playing card sprite with a color and face-up/down state."""
    def __init__(self, color, x, y):
        super().__init__()
        self.color = color
        self.flip = 0 # face down
        # image setup
        self.image = pygame.Surface([50,50])
        self.init_draw(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def update(self):
        print("upd")
        pass
    def init_draw(self, screen):
        # now using sprite, so coords relative within sprite image (screen)
        # upper left corner x and y then width and height (downward)
        pygame.draw.rect(screen, GAME_BLACK, [0, 0, 50, 50])
def draw_finger(screen, x, y):
    """Draw a small white pointer polygon at (x, y); used as a custom
    cursor since the system mouse cursor is hidden during setup."""
    pygame.draw.polygon(screen, GAME_WHITE,
                        [ [x,y], [x+2, y], [x+2, y+5],
                          [x+8, y+5], [x+7, y+15],
                          [x+1, y+15], [x, y] ] )
# Setup --------------------------------------
pygame.init()
# Set the width and height of the screen [width,height]
size = [700, 500]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Ft. Collorins")
# try defining this in constants
afont = pygame.font.Font(None, 18)
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Hide the mouse cursor
pygame.mouse.set_visible(0)
# Show the splash/instructions dialog until it is clicked.
splash = SplashBox(100, 100)
dialog_group = pygame.sprite.Group()
dialog_group.add(splash)
splash_show = True
# -------- Main Program Loop -----------
while not done:
    # ALL EVENT PROCESSING SHOULD GO BELOW THIS COMMENT
    click_event = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # User clicks the mouse. Get the position
            click_pos = pygame.mouse.get_pos()
            print("Click ", click_pos)
            click_event = True
    # ALL EVENT PROCESSING SHOULD GO ABOVE THIS COMMENT
    # ALL GAME LOGIC SHOULD GO BELOW THIS COMMENT
    pos = pygame.mouse.get_pos()
    x = pos[0]
    y = pos[1]
    # ALL GAME LOGIC SHOULD GO ABOVE THIS COMMENT
    # ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
    # First, clear the screen to ___. Don't put other drawing commands
    # above this, or they will be erased with this command.
    # NOTE(review): this literal duplicates the GAME_GREEN constant.
    screen.fill( (0,55,0) )
    if splash_show:
        dialog_group.draw(screen)
        # Dismiss the splash when the click landed inside it.
        if click_event and splash.rect.collidepoint(click_pos[0], click_pos[1]):
            splash_show = False
    draw_finger(screen, x, y)
    # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # Limit to 20 frames per second
    clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
# Read two integers S and W; report "unsafe" when W >= S, else "safe".
S, W = map(int, input().split())
print("unsafe" if W >= S else "safe")
from django import forms
from .models import Pet
class PetForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of ``Pet``."""
    class Meta:
        model = Pet
        fields = ["name","type","genus","age","explanation","pet_image"]
|
"""
Build the given Jail base
"""
from .parser import Jockerfile
from .backends.utils import get_backend
def build(jockerfile='Jockerfile', build=None, install=False):
    """
    Build the base from the given Jockerfile.
    """
    # NOTE(review): the ``build`` parameter shadows this function's own name
    # inside the body; harmless here, but a rename would aid readability.
    jail_backend = get_backend()
    # Parse the Jockerfile path into a Jockerfile object (rebinds the name).
    jockerfile = Jockerfile(jockerfile)
    jail_backend.build(jockerfile, build=build, install=install)
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from . import resampling as rs
def plotClouds(Vs, colors, fignum):
    """Scatter-plot 3D point clouds in figure *fignum*.

    When *colors* is non-empty, each cloud uses its own per-point RGB
    colors (0-255, transposed and scaled to 0-1); otherwise each cloud
    gets a distinct color from a fixed 7-color palette.
    """
    color_list = ['r','g','b','c','y','m','k']
    fig = plt.figure(fignum)
    ax = fig.add_subplot(111, projection='3d')
    if colors:
        for V, color in zip(Vs, colors):
            ax.scatter(V[0,:], V[1,:], V[2,:], c=color.transpose()/255.0, marker='o', s=9)
    else:
        for i, V in enumerate(Vs):
            # Clamp the palette index so extra clouds reuse the last color.
            cind = min(6,i)
            ax.scatter(V[0,:], V[1,:], V[2,:], c=color_list[cind], marker='o', s=9)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.show()
def plotCloudsModel(Vs, colors, X, fignum):
    """Scatter-plot 3D point clouds plus model points *X* (black, large).

    Each cloud is downsampled to 2000 points. When *colors* is non-empty
    the clouds use their own per-point RGB colors; otherwise each cloud is
    drawn in a distinct color from a fixed palette.
    """
    color_list = ['r','g','b','c','y','m']
    fig = plt.figure(fignum)
    ax = fig.add_subplot(111, projection='3d')
    if colors:
        for V, color in zip(Vs, colors):
            Vc = rs.random_sampling([V,color], 2000)
            V = Vc[0]
            cc = Vc[1].transpose()
            ax.scatter(V[0,:], V[1,:], V[2,:], c=cc/255.0, marker='o', s=9)
    else:
        # Fix: this loop previously ran unconditionally, double-plotting the
        # clouds on top of their colored versions when *colors* was given;
        # mirrors the if/else structure of plotClouds above.
        for i, V in enumerate(Vs):
            Vv = rs.random_sampling(V, 2000)
            cind = min(5,i)
            ax.scatter(Vv[0,:], Vv[1,:], Vv[2,:], c=color_list[cind], marker='o', s=9)
    ax.scatter(X[0,:], X[1,:], X[2,:], c='k', marker='o', s=100)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.show()
|
#!/usr/bin/env python
"""
Contains the sqlintf.DataProduct class definition
Please note that this module is private. The sqlintf.DataProduct class is
available in the ``wpipe.sqlintf`` namespace - use that instead.
"""
from .core import sa, orm
from .OptOwner import OptOwner
__all__ = ['DataProduct']
class DataProduct(OptOwner):
    """
    A DataProduct object represents a row of the `dataproducts` table.

    DO NOT USE CONSTRUCTOR: constructing a DataProduct object adds a new
    row to the database: USE INSTEAD ITS WPIPE COUNTERPART.
    """
    __tablename__ = 'dataproducts'
    # Joined-table inheritance: shares its primary key with optowners.
    id = sa.Column(sa.Integer, sa.ForeignKey('optowners.id'), primary_key=True)
    filename = sa.Column(sa.String(256))
    relativepath = sa.Column(sa.String(256))
    suffix = sa.Column(sa.String(256))
    data_type = sa.Column(sa.String(256))
    subtype = sa.Column(sa.String(256))
    group = sa.Column(sa.String(256))
    filtername = sa.Column(sa.String(256))
    ra = sa.Column(sa.Float)
    dec = sa.Column(sa.Float)
    pointing_angle = sa.Column(sa.Float)
    dpowner_id = sa.Column(sa.Integer, sa.ForeignKey('dpowners.id'))
    dpowner = orm.relationship("DPOwner", back_populates="dataproducts")
    # Aliases: the owning DPOwner may be a config, an input or a pipeline.
    config_id = input_id = pipeline_id = orm.synonym("dpowner_id")
    config = input = pipeline = orm.synonym("dpowner")
    __mapper_args__ = {
        'polymorphic_identity': 'dataproduct',
    }
    # A filename is unique within an owner's group.
    __table_args__ = (sa.UniqueConstraint('dpowner_id', 'group', 'filename'),
                      )
|
from tkinter import *
from tkinter import simpledialog, filedialog
from os import system
from tkinter import font
from time import sleep
from tkinter import colorchooser
from os import system, name
import re
from time import sleep
def clear():
    """Clear the terminal using the right command for the platform."""
    _ = system('cls' if name == 'nt' else 'clear')
clear()
# Literal brace characters for the templated code snippets inserted by
# the editor (avoids escaping them inside f-strings).
carrot = "{"
carrot2 = "}"
print("AdkCode 0.1.4 - CLI")
def main(choice = "Normal"):
    """Open the AdkCode editor window; with choice == "server", pre-fill a
    server-template snippet. Blocks in Tk's event loop until closed."""
    print("AdkCode 0.1.4 - UI SETUP....")
    sleep(1)
    root = Tk("")
    root.title("adkCode 0.1.4")
    text=Text(root)
    text.grid()
    def key(event):
        # Re-run syntax highlighting on every keystroke.
        text.tag_remove('found', '1.0', END)
        text.tag_remove('foundin', '1.0', END)
        text.tag_remove('foundfun', '1.0', END)
        text.tag_remove('yellow', '1.0', END)
        def highlight_regex(regex, tag):
            # Tag every match of *regex* in the buffer with *tag*.
            length = IntVar()
            start = 1.0
            idx = text.search(regex, start, stopindex=END, regexp=1, count=length)
            while idx:
                end = f"{idx}+{length.get()}c"
                text.tag_add(tag, idx, end)
                start = end
                idx = text.search(regex, start, stopindex=END, regexp=1, count=length)
        highlight_regex(r"[#][a][p][e][^a-zA-Z]", "utility")
        highlight_regex(r"[#][m][a][x][-][m][e][m][o][r][y][^a-zA-Z]", "utility")
        highlight_regex(r"[c][l][a][s][s][ ]", "keyword")
        highlight_regex(r"[f][u][n][c][t][^a-zA-Z]", "keyword")
        highlight_regex(r"[\'][^\']*[\']", "string")
        highlight_regex(r"[\"][^\']*[\"]", "string")
        highlight_regex(r"(\d)+[.]?(\d)*", "number")
        highlight_regex(r"[^ \t\n\r\f\v]*[(]", "number")
        text.tag_config('number', foreground="blue")
        text.tag_config('string', foreground="green")
        text.tag_config('keyword', foreground="red")
        text.tag_config('utility', foreground="brown")
        #find("#ape", "yellow")
        #find("#max-memory", "yellow")
    root.bind_all('<Key>', key)
    if choice == "server":
        text.insert(INSERT, f'\n#include server\n')
        text.insert(INSERT, f"render_file('index.html')\n")
        text.insert(INSERT, f"errorhandler(404, 'error.html')\n")
        text.insert(INSERT, f"run_server()\n")
    def run():
        # NOTE(review): grabs the buffer but never executes it -- the Run
        # button is effectively a no-op as written.
        global text
        t = text.get("1.0", "end-1c")
    def text_color():
        # Pick a color
        my_color = colorchooser.askcolor()[1]
        if my_color:
            # Create our font
            color_font = font.Font(text, text.cget("font"))
            # Configure a tag
            text.tag_configure("colored", font=color_font, foreground=my_color)
            # Define Current tags
            current_tags = text.tag_names("sel.first")
            # If statment to see if tag has been set
            if "colored" in current_tags:
                text.tag_remove("colored", "sel.first", "sel.last")
            else:
                text.tag_add("colored", "sel.first", "sel.last")
    def addText():
        # Insert a textlabel(...) snippet with a user-supplied title.
        textfieldkey = simpledialog.askstring(title="Insert A Text Object", prompt="Title:")
        text.insert(INSERT, f'\ntextlabel("{textfieldkey}")')
    def addButton():
        # Insert a button(...) snippet with a title and a callback name.
        buttonkey = simpledialog.askstring(title="Insert A Button Object", prompt="Title:")
        callFunc = simpledialog.askstring(title="Insert A Button Object", prompt="Function On Press")
        text.insert(INSERT, f'\nbutton("{buttonkey}", {callFunc}())')
    def addIf():
        # Insert an if-statement skeleton from two prompts.
        ifstate = simpledialog.askstring(title="If Statement", prompt="Condition")
        res = simpledialog.askstring(title="Result", prompt="On True:")
        text.insert(INSERT, f'\nif {ifstate} {carrot}\n')
        text.insert(INSERT, f'    {res}\n')
        text.insert(INSERT, f'{carrot2}\n')
    def addRepeat():
        # Insert a counted while-loop skeleton from three prompts.
        iterationVar = simpledialog.askstring(title="Iteration Variable", prompt="Name: ")
        repeat = simpledialog.askstring(title="Repeat", prompt="Cycles: ")
        stuff = simpledialog.askstring(title="Code", prompt="Statement: ")
        text.insert(INSERT, f'\n{iterationVar} = 0 while {iterationVar} < {int(repeat) + 1} {carrot}\n')
        text.insert(INSERT, f'    {stuff}\n')
        text.insert(INSERT, f'{carrot2}\n')
    def addInclude():
        file = simpledialog.askstring(title="Include Statement", prompt="Include:")
        text.insert(INSERT, f'\n#include {file}')
    def saveas():
        # Write the whole buffer to a user-chosen path.
        t = text.get("1.0", "end-1c")
        savelocation=filedialog.asksaveasfilename()
        file1=open(savelocation, "w+")
        file1.write(t)
        file1.close()
    # Toolbar construction: language, widget and file-function buttons.
    buttonframe = Frame(root)
    langframe = Frame(root)
    widgetframe = Frame(root)
    functionsframe = Frame(root)
    button7=Button(buttonframe, text="Colorify(Clarification)", command=text_color)
    button6=Button(langframe, text="Repeat(Custom)", command=addRepeat)
    button5=Button(langframe, text="Include Statement", command=addInclude)
    button4=Button(langframe, text="If Statement", command=addIf)
    button=Button(widgetframe, text="Label", command=addText)
    button3=Button(widgetframe, text="Button", command=addButton)
    button1=Button(functionsframe, text="Save", command=saveas)
    button2=Button(functionsframe, text="Run", command=run)
    c=Label(root, text="Language", font=("Helvetica", 30))
    c.grid()
    langframe.grid()
    button5.grid(row=0, column=0)
    button6.grid(row=0, column=1)
    button4.grid(row=0, column=2)
    a=Label(root, text="Widgets", font=("Helvetica", 30))
    a.grid()
    widgetframe.grid()
    button.grid(row=0, column=3)
    button3.grid(row=0, column=4)
    b=Label(root, text="Functions", font=("Helvetica", 30))
    b.grid()
    functionsframe.grid()
    button1.grid(row=0, column=5)
    button2.grid(row=0, column=6)
# Interactive CLI loop: "ui" opens the editor, "ui server" opens it with the
# server template, "clear" resets the screen.
while True:
    choice = input(">>> ")
    cmd = choice.split()
    try:
        if cmd[0].lower() == "ui" and len(cmd) == 1:
            print("Close the UI window if you want to continue the CLI...")
            main()
            clear()
            print("AdkCode 0.1.4 - CLI")
        elif cmd[0].lower() == "ui" and len(cmd) > 1:
            if cmd[1] == "server":
                main("server")
                clear()
                print("AdkCode 0.1.4 - CLI")
        elif cmd[0].lower() == "clear":
            clear()
            print("AdkCode 0.1.4 - CLI")
        # Fix: removed a second, byte-identical "clear" branch that was
        # unreachable (dead code) because the condition above already
        # matched every "clear" command.
    except:
        # NOTE(review): a bare except silently swallows every error
        # (including KeyboardInterrupt); consider narrowing it to
        # IndexError, which is what an empty command line raises.
        continue
|
"""
Author: Justin Cappos
Start Date: 27 June 2008
Description:
Adapted from repy's emulcomm (part of the Seattle project)
"""
import socket
# Probe targets used by getmyip(): public IPs that were considered stable
# when this was written. NOTE(review): these are years old — verify they
# still respond before relying on them.
STABLE_PUBLIC_IPS = ["18.7.22.69", # M.I.T.
"171.67.216.8", # Stanford
"169.229.131.81", # Berkeley
"140.142.12.202"] # Univ. of Washington
def get_localIP_to_remoteIP(connection_type, external_ip, external_port=80):
    """Determine which local IP the OS selects for an outbound connection.

    A throwaway IPv4 socket of the given type (see socket.socket()) is
    connected to ``external_ip:external_port`` and the locally assigned
    address is read back from getsockname().

    Raises whatever socket.socket() / connect() raise; the probe socket
    is always closed, even on failure.
    """
    probe = socket.socket(socket.AF_INET, connection_type)
    try:
        probe.connect((external_ip, external_port))
        local_ip, _local_port = probe.getsockname()
    finally:
        probe.close()
    return local_ip
# Public interface
def getmyip():
    """
    <Purpose>
      Provides the external IP of this computer by probing outbound
      connections toward a few stable public IPs and reading back the
      locally assigned address.
    <Arguments>
      None
    <Exceptions>
      Exception if no probe yields a usable address (i.e. the host does
      not appear to be connected to the internet). Networking errors from
      individual probes are swallowed and the next target is tried.
    <Side Effects>
      None.
    <Returns>
      The localhost's externally visible IP address.
    """
    # Adapted from a comp.lang.python thread:
    # http://groups.google.com/group/comp.lang.python/browse_thread/thread/d931cdc326d7032b?hl=en
    # Initialize to None so a failed probe is detectable.
    myip = None
    # It's possible on some platforms (Windows Mobile) that the IP will be
    # 0.0.0.0 even when I have a public IP and the external IP is up. However,
    # if I get a real connection with SOCK_STREAM, then I should get the real
    # answer — so try UDP first, then TCP.
    for conn_type in [socket.SOCK_DGRAM, socket.SOCK_STREAM]:
        # Try each stable IP with the current connection type.
        for ip_addr in STABLE_PUBLIC_IPS:
            try:
                # Use port 80 since some platforms (FreeBSD) panic on port 0.
                myip = get_localIP_to_remoteIP(conn_type, ip_addr, 80)
            except (socket.error, socket.timeout):
                # Ignore networking errors: keep trying the other connection
                # types and addresses; we raise below if everything fails.
                pass
            else:
                # Accept the first plausible (non-empty, non-wildcard) address.
                # (Was `myip != None`; identity comparison is the idiom.)
                if myip is not None and myip != '' and myip != "0.0.0.0":
                    return myip
    # Every probe failed: assume no internet connectivity.
    raise Exception("Cannot detect a connection to the Internet.")
# Manual smoke test: print the detected external IP when run as a script.
if __name__ == '__main__':
    print(getmyip())
|
""" Some common path functions and classes applications. """
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright (C) 2018-2019 Brian Allen Vanderburg II"
__license__ = "Apache License 2.0"
__all__ = ["AppPathsBase", "AppPaths"]
import os
import tempfile
from typing import Sequence, Optional
from . import platform
class AppPathsBase:
    """ Base class describing the directory layout of an application.

    Concrete subclasses supply the user/system data and configuration
    locations; this base provides derived cache, runtime and temp paths.
    """

    def __init__(self):
        """ Initialize with no temp directory created yet. """
        # Created lazily by the temp_dir property on first access.
        self._temp_dir = None

    # --- Abstract locations: subclasses decide the actual paths. ---

    @property
    def user_data_dir(self) -> str:
        """ The read/write directory for user data files.

        Returns
        -------
        str
            The path where user data files should be read and written.
        """
        raise NotImplementedError()

    @property
    def sys_data_dirs(self) -> Sequence[str]:
        """ Directories searched for system-wide data files.

        Returns
        -------
        Sequence[str]
            A list of directories for reading data files.
        """
        raise NotImplementedError()

    @property
    def user_config_dir(self) -> str:
        """ The read/write directory for user configuration files.

        Returns
        -------
        str
            The path where user configuration files should be read and
            written.
        """
        raise NotImplementedError()

    @property
    def sys_config_dirs(self) -> Sequence[str]:
        """ Directories searched for system-wide configuration files.

        Returns
        -------
        Sequence[str]
            A list of directories for reading configuration files.
        """
        raise NotImplementedError()

    # --- Derived locations with default implementations. ---

    @property
    def cache_dir(self) -> str:
        """ Directory for cached files: "cache" under the user data dir.

        Returns
        -------
        str
            Cache directory
        """
        return os.path.join(self.user_data_dir, "cache")

    @property
    def runtime_dir(self) -> str:
        """ Directory for runtime files (sockets): "run" under temp_dir.

        Returns
        -------
        str
            Directory where runtime files such as sockets should be created
        """
        return os.path.join(self.temp_dir, "run")

    @property
    def temp_dir(self) -> str:
        """ A lazily created scratch directory shared by repeated calls.

        A fresh directory is created on first use, or again if the
        previously created one no longer exists on disk.

        Returns
        -------
        str
            A temp directory for storing files.
        """
        current = self._temp_dir
        if not (current is not None and os.path.isdir(current)):
            current = self._temp_dir = tempfile.mkdtemp()
        return current
class AppPaths(AppPathsBase):
    """ Application paths resolved through the default platform backend.

    Stores the application name plus optional version, vendor and install
    prefix, and delegates every path lookup to the platform's path
    implementation.
    """

    def __init__(
        self,
        appname: str,
        version: Optional[str] = None,
        vendor: Optional[str] = None,
        prefix: Optional[str] = None
    ):
        """ Store the identification used to build platform paths.

        Parameters
        ----------
        appname : str
            The application name as would be used for a directory path.
        version : Optional[str]
            The application version as would be used for a path.
        vendor : Optional[str]
            The application vendor as would be used for a path.
        prefix : Optional[str]
            The install prefix; may be unused on some platforms or for
            some paths.
        """
        super().__init__()
        self._appname = appname
        self._version = version
        self._vendor = vendor
        self._prefix = prefix
        # Platform-specific backend shared by the properties below.
        self._paths = platform.path

    @property
    def user_data_dir(self):
        backend = self._paths
        return backend.get_user_data_dir(self._appname, self._version, self._vendor)

    @property
    def sys_data_dirs(self):
        backend = self._paths
        # NOTE(review): only this lookup receives the install prefix;
        # confirm the other sys_* lookups really don't need it.
        return backend.get_sys_data_dirs(
            self._appname, self._version, self._vendor, self._prefix
        )

    @property
    def user_config_dir(self):
        backend = self._paths
        return backend.get_user_config_dir(self._appname, self._version, self._vendor)

    @property
    def sys_config_dirs(self):
        backend = self._paths
        return backend.get_sys_config_dirs(self._appname, self._version, self._vendor)

    @property
    def cache_dir(self):
        backend = self._paths
        return backend.get_cache_dir(self._appname, self._version, self._vendor)

    @property
    def runtime_dir(self):
        backend = self._paths
        return backend.get_runtime_dir(self._appname, self._version, self._vendor)
|
import pytest
import cv2
from plantcv.plantcv import analyze_color, outputs
@pytest.mark.parametrize("colorspace", ["all", "lab", "hsv", "rgb"])
def test_analyze_color(colorspace, test_data):
    """Test for PlantCV."""
    # Start from a clean Outputs store so earlier tests can't leak results.
    outputs.clear()
    # Load the fixture RGB image and its binary mask.
    rgb = cv2.imread(test_data.small_rgb_img)
    bin_mask = cv2.imread(test_data.small_bin_img, -1)
    _ = analyze_color(rgb_img=rgb, mask=bin_mask, colorspaces=colorspace)
    assert outputs.observations['default']['hue_median']['value'] == 80.0
def test_analyze_color_bad_imgtype(test_data):
    """Test for PlantCV."""
    # A grayscale/binary image is not a valid RGB input and must be rejected.
    bad_img = cv2.imread(test_data.small_bin_img, -1)
    bin_mask = cv2.imread(test_data.small_bin_img, -1)
    with pytest.raises(RuntimeError):
        _ = analyze_color(rgb_img=bad_img, mask=bin_mask, hist_plot_type=None)
def test_analyze_color_bad_hist_type(test_data):
    """Test for PlantCV."""
    rgb = cv2.imread(test_data.small_rgb_img)
    bin_mask = cv2.imread(test_data.small_bin_img, -1)
    # An unsupported histogram type must be rejected.
    # TODO: change hist_plot_type to colorspaces after deprecation
    with pytest.raises(RuntimeError):
        _ = analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='bgr')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.