blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26b05b882a260c212c2dd400b26cb5baa8526671 | f1a91c56cef7f8f21c0e154118278f394acf8a0c | /app/util/import_util.py | ca363568c8866118a1bc41ab376ad7e8f2b11b6a | [] | no_license | imsazzad/fastapi-python-template | ad5935d556a4798a2504deaebf6b6a3a03ebb0a3 | ec28b908e0cf419083ce1ec8235a71ee542d85c8 | refs/heads/main | 2023-08-21T00:09:21.482963 | 2021-10-01T05:24:17 | 2021-10-01T05:24:17 | 412,339,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import glob
import importlib
import os
import pathlib
import sys
class ImportUtil:
    """Helpers for importing python modules dynamically at runtime."""

    @staticmethod
    def import_modules_from_directory_as_list(module_directory: str) -> list:
        """Import every top-level ``*.py`` module in *module_directory*.

        ``__init__.py`` package markers are skipped. The directory is added
        to ``sys.path`` so the modules are importable by bare name.

        Parameters
        ----------
        module_directory : str
            Filesystem path containing the ``.py`` files to import.

        Returns
        -------
        list
            The imported module objects, one per ``.py`` file found.
        """
        # Guard against growing sys.path with duplicates on repeated calls.
        if module_directory not in sys.path:
            sys.path.append(module_directory)
        py_files: list = glob.glob(os.path.join(module_directory, '*.py'))
        modules: list = []
        for py_file in py_files:
            module_name = pathlib.Path(py_file).stem
            # Package markers are not standalone modules; skip them.
            if module_name == '__init__':
                continue
            modules.append(importlib.import_module(module_name))
        return modules
| [
"abdu.hasib@Infolytx.com"
] | abdu.hasib@Infolytx.com |
df760f3fb2bae9441d342cf168781c8ce3d3cf92 | 261fa6004234ccae2b1a4ff455ae54aefecbb172 | /ui_extensions/content_export/views.py | cc9e021e8399ec531eb798666ee498596ae79847 | [
"Apache-2.0"
] | permissive | svang001/cloudbolt-forge | 671575eecd54e1207b7dde144db2fdb6c43c9ddf | 3796900115876f8a9ee333b75f45e3d60d7705d7 | refs/heads/master | 2023-02-23T23:03:33.225739 | 2021-01-19T20:09:21 | 2021-01-19T20:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | import requests
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.html import mark_safe
from cbhooks.models import (
HookPointAction, RecurringActionJob, ServerAction, ResourceAction, TriggerPoint
)
from extensions.models import UIExtension, XUIIndexer
from extensions.views import admin_extension
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import dialog_view
from utilities.permissions import cbadmin_required
from xui.content_export.forms import ExportContentForm
@admin_extension(title='Exportable Contents', description='All Exportable CloudBolt Contents')
@cbadmin_required
def export_content_list(request):
    """
    View for listing metadata for all exportable contents.

    Fetches the exportable-content payload from the local v2 API and
    flattens it into a single list of dicts for the template, tagging
    each entry with its source collection and the primary key the edit
    dialog needs.
    """
    proto = request.META['wsgi.url_scheme']
    host = request.META['HTTP_HOST']
    # NOTE(review): verify=False disables TLS certificate checking. The
    # request targets this same host, but confirm that is intentional.
    resp = requests.get(
        '{}://{}/api/v2/exportable-content/?version=dev'.format(proto, host),
        verify=False,
    )
    response = resp.json()
    from api.v2.serializers import keys_hyphens_to_underscores

    exportable_contents = []
    # Collections are processed in a fixed order so the rendered list
    # matches the previous behaviour. Every collection except
    # 'ui-extension-packages' carries its database id as the
    # second-to-last component of the package URL.
    collection_order = (
        'server-actions',
        'orchestration-actions',
        'ui-extension-packages',
        'blueprints',
        'recurring-jobs',
        'resource-actions',
    )
    for collection in collection_order:
        items = response.get(collection)
        if items is None:
            continue
        if collection == 'ui-extension-packages':
            # Re-index so a UIExtension row exists for every package.
            XUIIndexer().index()
        for item in items:
            if collection == 'ui-extension-packages':
                # UI extension URLs end with the extension *name*, which
                # must be resolved to a database id.
                ext_name = item['package-url'].split('/')[-1]
                item['id'] = UIExtension.objects.get(name=ext_name).id
            else:
                item['id'] = item['package-url'].split('/')[-2]
            item['collections'] = collection
            exportable_contents.append(keys_hyphens_to_underscores(item))

    list_context = {
        'exportable_contents': exportable_contents,
        'pagetitle': 'Exportable Contents',
    }
    return render(request, 'content_export/templates/list.html', list_context)
@dialog_view
@cbadmin_required
def export_content_edit(request, id=None, collections=''):
    """
    Edit exportable-content metadata for one item.

    Parameters
    ----------
    id : primary key of the object being edited.
    collections : slug naming the collection the object belongs to;
        selects which model the id refers to.

    Raises
    ------
    ValueError
        If ``collections`` is not a recognised collection slug.
    """
    # Map each collection slug to the model whose row is being edited.
    model_by_collection = {
        'blueprints': ServiceBlueprint,
        'resource-actions': ResourceAction,
        'server-actions': ServerAction,
        'recurring-jobs': RecurringActionJob,
        'orchestration-actions': HookPointAction,
        'ui-extension-packages': UIExtension,
    }
    try:
        model = model_by_collection[collections]
    except KeyError:
        # The original if/elif chain fell through to a NameError on
        # `instance` for unknown slugs; fail with a clear message instead.
        raise ValueError(
            "Unknown exportable content collection: {!r}".format(collections)
        )
    instance = model.objects.get(id=id)

    if request.method == 'POST':
        form = ExportContentForm(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            instance = form.save()
            msg = "Metadata details for {} have been saved.".format(instance)
            messages.success(request, msg)
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
        # Invalid POST falls through and re-renders the bound form so
        # validation errors are shown (same behaviour as before).
    else:
        form = ExportContentForm(instance=instance)

    return {
        'title': 'Edit Exportable Metadata',
        'form': form,
        'action_url': reverse('export_content_edit', args=[id, collections]),
        'use_ajax': True,
        'submit': 'Save',
        'extra_onready_js': mark_safe("$('.render_as_datepicker').datepicker({dateFormat: 'yy-mm-dd'});")
    }
| [
"klaratta@cloudboltsoftware.com"
] | klaratta@cloudboltsoftware.com |
1a07d3114e74fadea676842c7d35a5dae102c80b | 96fd91e48b5e08987206616d4a476f7fcb629742 | /packaging_project/pkg1/imported_module.py | b6a2cba73091d758f76b88bec7b639c917176043 | [
"MIT"
] | permissive | QikaiYang/ultimate-utils | 048efc2dd0812bd9fc9d24206acc753b288594bf | 50db0b96f2b3144ef008e29757990c688615951d | refs/heads/master | 2023-03-31T13:23:17.856199 | 2021-03-30T21:25:03 | 2021-03-30T21:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | print('\n------> you are importing: imported_module.py\n')
def func_from_imported_module():
    """Announce on stdout that this dynamically imported function ran."""
    message = "I'm a function from func_from_imported_module"
    print(message)
"miranda9@illinois.edu"
] | miranda9@illinois.edu |
490fcdfb16141de4f142150d27b614173af087da | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/quality_control_and_material_testing/doctype/bulk_density_of_masonary/bulk_density_of_masonary.py | 682281f533740a8c16ef57cb3acb6c2e523d8ca2 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class BulkDensityOfmasonary(Document):
    """Frappe DocType controller for 'Bulk Density Of masonary'.

    No custom server-side behaviour is defined; persistence and field
    handling are provided entirely by the framework's Document base
    class and the DocType schema.
    """

    pass
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
acdf56c82a6bb37ed814ba0a5223a77421137d5c | ef78bd58d61002f45778a40da7759ed0b1998cd3 | /code/transforms/univariategaussianization.py | 85eb0ed34aec6c919cee82f5578985a62cf4bd41 | [
"MIT"
] | permissive | afcarl/isa | 61e85c0c790c7cc357e0c29fc5bda948e9c77ce4 | f0497c0cc7bd72e0de7f4f9a8da40e214c22abe9 | refs/heads/master | 2020-03-19T21:36:06.716167 | 2013-01-28T18:32:30 | 2013-01-28T18:32:30 | 136,944,562 | 1 | 0 | null | 2018-06-11T15:20:45 | 2018-06-11T15:20:44 | null | UTF-8 | Python | false | false | 1,634 | py | __license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from scipy.special import erf, erfinv
from scipy.stats import norm
from scipy.optimize import bisect
from numpy import mean, sqrt, asarray, max, min, any
from transforms import Transform
import pdb
class UnivariateGaussianization(Transform):
    """Make 1-D data marginally Gaussian by composing the model CDF with
    the inverse standard-normal CDF.

    The statistical model is a univariate mixture of Gaussians (``mog``),
    which must expose ``cdf``, ``loglikelihood`` and a ``means`` attribute.
    """

    def __init__(self, mog):
        # mog: univariate mixture-of-Gaussians model used as the data CDF.
        self.mog = mog

    def apply(self, data):
        """Map `data` (any shape; flattened to 1 x N) to Gaussian space."""
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        # apply model CDF
        data = self.mog.cdf(data)

        # apply inverse Gaussian CDF
        result = erfinv(data * 2. - 1.)
        # clip where the CDF saturates at 0/1, so erfinv's +/-inf become
        # finite values before scaling
        result[result > 6.] = 6.
        result[result < -6.] = -6.

        # erfinv(2u - 1) * sqrt(2) is the standard-normal quantile of u
        return result * sqrt(2.)

    def inverse(self, data, max_iter=100):
        """Invert :meth:`apply` by bisecting the model CDF numerically.

        max_iter: maximum bisection iterations per data point.
        """
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        # apply Gaussian CDF
        data = norm.cdf(data)

        # apply inverse model CDF
        val_max = mean(self.mog.means) + 1.
        val_min = mean(self.mog.means) - 1.

        # NOTE: val_min/val_max persist across loop iterations, so the
        # bracket only ever widens; the widening loops below re-validate
        # it for each data point.
        for t in range(data.shape[1]):
            # make sure root lies between val_min and val_max
            while float(self.mog.cdf(val_min)) > data[0, t]:
                val_min -= 1.
            while float(self.mog.cdf(val_max)) < data[0, t]:
                val_max += 1.

            # find root numerically (result written back in place)
            data[0, t] = bisect(
                f=lambda x: float(self.mog.cdf(x)) - data[0, t],
                a=val_min,
                b=val_max,
                maxiter=max_iter,
                disp=False)

        return data

    def logjacobian(self, data):
        """Log absolute determinant of the Jacobian of :meth:`apply`,
        evaluated pointwise: log p_model(x) - log phi(apply(x))."""
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        data_ug = self.apply(data)

        return self.mog.loglikelihood(data) - norm.logpdf(data_ug)
| [
"lucas@theis.io"
] | lucas@theis.io |
0e4607a31bfe4d869574f4f1f64d51451bfc2ea9 | 386e9bf84397502fea2662df723f4b04ea1703c9 | /03_01_find_the_access_codes/answer.py | 19a52eed9d02b49a5efadc33b604fb4f8eba07e7 | [] | no_license | w9/google-foobar | 1f1b3e33519e41dc779a93efdf0514ab12848cfc | 92c9eacfd5eb3f41539bd94e842ea9567a264976 | refs/heads/master | 2021-01-23T19:25:44.403029 | 2017-09-09T23:46:49 | 2017-09-09T23:46:49 | 102,820,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from itertools import repeat
from functools import partial
def transition(m, a):
    """Sparse matrix-vector product.

    `m` is a 0/1 matrix in sparse form: row `r` lists the column indices
    that hold a 1.  The product therefore reduces to summing the selected
    entries of `a` for each row.
    """
    return [sum(a[col] for col in row) for row in m]
def get_trans_mat(l):
    """*Divisible and preceding in list* is a partial order.

    This function gives the transition matrix of the corresponding DAG
    in sparse form: entry `j` lists every earlier index `i` whose value
    `l[i]` divides `l[j]`.
    """
    # range(j) already guarantees i < j, so the original redundant
    # `i < j` test has been dropped.
    return [[i for i in range(j) if l[j] % l[i] == 0]
            for j in range(len(l))]
def answer(l):
    """Count index triples (i, j, k) with i < j < k where l[i] divides
    l[j] and l[j] divides l[k] ("lucky triples").

    Equivalently: the number of length-2 walks in the divisibility DAG,
    computed with two sparse matrix-vector products.
    """
    step = partial(transition, get_trans_mat(l))
    # paths of length 1 ending at each node, then extended by one edge
    length_one = step([1] * len(l))
    length_two = step(length_one)
    return sum(length_two)
| [
"zhuxun2@gmail.com"
] | zhuxun2@gmail.com |
5e67a2f09a383a84ad7ca7dd67a4e00604fb28ab | 313869ac13ee6cfdaf2de5cb76adf3dec981513f | /venv/Lib/site-packages/pandas/core/groupby/groupby.py | 580a12327ddf5080bc0457a1effcbeade1d3ea69 | [] | no_license | praful-pra1/Machine-Learning-GMCA | c4a5a4fa49b17bd0461d17b40dc169970ee2acde | f93dcf2b8557be4c57ea99f4e8a3756140d2ba6c | refs/heads/master | 2022-12-25T01:09:08.318614 | 2020-10-04T04:57:45 | 2020-10-04T04:57:45 | 301,042,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95,215 | py | """
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
from pandas.core.util.numba_ import maybe_use_numba
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of method that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_groupby_agg_method_template = """
Compute {fname} of group values.
Parameters
----------
numeric_only : bool, default {no}
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default {mc}
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed {fname} of values within each group.
"""
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a {klass} or when passed to {klass}.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
{klass}
See Also
--------
{klass}.groupby.apply
{klass}.groupby.transform
{klass}.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
{examples}
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_allowlist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
ax = self.obj._info_axis
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_allowlist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_allowlist
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._obj_with_exclusions, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._obj_with_exclusions), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self._python_apply_general(curried, self._obj_with_exclusions)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
    def __iter__(self):
        """
        Groupby iterator.

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        # Chunking is delegated entirely to the grouper.
        return self.grouper.get_iterator(self.obj, axis=self.axis)
    @Appender(
        _apply_docs["template"].format(
            input="dataframe", examples=_apply_docs["dataframe_examples"]
        )
    )
    def apply(self, func, *args, **kwargs):

        func = self._is_builtin_func(func)

        # this is needed so we don't try and wrap strings. If we could
        # resolve functions to their callable functions prior, this
        # wouldn't be needed
        if args or kwargs:
            if callable(func):

                # wrap the user function so extra args are forwarded and
                # numpy warnings are suppressed during evaluation
                @wraps(func)
                def f(g):
                    with np.errstate(all="ignore"):
                        return func(g, *args, **kwargs)

            elif hasattr(nanops, "nan" + func):
                # TODO: should we wrap this in to e.g. _is_builtin_func?
                f = getattr(nanops, "nan" + func)

            else:
                raise ValueError(
                    "func must be a callable if args or kwargs are supplied"
                )
        else:
            f = func

        # ignore SettingWithCopy here in case the user mutates
        with option_context("mode.chained_assignment", None):
            try:
                result = self._python_apply_general(f, self._selected_obj)
            except TypeError:
                # gh-20949
                # try again, with .apply acting as a filtering
                # operation, by excluding the grouping column
                # This would normally not be triggered
                # except if the udf is trying an operation that
                # fails on *some* columns, e.g. a numeric operation
                # on a string grouper column
                with _group_selection_context(self):
                    return self._python_apply_general(f, self._selected_obj)

        return result
def _python_apply_general(
self, f: F, data: FrameOrSeriesUnion
) -> FrameOrSeriesUnion:
"""
Apply function f in python space
Parameters
----------
f : callable
Function to apply
data : Series or DataFrame
Data to apply f to
Returns
-------
Series or DataFrame
data after applying f
"""
keys, values, mutated = self.grouper.apply(f, data, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
    def _iterate_slices(self) -> Iterable[Series]:
        # Abstract: subclasses yield each 1-dimensional slice (Series) to
        # operate on.
        raise AbstractMethodError(self)
    def transform(self, func, *args, **kwargs):
        # Abstract: implemented by SeriesGroupBy / DataFrameGroupBy subclasses.
        raise AbstractMethodError(self)
    def _cumcount_array(self, ascending: bool = True):
        """
        Number each item within its group, in original row order.

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Notes
        -----
        this is currently implementing sort=False
        (though the default is sort=True) for groupby in general
        """
        ids, _, ngroups = self.grouper.group_info
        sorter = get_group_index_sorter(ids, ngroups)
        ids, count = ids[sorter], len(ids)

        if count == 0:
            return np.empty(0, dtype=np.int64)

        # run: True at the first row of each group (in sorted order);
        # rep: length of each group.
        run = np.r_[True, ids[:-1] != ids[1:]]
        rep = np.diff(np.r_[np.nonzero(run)[0], count])
        out = (~run).cumsum()

        if ascending:
            # restart the running count at 0 on each group boundary
            out -= np.repeat(out[run], rep)
        else:
            # count down from (group length - 1) to 0 instead
            out = np.repeat(out[np.r_[run[1:], True]], rep) - out

        # invert the sort to restore the original row order
        rev = np.empty(count, dtype=np.intp)
        rev[sorter] = np.arange(count, dtype=np.intp)
        return out[rev].astype(np.int64, copy=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
filled_series = self.grouper.size().fillna(0)
assert filled_series is not None
return filled_series.gt(0).any() and func_nm not in base.cython_cast_blocklist
    def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
        """
        Transform each (numeric) column via the named cython kernel,
        silently skipping columns the kernel does not support.
        """
        output: Dict[base.OutputKey, np.ndarray] = {}
        for idx, obj in enumerate(self._iterate_slices()):
            name = obj.name
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            try:
                result, _ = self.grouper.transform(obj.values, how, **kwargs)
            except NotImplementedError:
                # no cython kernel for this dtype; drop the column
                continue

            if self._transform_should_cast(how):
                result = maybe_cast_result(result, obj, how=how)

            key = base.OutputKey(label=name, position=idx)
            output[key] = result

        if len(output) == 0:
            raise DataError("No numeric types to aggregate")

        return self._wrap_transformed_output(output)
    def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
        # Abstract: subclasses assemble aggregated columns into a Series/DataFrame.
        raise AbstractMethodError(self)
    def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
        # Abstract: subclasses assemble transformed columns into a Series/DataFrame.
        raise AbstractMethodError(self)
    def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
        # Abstract: subclasses combine per-group apply results into one object.
        raise AbstractMethodError(self)
    def _agg_general(
        self,
        numeric_only: bool = True,
        min_count: int = -1,
        *,
        alias: str,
        npfunc: Callable,
    ):
        """
        Shared aggregation path: try the cython kernel named ``alias`` first,
        falling back to aggregating in python space with ``npfunc``.
        """
        self._set_group_selection()

        # try a cython aggregation if we can
        try:
            return self._cython_agg_general(
                how=alias, alt=npfunc, numeric_only=numeric_only, min_count=min_count,
            )
        except DataError:
            # no numeric data: fall through to the python path
            pass
        except NotImplementedError as err:
            if "function is not implemented for this dtype" in str(
                err
            ) or "category dtype not supported" in str(err):
                # raised in _get_cython_function, in some cases can
                # be trimmed by implementing cython funcs for more dtypes
                pass
            else:
                raise

        # apply a non-cython aggregation
        result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
        return result
    def _cython_agg_general(
        self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
    ):
        """
        Aggregate each (numeric) column through the cython kernel ``how``,
        raising DataError when no column could be aggregated.
        """
        output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
        # Ideally we would be able to enumerate self._iterate_slices and use
        # the index from enumeration as the key of output, but ohlc in particular
        # returns a (n x 4) array. Output requires 1D ndarrays as values, so we
        # need to slice that up into 1D arrays
        idx = 0
        for obj in self._iterate_slices():
            name = obj.name
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            result, agg_names = self.grouper.aggregate(
                obj._values, how, min_count=min_count
            )

            if agg_names:
                # e.g. ohlc
                assert len(agg_names) == result.shape[1]
                for result_column, result_name in zip(result.T, agg_names):
                    key = base.OutputKey(label=result_name, position=idx)
                    output[key] = maybe_cast_result(result_column, obj, how=how)
                    idx += 1
            else:
                assert result.ndim == 1
                key = base.OutputKey(label=name, position=idx)
                output[key] = maybe_cast_result(result, obj, how=how)
                idx += 1

        if len(output) == 0:
            raise DataError("No numeric types to aggregate")

        return self._wrap_aggregated_output(output)
    def _python_agg_general(
        self, func, *args, engine="cython", engine_kwargs=None, **kwargs
    ):
        """
        Aggregate column-by-column in python space (optionally via numba),
        masking out empty groups when the grouper filters them.
        """
        func = self._is_builtin_func(func)
        if engine != "numba":
            f = lambda x: func(x, *args, **kwargs)
        # NOTE(review): when engine == "numba", ``f`` is never bound; the
        # fallback at the bottom would then raise NameError if ``output`` is
        # empty — confirm callers guard against this.

        # iterate through "columns" ex exclusions to populate output dict
        output: Dict[base.OutputKey, np.ndarray] = {}

        for idx, obj in enumerate(self._iterate_slices()):
            name = obj.name
            if self.grouper.ngroups == 0:
                # agg_series below assumes ngroups > 0
                continue

            if maybe_use_numba(engine):
                result, counts = self.grouper.agg_series(
                    obj,
                    func,
                    *args,
                    engine=engine,
                    engine_kwargs=engine_kwargs,
                    **kwargs,
                )
            else:
                try:
                    # if this function is invalid for this dtype, we will ignore it.
                    result, counts = self.grouper.agg_series(obj, f)
                except TypeError:
                    continue

            assert result is not None
            key = base.OutputKey(label=name, position=idx)
            output[key] = maybe_cast_result(result, obj, numeric_only=True)

        if len(output) == 0:
            return self._python_apply_general(f, self._selected_obj)

        if self.grouper._filter_empty_groups:
            # NOTE: uses ``counts`` from the last loop iteration for all columns
            mask = counts.ravel() > 0
            for key, result in output.items():

                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = ensure_float(values)

                output[key] = maybe_cast_result(values[mask], result)

        return self._wrap_aggregated_output(output)
    def _concat_objects(self, keys, values, not_indexed_same: bool = False):
        """
        Concatenate per-group results back into one object, restoring the
        original row order when the groups kept the caller's index.
        """
        from pandas.core.reshape.concat import concat

        def reset_identity(values):
            # reset the identities of the components
            # of the values to prevent aliasing
            for v in com.not_none(*values):
                ax = v._get_axis(self.axis)
                ax._reset_identity()
            return values

        if not not_indexed_same:
            result = concat(values, axis=self.axis)
            ax = self._selected_obj._get_axis(self.axis)

            # this is a very unfortunate situation
            # we can't use reindex to restore the original order
            # when the ax has duplicates
            # so we resort to this
            # GH 14776, 30667
            if ax.has_duplicates:
                indexer, _ = result.index.get_indexer_non_unique(ax.values)
                indexer = algorithms.unique1d(indexer)
                result = result.take(indexer, axis=self.axis)
            else:
                result = result.reindex(ax, axis=self.axis)

        elif self.group_keys:

            values = reset_identity(values)
            if self.as_index:

                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names

                result = concat(
                    values,
                    axis=self.axis,
                    keys=group_keys,
                    levels=group_levels,
                    names=group_names,
                    sort=False,
                )
            else:

                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(len(values)))
                result = concat(values, axis=self.axis, keys=keys)
        else:
            values = reset_identity(values)
            result = concat(values, axis=self.axis)

        if isinstance(result, Series) and self._selection_name is not None:
            result.name = self._selection_name

        return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
    def _bool_agg(self, val_test, skipna):
        """
        Shared func to call any / all Cython GroupBy implementations.
        """

        def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
            # coerce to bool, then view as uint8 for the cython kernel
            if is_object_dtype(vals):
                vals = np.array([bool(x) for x in vals])
            else:
                vals = vals.astype(bool)

            return vals.view(np.uint8), bool

        def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
            # cast the uint8 kernel result back to bool
            return result.astype(inference, copy=False)

        return self._get_cythonized_result(
            "group_any_all",
            aggregate=True,
            numeric_only=False,
            cython_dtype=np.dtype(np.uint8),
            needs_values=True,
            needs_mask=True,
            pre_processing=objs_to_bool,
            post_processing=result_to_bool,
            val_test=val_test,
            skipna=skipna,
        )
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def any(self, skipna: bool = True):
        """
        Return True if any value in the group is truthful, else False.

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing.

        Returns
        -------
        bool
        """
        return self._bool_agg("any", skipna)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def all(self, skipna: bool = True):
        """
        Return True if all values in the group are truthful, else False.

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing.

        Returns
        -------
        bool
        """
        return self._bool_agg("all", skipna)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def count(self):
        """
        Compute count of group, excluding missing values.

        Returns
        -------
        Series or DataFrame
            Count of values within each group.
        """
        # defined here for API doc; subclasses provide the implementation
        raise NotImplementedError
    @Substitution(name="groupby")
    @Substitution(see_also=_common_see_also)
    def mean(self, numeric_only: bool = True):
        """
        Compute mean of groups, excluding missing values.

        Parameters
        ----------
        numeric_only : bool, default True
            Include only float, int, boolean columns. If None, will attempt to use
            everything, then use only numeric data.

        Returns
        -------
        pandas.Series or pandas.DataFrame
        %(see_also)s
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5],
        ...                    'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])

        Groupby one column and return the mean of the remaining columns in
        each group.

        >>> df.groupby('A').mean()
             B         C
        A
        1  3.0  1.333333
        2  4.0  1.500000

        Groupby two columns and return the mean of the remaining column.

        >>> df.groupby(['A', 'B']).mean()
               C
        A B
        1 2.0  2
          4.0  1
        2 3.0  1
          5.0  2

        Groupby one column and return the mean of only particular column in
        the group.

        >>> df.groupby('A')['B'].mean()
        A
        1    3.0
        2    4.0
        Name: B, dtype: float64
        """
        # cython "mean" kernel with a python Series.mean fallback
        return self._cython_agg_general(
            "mean",
            alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
            numeric_only=numeric_only,
        )
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def median(self, numeric_only=True):
        """
        Compute median of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex

        Parameters
        ----------
        numeric_only : bool, default True
            Include only float, int, boolean columns. If None, will attempt to use
            everything, then use only numeric data.

        Returns
        -------
        Series or DataFrame
            Median of values within each group.
        """
        # cython "median" kernel with a python Series.median fallback
        return self._cython_agg_general(
            "median",
            alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
            numeric_only=numeric_only,
        )
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def std(self, ddof: int = 1):
        """
        Compute standard deviation of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        Returns
        -------
        Series or DataFrame
            Standard deviation of values within each group.
        """
        # std = sqrt of the cython-computed per-group variance
        return self._get_cythonized_result(
            "group_var_float64",
            aggregate=True,
            needs_counts=True,
            needs_values=True,
            needs_2d=True,
            cython_dtype=np.dtype(np.float64),
            post_processing=lambda vals, inference: np.sqrt(vals),
            ddof=ddof,
        )
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def sem(self, ddof: int = 1):
        """
        Compute standard error of the mean of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        Returns
        -------
        Series or DataFrame
            Standard error of the mean of values within each group.
        """
        # sem = std / sqrt(count), applied per group
        result = self.std(ddof=ddof)
        if result.ndim == 1:
            result /= np.sqrt(self.count())
        else:
            # skip the grouping/excluded columns when dividing
            cols = result.columns.get_indexer_for(
                result.columns.difference(self.exclusions).unique()
            )
            # TODO(GH-22046) - setting with iloc broken if labels are not unique
            # .values to remove labels
            result.iloc[:, cols] = (
                result.iloc[:, cols].values / np.sqrt(self.count().iloc[:, cols]).values
            )
        return result
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def size(self) -> FrameOrSeriesUnion:
        """
        Compute group sizes.

        Returns
        -------
        DataFrame or Series
            Number of rows in each group as a Series if as_index is True
            or a DataFrame if as_index is False.
        """
        result = self.grouper.size()

        # GH28330 preserve subclassed Series/DataFrames through calls
        if issubclass(self.obj._constructor, Series):
            result = self._obj_1d_constructor(result, name=self.obj.name)
        else:
            result = self._obj_1d_constructor(result)

        if not self.as_index:
            result = result.rename("size").reset_index()

        # unobserved groups get size 0 rather than being dropped
        return self._reindex_output(result, fill_value=0)
    @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0)
    def sum(self, numeric_only: bool = True, min_count: int = 0):
        # cython "add" kernel with np.sum fallback
        return self._agg_general(
            numeric_only=numeric_only, min_count=min_count, alias="add", npfunc=np.sum
        )
    @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0)
    def prod(self, numeric_only: bool = True, min_count: int = 0):
        # cython "prod" kernel with np.prod fallback
        return self._agg_general(
            numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
        )
    @doc(_groupby_agg_method_template, fname="min", no=False, mc=-1)
    def min(self, numeric_only: bool = False, min_count: int = -1):
        # cython "min" kernel with np.min fallback
        return self._agg_general(
            numeric_only=numeric_only, min_count=min_count, alias="min", npfunc=np.min
        )
    @doc(_groupby_agg_method_template, fname="max", no=False, mc=-1)
    def max(self, numeric_only: bool = False, min_count: int = -1):
        # cython "max" kernel with np.max fallback
        return self._agg_general(
            numeric_only=numeric_only, min_count=min_count, alias="max", npfunc=np.max
        )
@doc(_groupby_agg_method_template, fname="first", no=False, mc=-1)
def first(self, numeric_only: bool = False, min_count: int = -1):
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
"""Helper function for first item that isn't NA.
"""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="first",
npfunc=first_compat,
)
@doc(_groupby_agg_method_template, fname="last", no=False, mc=-1)
def last(self, numeric_only: bool = False, min_count: int = -1):
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
"""Helper function for last item that isn't NA.
"""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="last",
npfunc=last_compat,
)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def ohlc(self) -> DataFrame:
        """
        Compute open, high, low and close values of a group, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex

        Returns
        -------
        DataFrame
            Open, high, low and close values within each group.
        """
        # ohlc expands each column into four; run the kernel per column groupby
        return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def resample(self, rule, *args, **kwargs):
        """
        Provide resampling when using a TimeGrouper.

        Given a grouper, the function resamples it according to a string
        "string" -> "frequency".

        See the :ref:`frequency aliases <timeseries.offset_aliases>`
        documentation for more details.

        Parameters
        ----------
        rule : str or DateOffset
            The offset string or object representing target grouper conversion.
        *args, **kwargs
            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
            `on`, and other arguments of `TimeGrouper`.

        Returns
        -------
        Grouper
            Return a new grouper with our resampler appended.

        See Also
        --------
        Grouper : Specify a frequency to resample with when
            grouping by a key.
        DatetimeIndex.resample : Frequency conversion and resampling of
            time series.

        Examples
        --------
        >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
        >>> df = pd.DataFrame(data=4 * [range(2)],
        ...                   index=idx,
        ...                   columns=['a', 'b'])
        >>> df.iloc[2, 0] = 5
        >>> df
                            a  b
        2000-01-01 00:00:00  0  1
        2000-01-01 00:01:00  0  1
        2000-01-01 00:02:00  5  1
        2000-01-01 00:03:00  0  1

        Downsample the DataFrame into 3 minute bins and sum the values of
        the timestamps falling into a bin.

        >>> df.groupby('a').resample('3T').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  2
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:00:00  5  1

        Upsample the series into 30 second bins.

        >>> df.groupby('a').resample('30S').sum()
                            a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:00:30  0  0
            2000-01-01 00:01:00  0  1
            2000-01-01 00:01:30  0  0
            2000-01-01 00:02:00  0  0
            2000-01-01 00:02:30  0  0
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:02:00  5  1

        Resample by month. Values are assigned to the month of the period.

        >>> df.groupby('a').resample('M').sum()
                    a  b
        a
        0   2000-01-31  0  3
        5   2000-01-31  5  1

        Downsample the series into 3 minute bins as above, but close the right
        side of the bin interval.

        >>> df.groupby('a').resample('3T', closed='right').sum()
                                 a  b
        a
        0   1999-12-31 23:57:00  0  1
            2000-01-01 00:00:00  0  2
        5   2000-01-01 00:00:00  5  1

        Downsample the series into 3 minute bins and close the right side of
        the bin interval, but label each bin using the right edge instead of
        the left.

        >>> df.groupby('a').resample('3T', closed='right', label='right').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:03:00  0  2
        5   2000-01-01 00:03:00  5  1
        """
        # local import to avoid a circular dependency with pandas.core.resample
        from pandas.core.resample import get_resampler_for_grouping

        return get_resampler_for_grouping(self, rule, *args, **kwargs)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def rolling(self, *args, **kwargs):
        """
        Return a rolling grouper, providing rolling functionality per group.
        """
        # local import avoids a circular dependency with pandas.core.window
        from pandas.core.window import RollingGroupby

        return RollingGroupby(self, *args, **kwargs)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def expanding(self, *args, **kwargs):
        """
        Return an expanding grouper, providing expanding
        functionality per group.
        """
        # local import avoids a circular dependency with pandas.core.window
        from pandas.core.window import ExpandingGroupby

        return ExpandingGroupby(self, *args, **kwargs)
    def _fill(self, direction, limit=None):
        """
        Shared function for `pad` and `backfill` to call Cython method.

        Parameters
        ----------
        direction : {'ffill', 'bfill'}
            Direction passed to underlying Cython function. `bfill` will cause
            values to be filled backwards. `ffill` and any other values will
            default to a forward fill
        limit : int, default None
            Maximum number of consecutive values to fill. If `None`, this
            method will convert to -1 prior to passing to Cython

        Returns
        -------
        `Series` or `DataFrame` with filled values

        See Also
        --------
        pad
        backfill
        """
        # Need int value for Cython
        if limit is None:
            limit = -1

        return self._get_cythonized_result(
            "group_fillna_indexer",
            numeric_only=False,
            needs_mask=True,
            cython_dtype=np.dtype(np.int64),
            result_is_index=True,
            direction=direction,
            limit=limit,
        )
    @Substitution(name="groupby")
    def pad(self, limit=None):
        """
        Forward fill the values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series or DataFrame
            Object with missing values filled.

        See Also
        --------
        Series.pad
        DataFrame.pad
        Series.fillna
        DataFrame.fillna
        """
        return self._fill("ffill", limit=limit)

    # alias kept for API parity with Series/DataFrame.ffill
    ffill = pad
    @Substitution(name="groupby")
    def backfill(self, limit=None):
        """
        Backward fill the values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series or DataFrame
            Object with missing values filled.

        See Also
        --------
        Series.backfill
        DataFrame.backfill
        Series.fillna
        DataFrame.fillna
        """
        return self._fill("bfill", limit=limit)

    # alias kept for API parity with Series/DataFrame.bfill
    bfill = backfill
    @Substitution(name="groupby")
    @Substitution(see_also=_common_see_also)
    def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
        """
        Take the nth row from each group if n is an int, or a subset of rows
        if n is a list of ints.

        If dropna, will take the nth non-null row, dropna is either
        'all' or 'any'; this is equivalent to calling dropna(how=dropna)
        before the groupby.

        Parameters
        ----------
        n : int or list of ints
            A single nth value for the row or a list of nth values.
        dropna : None or str, optional
            Apply the specified dropna operation before counting which row is
            the nth row. Needs to be None, 'any' or 'all'.

        Returns
        -------
        Series or DataFrame
            N-th value within each group.
        %(see_also)s
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
        >>> g = df.groupby('A')
        >>> g.nth(0)
             B
        A
        1  NaN
        2  3.0
        >>> g.nth(1)
             B
        A
        1  2.0
        2  5.0
        >>> g.nth(-1)
             B
        A
        1  4.0
        2  5.0
        >>> g.nth([0, 1])
             B
        A
        1  NaN
        1  2.0
        2  3.0
        2  5.0

        Specifying `dropna` allows count ignoring ``NaN``

        >>> g.nth(0, dropna='any')
             B
        A
        1  2.0
        2  3.0

        NaNs denote group exhausted when using dropna

        >>> g.nth(3, dropna='any')
            B
        A
        1 NaN
        2 NaN

        Specifying `as_index=False` in `groupby` keeps the original index.

        >>> df.groupby('A', as_index=False).nth(1)
           A    B
        1  1  2.0
        4  2  5.0
        """
        valid_containers = (set, list, tuple)
        if not isinstance(n, (valid_containers, int)):
            raise TypeError("n needs to be an int or a list/set/tuple of ints")

        if not dropna:
            # fast path: select rows by cumulative position within each group

            if isinstance(n, int):
                nth_values = [n]
            elif isinstance(n, valid_containers):
                nth_values = list(set(n))

            nth_array = np.array(nth_values, dtype=np.intp)
            self._set_group_selection()

            # match non-negative n from the front, negative n from the back
            mask_left = np.in1d(self._cumcount_array(), nth_array)
            mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
            mask = mask_left | mask_right

            ids, _, _ = self.grouper.group_info

            # Drop NA values in grouping
            mask = mask & (ids != -1)

            out = self._selected_obj[mask]
            if not self.as_index:
                return out

            result_index = self.grouper.result_index
            out.index = result_index[ids[mask]]

            if not self.observed and isinstance(result_index, CategoricalIndex):
                out = out.reindex(result_index)

            out = self._reindex_output(out)
            return out.sort_index() if self.sort else out

        # dropna is truthy
        if isinstance(n, valid_containers):
            raise ValueError("dropna option with a list of nth values is not supported")

        if dropna not in ["any", "all"]:
            # Note: when agg-ing picker doesn't raise this, just returns NaN
            raise ValueError(
                "For a DataFrame groupby, dropna must be "
                "either None, 'any' or 'all', "
                f"(was passed {dropna})."
            )

        # old behaviour, but with all and any support for DataFrames.
        # modified in GH 7559 to have better perf
        max_len = n if n >= 0 else -1 - n
        dropped = self.obj.dropna(how=dropna, axis=self.axis)

        # get a new grouper for our dropped obj
        if self.keys is None and self.level is None:

            # we don't have the grouper info available
            # (e.g. we have selected out
            # a column that is not in the current object)
            axis = self.grouper.axis
            grouper = axis[axis.isin(dropped.index)]

        else:

            # create a grouper with the original parameters, but on dropped
            # object
            from pandas.core.groupby.grouper import get_grouper

            grouper, _, _ = get_grouper(
                dropped,
                key=self.keys,
                axis=self.axis,
                level=self.level,
                sort=self.sort,
                mutated=self.mutated,
            )

        grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
        sizes, result = grb.size(), grb.nth(n)
        mask = (sizes < max_len)._values

        # set the results which don't meet the criteria
        if len(result) and mask.any():
            result.loc[mask] = np.nan

        # reset/reindex to the original groups
        if len(self.obj) == len(dropped) or len(result) == len(
            self.grouper.result_index
        ):
            result.index = self.grouper.result_index
        else:
            result = result.reindex(self.grouper.result_index)

        return result
    def quantile(self, q=0.5, interpolation: str = "linear"):
        """
        Return group values at the given quantile, a la numpy.percentile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            Value(s) between 0 and 1 providing the quantile(s) to compute.
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
            Method to use when the desired quantile falls between two points.

        Returns
        -------
        Series or DataFrame
            Return type determined by caller of GroupBy object.

        See Also
        --------
        Series.quantile : Similar method for Series.
        DataFrame.quantile : Similar method for DataFrame.
        numpy.percentile : NumPy method to compute qth percentile.

        Examples
        --------
        >>> df = pd.DataFrame([
        ...     ['a', 1], ['a', 2], ['a', 3],
        ...     ['b', 1], ['b', 3], ['b', 5]
        ... ], columns=['key', 'val'])
        >>> df.groupby('key').quantile()
            val
        key
        a    2.0
        b    3.0
        """
        from pandas import concat

        def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
            # coerce input to float for the cython kernel, remembering the
            # dtype to restore afterwards
            if is_object_dtype(vals):
                raise TypeError(
                    "'quantile' cannot be performed against 'object' dtypes!"
                )

            inference = None
            if is_integer_dtype(vals.dtype):
                if is_extension_array_dtype(vals.dtype):
                    vals = vals.to_numpy(dtype=float, na_value=np.nan)
                inference = np.int64
            elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
                vals = vals.to_numpy(dtype=float, na_value=np.nan)
            elif is_datetime64_dtype(vals.dtype):
                inference = "datetime64[ns]"
                vals = np.asarray(vals).astype(float)

            return vals, inference

        def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
            if inference:
                # Check for edge case
                if not (
                    is_integer_dtype(inference)
                    and interpolation in {"linear", "midpoint"}
                ):
                    vals = vals.astype(inference)

            return vals

        if is_scalar(q):
            return self._get_cythonized_result(
                "group_quantile",
                aggregate=True,
                numeric_only=False,
                needs_values=True,
                needs_mask=True,
                cython_dtype=np.dtype(np.float64),
                pre_processing=pre_processor,
                post_processing=post_processor,
                q=q,
                interpolation=interpolation,
            )
        else:
            # list-like q: compute each quantile then stack with q as a level
            results = [
                self._get_cythonized_result(
                    "group_quantile",
                    aggregate=True,
                    needs_values=True,
                    needs_mask=True,
                    cython_dtype=np.dtype(np.float64),
                    pre_processing=pre_processor,
                    post_processing=post_processor,
                    q=qi,
                    interpolation=interpolation,
                )
                for qi in q
            ]
            result = concat(results, axis=0, keys=q)
            # fix levels to place quantiles on the inside
            # TODO(GH-10710): Ideally, we could write this as
            #  >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
            #  but this hits https://github.com/pandas-dev/pandas/issues/10710
            #  which doesn't reorder the list-like `q` on the inner level.
            order = list(range(1, result.index.nlevels)) + [0]

            # temporarily saves the index names
            index_names = np.array(result.index.names)

            # set index names to positions to avoid confusion
            result.index.names = np.arange(len(index_names))

            # place quantiles on the inside
            result = result.reorder_levels(order)

            # restore the index names in order
            result.index.names = index_names[order]

            # reorder rows to keep things sorted
            indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
            return result.take(indices)
    @Substitution(name="groupby")
    def ngroup(self, ascending: bool = True):
        """
        Number each group from 0 to the number of groups - 1.

        This is the enumerative complement of cumcount.  Note that the
        numbers given to the groups match the order in which the groups
        would be seen when iterating over the groupby object, not the
        order they are first observed.

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from number of group - 1 to 0.

        Returns
        -------
        Series
            Unique numbers for each group.

        See Also
        --------
        .cumcount : Number the rows in each group.

        Examples
        --------
        >>> df = pd.DataFrame({"A": list("aaabba")})
        >>> df
           A
        0  a
        1  a
        2  a
        3  b
        4  b
        5  a
        >>> df.groupby('A').ngroup()
        0    0
        1    0
        2    0
        3    1
        4    1
        5    0
        dtype: int64
        >>> df.groupby('A').ngroup(ascending=False)
        0    1
        1    1
        2    1
        3    0
        4    0
        5    1
        dtype: int64
        >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
        0    0
        1    0
        2    1
        3    3
        4    2
        5    0
        dtype: int64
        """
        with _group_selection_context(self):
            index = self._selected_obj.index
            # group_info[0] holds the per-row group id
            result = self._obj_1d_constructor(self.grouper.group_info[0], index)
            if not ascending:
                result = self.ngroups - 1 - result
            return result
@Substitution(name="groupby")
def cumcount(self, ascending: bool = True):
    """
    Number each item in each group from 0 to the length of that group - 1.

    Essentially this is equivalent to

    .. code-block:: python

        self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))

    Parameters
    ----------
    ascending : bool, default True
        If False, number in reverse, from length of group - 1 to 0.

    Returns
    -------
    Series
        Sequence number of each element within each group.

    See Also
    --------
    .ngroup : Number the groups themselves.

    Examples
    --------
    >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
    ...                   columns=['A'])
    >>> df
       A
    0  a
    1  a
    2  a
    3  b
    4  b
    5  a
    >>> df.groupby('A').cumcount()
    0    0
    1    1
    2    2
    3    0
    4    1
    5    3
    dtype: int64
    >>> df.groupby('A').cumcount(ascending=False)
    0    3
    1    2
    2    1
    3    1
    4    0
    5    0
    dtype: int64
    """
    with _group_selection_context(self):
        # Per-row position inside its own group, computed in Cython.
        counts = self._cumcount_array(ascending=ascending)
        return self._obj_1d_constructor(counts, self._selected_obj.index)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rank(
    self,
    method: str = "average",
    ascending: bool = True,
    na_option: str = "keep",
    pct: bool = False,
    axis: int = 0,
):
    """
    Provide the rank of values within each group.

    Parameters
    ----------
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        * average: average rank of group.
        * min: lowest rank in group.
        * max: highest rank in group.
        * first: ranks assigned in order they appear in the array.
        * dense: like 'min', but rank always increases by 1 between groups.
    ascending : bool, default True
        False for ranks by high (1) to low (N).
    na_option : {'keep', 'top', 'bottom'}, default 'keep'
        * keep: leave NA values where they are.
        * top: smallest rank if ascending.
        * bottom: smallest rank if descending.
    pct : bool, default False
        Compute percentage rank of data within each group.
    axis : int, default 0
        The axis of the object over which to compute the rank.

    Returns
    -------
    DataFrame with ranking of values within each group
    """
    # Validate before handing off to the Cython transform.
    valid_na_options = ("keep", "top", "bottom")
    if na_option not in valid_na_options:
        raise ValueError("na_option must be one of 'keep', 'top', or 'bottom'")
    return self._cython_transform(
        "rank",
        numeric_only=False,
        ties_method=method,
        ascending=ascending,
        na_option=na_option,
        pct=pct,
        axis=axis,
    )
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumprod(self, axis=0, *args, **kwargs):
    """
    Cumulative product for each group.

    Returns
    -------
    Series or DataFrame
    """
    nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
    # Only the index axis has a Cython fast path; other axes fall back
    # to a per-group apply.
    if axis == 0:
        return self._cython_transform("cumprod", **kwargs)
    return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumsum(self, axis=0, *args, **kwargs):
    """
    Cumulative sum for each group.

    Returns
    -------
    Series or DataFrame
    """
    nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
    # Only the index axis has a Cython fast path; other axes fall back
    # to a per-group apply.
    if axis == 0:
        return self._cython_transform("cumsum", **kwargs)
    return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummin(self, axis=0, **kwargs):
    """
    Cumulative min for each group.

    Returns
    -------
    Series or DataFrame
    """
    # Only the index axis has a Cython implementation; other axes use
    # numpy's accumulate via apply.
    if axis == 0:
        return self._cython_transform("cummin", numeric_only=False)
    return self.apply(lambda x: np.minimum.accumulate(x, axis))
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummax(self, axis=0, **kwargs):
    """
    Cumulative max for each group.

    Returns
    -------
    Series or DataFrame
    """
    # Only the index axis has a Cython implementation; other axes use
    # numpy's accumulate via apply.
    if axis == 0:
        return self._cython_transform("cummax", numeric_only=False)
    return self.apply(lambda x: np.maximum.accumulate(x, axis))
def _get_cythonized_result(
    self,
    how: str,
    cython_dtype: np.dtype,
    aggregate: bool = False,
    numeric_only: bool = True,
    needs_counts: bool = False,
    needs_values: bool = False,
    needs_2d: bool = False,
    min_count: Optional[int] = None,
    needs_mask: bool = False,
    needs_ngroups: bool = False,
    result_is_index: bool = False,
    pre_processing=None,
    post_processing=None,
    **kwargs,
):
    """
    Get result for Cythonized functions.

    Parameters
    ----------
    how : str, Cythonized function name to be called
    cython_dtype : np.dtype
        Type of the array that will be modified by the Cython call.
    aggregate : bool, default False
        Whether the result should be aggregated to match the number of
        groups
    numeric_only : bool, default True
        Whether only numeric datatypes should be computed
    needs_counts : bool, default False
        Whether the counts should be a part of the Cython call
    needs_values : bool, default False
        Whether the values should be a part of the Cython call
        signature
    needs_2d : bool, default False
        Whether the values and result of the Cython call signature
        are 2-dimensional.
    min_count : int, default None
        When not None, min_count for the Cython call
    needs_mask : bool, default False
        Whether boolean mask needs to be part of the Cython call
        signature
    needs_ngroups : bool, default False
        Whether number of groups is part of the Cython call signature
    result_is_index : bool, default False
        Whether the result of the Cython operation is an index of
        values to be retrieved, instead of the actual values themselves
    pre_processing : function, default None
        Function to be applied to `values` prior to passing to Cython.
        Function should return a tuple where the first element is the
        values to be passed to Cython and the second element is an optional
        type which the values should be converted to after being returned
        by the Cython operation. This function is also responsible for
        raising a TypeError if the values have an invalid type. Raises
        if `needs_values` is False.
    post_processing : function, default None
        Function to be applied to result of Cython function. Should accept
        an array of values as the first argument and type inferences as its
        second argument, i.e. the signature should be
        (ndarray, Type).
    **kwargs : dict
        Extra arguments to be passed back to Cython funcs

    Returns
    -------
    `Series` or `DataFrame` with filled values
    """
    # Validate mutually-exclusive / dependent flag combinations up front.
    if result_is_index and aggregate:
        raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
    if post_processing:
        if not callable(post_processing):
            raise ValueError("'post_processing' must be a callable!")
    if pre_processing:
        if not callable(pre_processing):
            raise ValueError("'pre_processing' must be a callable!")
        if not needs_values:
            raise ValueError(
                "Cannot use 'pre_processing' without specifying 'needs_values'!"
            )

    grouper = self.grouper
    labels, _, ngroups = grouper.group_info
    output: Dict[base.OutputKey, np.ndarray] = {}
    base_func = getattr(libgroupby, how)

    # Remembers the last pre_processing TypeError so it can be re-raised
    # if *every* column was rejected.
    error_msg = ""
    for idx, obj in enumerate(self._iterate_slices()):
        name = obj.name
        values = obj._values

        if numeric_only and not is_numeric_dtype(values):
            continue

        # Aggregations produce one row per group; transforms one per value.
        if aggregate:
            result_sz = ngroups
        else:
            result_sz = len(values)

        result = np.zeros(result_sz, dtype=cython_dtype)
        if needs_2d:
            result = result.reshape((-1, 1))
        # The Cython signature is built incrementally via partial application;
        # the order of these partial() calls mirrors the positional order of
        # the Cython function's arguments, so it must not be changed.
        func = partial(base_func, result)

        inferences = None

        if needs_counts:
            counts = np.zeros(self.ngroups, dtype=np.int64)
            func = partial(func, counts)

        if needs_values:
            vals = values
            if pre_processing:
                try:
                    vals, inferences = pre_processing(vals)
                except TypeError as e:
                    # Skip this column, but remember why in case nothing
                    # at all could be processed.
                    error_msg = str(e)
                    continue
            if needs_2d:
                vals = vals.reshape((-1, 1))
            vals = vals.astype(cython_dtype, copy=False)
            func = partial(func, vals)

        func = partial(func, labels)

        if min_count is not None:
            func = partial(func, min_count)

        if needs_mask:
            mask = isna(values).view(np.uint8)
            func = partial(func, mask)

        if needs_ngroups:
            func = partial(func, ngroups)

        func(**kwargs)  # Call func to modify indexer values in place

        if needs_2d:
            result = result.reshape(-1)

        if result_is_index:
            # The Cython call produced positions; gather the actual values.
            result = algorithms.take_nd(values, result)

        if post_processing:
            result = post_processing(result, inferences)

        key = base.OutputKey(label=name, position=idx)
        output[key] = result

    # error_msg is "" on an frame/series with no rows or columns
    if len(output) == 0 and error_msg != "":
        raise TypeError(error_msg)

    if aggregate:
        return self._wrap_aggregated_output(output)
    else:
        return self._wrap_transformed_output(output)
@Substitution(name="groupby")
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
    """
    Shift each group by periods observations.

    If freq is passed, the index will be increased using the periods and the freq.

    Parameters
    ----------
    periods : int, default 1
        Number of periods to shift.
    freq : str, optional
        Frequency string.
    axis : axis to shift, default 0
        Shift direction.
    fill_value : optional
        The scalar value to use for newly introduced missing values.

        .. versionadded:: 0.24.0

    Returns
    -------
    Series or DataFrame
        Object shifted within each group.

    See Also
    --------
    Index.shift : Shift values of Index.
    tshift : Shift the time index, using the index's frequency
        if available.
    """
    # The Cython fast path only covers the plain case: axis 0, no freq,
    # and no explicit fill value.
    fast_path = freq is None and axis == 0 and isna(fill_value)
    if not fast_path:
        return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
    return self._get_cythonized_result(
        "group_shift_indexer",
        numeric_only=False,
        cython_dtype=np.dtype(np.int64),
        needs_ngroups=True,
        result_is_index=True,
        periods=periods,
    )
@Substitution(name="groupby")
@Appender(_common_see_also)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0):
    """
    Calculate pct_change of each value to previous entry in group.

    Returns
    -------
    Series or DataFrame
        Percentage changes within each group.
    """
    # Non-default freq/axis have no fast path; delegate per group.
    if freq is not None or axis != 0:
        shift_kwargs = dict(
            periods=periods,
            fill_method=fill_method,
            limit=limit,
            freq=freq,
            axis=axis,
        )
        return self.apply(lambda x: x.pct_change(**shift_kwargs))

    if fill_method is None:  # GH30463
        fill_method = "pad"
        limit = 0
    filled = getattr(self, fill_method)(limit=limit)
    shifted = filled.groupby(self.grouper.codes).shift(periods=periods, freq=freq)
    return (filled / shifted) - 1
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def head(self, n=5):
    """
    Return first n rows of each group.

    Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
    from the original DataFrame with original index and order preserved
    (``as_index`` flag is ignored).

    Does not work for negative values of `n`.

    Returns
    -------
    Series or DataFrame
    %(see_also)s
    Examples
    --------

    >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
    ...                   columns=['A', 'B'])
    >>> df.groupby('A').head(1)
       A  B
    0  1  2
    2  5  6
    >>> df.groupby('A').head(-1)
    Empty DataFrame
    Columns: [A, B]
    Index: []
    """
    self._reset_group_selection()
    # Keep the rows whose within-group position is below n.
    return self._selected_obj[self._cumcount_array() < n]
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def tail(self, n=5):
    """
    Return last n rows of each group.

    Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
    from the original DataFrame with original index and order preserved
    (``as_index`` flag is ignored).

    Does not work for negative values of `n`.

    Returns
    -------
    Series or DataFrame
    %(see_also)s
    Examples
    --------

    >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
    ...                   columns=['A', 'B'])
    >>> df.groupby('A').tail(1)
       A  B
    1  a  2
    3  b  2
    >>> df.groupby('A').tail(-1)
    Empty DataFrame
    Columns: [A, B]
    Index: []
    """
    self._reset_group_selection()
    # Counting from the end, keep rows whose position is below n.
    return self._selected_obj[self._cumcount_array(ascending=False) < n]
def _reindex_output(
    self, output: OutputFrameOrSeries, fill_value: Scalar = np.NaN
) -> OutputFrameOrSeries:
    """
    If we have categorical groupers, then we might want to make sure that
    we have a fully re-indexed output to the levels. This means expanding
    the output space to accommodate all values in the cartesian product of
    our groups, regardless of whether they were observed in the data or
    not. This will expand the output space if there are missing groups.

    The method returns early without modifying the input if the number of
    groupings is less than 2, self.observed == True or none of the groupers
    are categorical.

    Parameters
    ----------
    output : Series or DataFrame
        Object resulting from grouping and applying an operation.
    fill_value : scalar, default np.NaN
        Value to use for unobserved categories if self.observed is False.

    Returns
    -------
    Series or DataFrame
        Object (potentially) re-indexed to include all possible groups.
    """
    groupings = self.grouper.groupings
    if groupings is None:
        return output
    elif len(groupings) == 1:
        return output

    # if we only care about the observed values
    # we are done
    elif self.observed:
        return output

    # reindexing only applies to a Categorical grouper
    elif not any(
        isinstance(ping.grouper, (Categorical, CategoricalIndex))
        for ping in groupings
    ):
        return output

    levels_list = [ping.group_index for ping in groupings]
    # Cartesian product of every grouping's levels, lexsorted so the
    # expanded output has a deterministic order.
    index, _ = MultiIndex.from_product(
        levels_list, names=self.grouper.names
    ).sortlevel()

    if self.as_index:
        # Groups live on the axis: a plain reindex along that axis suffices.
        d = {
            self.obj._get_axis_name(self.axis): index,
            "copy": False,
            "fill_value": fill_value,
        }
        return output.reindex(**d)

    # GH 13204
    # Here, the categorical in-axis groupers, which need to be fully
    # expanded, are columns in `output`. An idea is to do:
    # output = output.set_index(self.grouper.names)
    #                .reindex(index).reset_index()
    # but special care has to be taken because of possible not-in-axis
    # groupers.
    # So, we manually select and drop the in-axis grouper columns,
    # reindex `output`, and then reset the in-axis grouper columns.

    # Select in-axis groupers
    in_axis_grps = (
        (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis
    )
    g_nums, g_names = zip(*in_axis_grps)

    output = output.drop(labels=list(g_names), axis=1)

    # Set a temp index and reindex (possibly expanding)
    output = output.set_index(self.grouper.result_index).reindex(
        index, copy=False, fill_value=fill_value
    )

    # Reset in-axis grouper columns
    # (using level numbers `g_nums` because level names may not be unique)
    output = output.reset_index(level=g_nums)

    return output.reset_index(drop=True)
def sample(
    self,
    n: Optional[int] = None,
    frac: Optional[float] = None,
    replace: bool = False,
    weights: Optional[Union[Sequence, Series]] = None,
    random_state=None,
):
    """
    Return a random sample of items from each group.

    You can use `random_state` for reproducibility.

    .. versionadded:: 1.1.0

    Parameters
    ----------
    n : int, optional
        Number of items to return for each group. Cannot be used with
        `frac` and must be no larger than the smallest group unless
        `replace` is True. Default is one if `frac` is None.
    frac : float, optional
        Fraction of items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : list-like, optional
        Default None results in equal probability weighting.
        If passed a list-like then values must have the same length as
        the underlying DataFrame or Series object and will be used as
        sampling probabilities after normalization within each group.
        Values must be non-negative with at least one positive element
        within each group.
    random_state : int, array-like, BitGenerator, np.random.RandomState, optional
        If int, array-like, or BitGenerator (NumPy>=1.17), seed for
        random number generator
        If np.random.RandomState, use as numpy RandomState object.

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing items randomly
        sampled within each group from the caller object.

    See Also
    --------
    DataFrame.sample: Generate random samples from a DataFrame object.
    numpy.random.choice: Generate a random sample from a given 1-D numpy
        array.

    Examples
    --------
    >>> df = pd.DataFrame(
    ...     {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
    ... )

    Select one row at random for each distinct value in column a. The
    `random_state` argument can be used to guarantee reproducibility:

    >>> df.groupby("a").sample(n=1, random_state=1)
            a  b
    4  black  4
    2   blue  2
    1    red  1

    Set `frac` to sample fixed proportions rather than counts:

    >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
    5    5
    2    2
    0    0
    Name: b, dtype: int64

    Control sample probabilities within groups by setting weights:

    >>> df.groupby("a").sample(
    ...     n=1,
    ...     weights=[1, 1, 1, 0, 0, 1],
    ...     random_state=1,
    ... )
            a  b
    5  black  5
    2   blue  2
    0    red  0
    """
    from pandas.core.reshape.concat import concat

    # Align the caller-supplied weights with the selected object's index,
    # then slice one weight vector per group; None means uniform weights.
    if weights is None:
        group_weights = [None] * self.ngroups
    else:
        aligned = Series(weights, index=self._selected_obj.index)
        group_weights = [aligned[idx] for idx in self.indices.values()]

    # Normalize the seed once so every group draws from the same RNG.
    if random_state is not None:
        random_state = com.random_state(random_state)

    pieces = []
    for (_, group), w in zip(self, group_weights):
        pieces.append(
            group.sample(
                n=n, frac=frac, replace=replace, weights=w, random_state=random_state
            )
        )
    return concat(pieces, axis=self.axis)
@doc(GroupBy)
def get_groupby(
    obj: NDFrame,
    by: Optional[_KeysArgType] = None,
    axis: int = 0,
    level=None,
    grouper: "Optional[ops.BaseGrouper]" = None,
    exclusions=None,
    selection=None,
    as_index: bool = True,
    sort: bool = True,
    group_keys: bool = True,
    squeeze: bool = False,
    observed: bool = False,
    mutated: bool = False,
    dropna: bool = True,
) -> GroupBy:
    # Dispatch on the type of `obj`; the concrete GroupBy classes are
    # imported lazily to avoid a circular import with groupby.generic.
    klass: Type[GroupBy]
    if isinstance(obj, Series):
        from pandas.core.groupby.generic import SeriesGroupBy

        klass = SeriesGroupBy
    elif isinstance(obj, DataFrame):
        from pandas.core.groupby.generic import DataFrameGroupBy

        klass = DataFrameGroupBy
    else:
        raise TypeError(f"invalid type: {obj}")

    return klass(
        obj=obj,
        keys=by,
        axis=axis,
        level=level,
        grouper=grouper,
        exclusions=exclusions,
        selection=selection,
        as_index=as_index,
        sort=sort,
        group_keys=group_keys,
        squeeze=squeeze,
        observed=observed,
        mutated=mutated,
        dropna=dropna,
    )
| [
"prafulpar"
] | prafulpar |
e4261450da05009ae1e965dc60840264ffe2a1e9 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_list_nested-32.py | 8e5e853af19d4b97d79508438d9d61dc0c5fdca2 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x:int = 0
y:int = 0
z:[int] = None
$ID = [1, 2, 3]
for x in z:
for y in z:
print(x * y)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
a32469f383f73a5e739265066cfc490c1b37b63d | 82eba08b9a7ee1bd1a5f83c3176bf3c0826a3a32 | /ZmailServer/src/python/pylibs/conf.py | 7077b51dcd1f57e8b2f178471fe6c88beda587ce | [
"MIT"
] | permissive | keramist/zmailserver | d01187fb6086bf3784fe180bea2e1c0854c83f3f | 762642b77c8f559a57e93c9f89b1473d6858c159 | refs/heads/master | 2021-01-21T05:56:25.642425 | 2013-10-21T11:27:05 | 2013-10-22T12:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | #
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2010, 2011, 2012 VMware, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
import os
class Config:
    """Runtime configuration for the zmconfigd daemon (Python 2 code).

    Holds a set of well-known attributes derived from zmail local config,
    plus a generic dict-style store (see __setitem__/__getitem__).
    """

    def __init__(self):
        self.config = {}
        self.progname = "zmconfigd"
        # Prefer the hostname from the environment; otherwise shell out
        # to zmhostname.
        if (os.getenv("zmail_server_hostname") is not None):
            self.hostname = os.getenv("zmail_server_hostname")
        else:
            self.hostname = os.popen("/opt/zmail/bin/zmhostname").readline().strip()
        # Without a hostname the daemon cannot operate; bail out hard.
        if (self.hostname is None or self.hostname == ""):
            os._exit(1)
        self.wd_all = False
        self.debug = False
        self.baseDir = "/opt/zmail"
        # Numeric log level -> human-readable label.
        self.logStatus = {
            4 : "Debug",
            3 : "Info",
            2 : "Warning",
            1 : "Error",
            0 : "Fatal"
        }
        self.configFile = self.baseDir+"/conf/zmconfigd.cf";
        self.logFile = self.baseDir+"/log/"+self.progname+".log";
        self.pidFile = self.baseDir+"/log/"+self.progname+".pid";
        # Poll interval in seconds; shortened while debugging.
        self.interval = 60
        if self.debug:
            self.interval = 10
        self.restartconfig = False
        self.watchdog = True
        self.wd_list = [ "antivirus" ]
        self.loglevel = 3

    def __setitem__(self, key, val):
        self.config[key] = val

    def __getitem__(self, key):
        # Missing keys yield None instead of raising.
        # NOTE: Python 2 except syntax kept intact; this module is py2.
        try:
            return self.config[key]
        except Exception, e:
            return None

    def setVals(self, state):
        """Refresh settings from the parsed local config carried by `state`."""
        self.ldap_is_master = state.localconfig["ldap_is_master"]
        self.ldap_root_password = state.localconfig["ldap_root_password"]
        self.ldap_master_url = state.localconfig["ldap_master_url"]
        # Defaults are re-applied before each optional override below.
        self.loglevel = 3
        if state.localconfig["ldap_starttls_required"] is not None:
            self.ldap_starttls_required = (state.localconfig["ldap_starttls_required"].upper() != "FALSE")
        if state.localconfig["zmconfigd_log_level"] is not None:
            self.loglevel = int(state.localconfig["zmconfigd_log_level"])
        self.interval = 60
        if state.localconfig["zmconfigd_interval"] is not None and state.localconfig["zmconfigd_interval"] != "":
            self.interval = int(state.localconfig["zmconfigd_interval"])
        self.debug = False
        if state.localconfig["zmconfigd_debug"] is not None:
            self.debug = state.localconfig["zmconfigd_debug"]
        if state.localconfig["zmconfigd_watchdog"] is not None:
            self.watchdog = (state.localconfig["zmconfigd_watchdog"].upper() != "FALSE")
        if state.localconfig["zmconfigd_enable_config_restarts"] is not None:
            self.restartconfig = (state.localconfig["zmconfigd_enable_config_restarts"].upper() != "FALSE")
        if state.localconfig["zmconfigd_watchdog_services"] is not None:
            self.wd_list = state.localconfig["zmconfigd_watchdog_services"].split()
| [
"bourgerie.quentin@gmail.com"
] | bourgerie.quentin@gmail.com |
4316cce4e442be02266ab44c5875235bff98b3af | 62244ade4bfc240d45612b5aee8db70815e5aaea | /application/routes.py | d83ff1a106a3d6b1990ab5e7b9b3103267fd578f | [] | no_license | Michiboi29/testtuto | 6c9a0f0afa7be5a1b4798a6d0276fee065f3a16c | 46bf4e861ef005f56c3c77309474591105b02108 | refs/heads/master | 2022-04-15T06:52:22.335495 | 2020-03-20T05:47:27 | 2020-03-20T05:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | from application import app
from flask import render_template, flash, redirect, url_for, request
from application.forms import LoginForm
from Api.sql_fonctions import sql_read, sql_insert
@app.route('/')
@app.route('/index')
def index():
    """Render the home page, seeding the 'ami' table with demo rows."""
    # Reads the login form's username field; presumably None outside a
    # submitted-form request context -- TODO confirm.
    toi = LoginForm().username.data
    if toi is None:
        toi = 'inconnu'
    # NOTE(review): module-level `user` global is rebuilt on every request;
    # flask.session would be the usual place for per-user state.
    global user
    user = {'username': toi}
    # Demo inserts run on every page view (duplicates are possible; the
    # read below asks for distinct rows).
    sql_insert('ami', prenom='etienne', nom='mich', age=20)
    sql_insert('ami', prenom='bob', sex='M')
    amis = sql_read('ami', distinct=True)
    return render_template('index.html', title='Home', user=user, amis=amis)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Handle the inscription form: flash a message on submit, else re-render."""
    form = LoginForm()
    if form.validate_on_submit():
        flash("Yo {} essai de s'inscrire, je me souvien tu d'une âme ? {}".format(
            form.username.data, form.remember_me.data))
        return index()
    # Fix: the page title was misspelled 'Insciption'.
    return render_template('login.html', title='Inscription', form=form)
| [
"emichaud29@gmail.com"
] | emichaud29@gmail.com |
1a4a25bdcf44d7428170dabdff37efd293eeb2bd | 290c4de61df3c6c1abe3bdf0203145fbe520225c | /celestia/utility/config.py | 1045050afb8152d5fa1e78d2a4ac779c33a1158a | [
"BSD-2-Clause"
] | permissive | kidaak/CelestiaSunrise | e1634b9daf620528a9713ba01a4561de8fa82170 | 5ace8cbf517e09e198a62e6dd1733de588ae2cfb | refs/heads/master | 2021-01-24T21:19:47.927620 | 2015-08-01T03:52:38 | 2015-08-01T03:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: config.py
# by Arzaroth Lekva
# arzaroth@arzaroth.com
#
import sys
import json
from clint import resources
CONFIG_FILE = "config.json"
BASE_CONFIG = {
"startup_check": False
}
class ClassProperty(property):
    """Read-only property usable on the class itself.

    Wraps a classmethod; attribute access on the owner class binds the
    wrapped classmethod to that class and invokes it.
    """

    def __get__(self, obj, objtype):
        # self.fget is the classmethod object; bind it to the owner class
        # (the instance, if any, is irrelevant) and call it.
        bound = self.fget.__get__(None, objtype)
        return bound()
class Config(object):
    """Lazily-loaded, class-level application configuration.

    The parsed configuration dict is cached on the class. The first access
    reads it from the clint user-resource file, creating that file with
    BASE_CONFIG defaults when it does not exist yet.
    """
    # Cached configuration dict; populated on first access to `config`.
    _config = None

    @ClassProperty
    @classmethod
    def config(cls):
        """Return the cached config dict, loading (or creating) it on first use."""
        if cls._config is None:
            raw = resources.user.read(CONFIG_FILE)
            if raw is None:
                # First run: cache the defaults *before* calling commit().
                # commit() re-reads cls.config through the class property, so
                # caching first fixes the infinite recursion the old code hit
                # (config -> commit -> config -> ...) when no file existed.
                cls._config = BASE_CONFIG
                cls.commit()
            else:
                cls._config = json.loads(raw)
        return cls._config

    @classmethod
    def commit(cls):
        """Serialize the current config dict to the user-resource file."""
        resources.user.write(CONFIG_FILE, json.dumps(cls.config))
resources.init('Arzaroth', 'CelesiaSunrise')
| [
"lekva@arzaroth.com"
] | lekva@arzaroth.com |
574587cc19a341530f3d079bed48ec4e649f274e | 07efa2ec19ec3a4090e20be1dff7576a98fd341c | /Project Euler/p36.py | 94de73e6225875ad388c90bf18f65be784051b97 | [] | no_license | raywong220/Backyard | ff3678d208857b63728fd9ebe744884b00ee947b | eab91b3c122aa24c68fc547c485ed3681c8bf5b1 | refs/heads/master | 2023-02-03T16:52:52.141134 | 2020-12-21T11:06:49 | 2020-12-21T11:06:49 | 254,303,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
# Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
from time import process_time
sum = 0
# Since even number always ends with 0 in binary, not Palindromic
for i in range(1, 1000001, 2):
b = bin(i)[2:]
if str(i) == str(i)[::-1] and b == b[::-1]:
sum += i
print(sum)
print(process_time()) | [
"noreply@github.com"
] | noreply@github.com |
56ab994254b3b1de4c46198dd4067152d1c0b8b9 | 47703c8cfd6b6cbbec7ceb2509da1bc049dd621f | /udoy_013.py | de28dafdc46a3bbd08f2137b5bbcbf693cf22f3f | [] | no_license | udoy382/PyCode | 0638a646bd4cac4095a58135aea97ba4ccfd5535 | 69efde580f019cd41061501554b6193688a0a06f | refs/heads/main | 2023-03-26T17:45:15.943887 | 2021-03-25T14:22:42 | 2021-03-25T14:22:42 | 324,485,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # Short Hand If Else Notation In Python #22
a = int(input("enter a\n"))
b = int(input("enter b\n"))
# 1st
# if a>b: print("A B se bada hai bhai")
# 2nd
# print("B A se bada hai bhai") if a<b else print("A B se bada hai bhai") | [
"srudoy436@gmail.com"
] | srudoy436@gmail.com |
c93bf244e3f0b22a16f1e79d6056e4dbe376e1f5 | acba9be504eb44718edd8daa10bd7e36e6d9f3ee | /dstbasic/l4_uname_file.py | 1b9dc4220b8027c17d931d4763e74bb5dc1f46dc | [] | no_license | coderdr31/DNote | 48901b3bb7fb38b0e041eb6f33d547ac44f31b78 | 23d43792befd6457a68f1d3f986d72fdfcdffdad | refs/heads/master | 2021-10-08T02:33:02.484256 | 2018-12-06T14:03:16 | 2018-12-06T14:03:16 | 114,640,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | #!/usr/bin/python3
# coding: utf-8
##################################################################
## uname 查看版本
uname -m # i686-32位系统, x86_64-64位系统
##################################################################
## file 判断文件类型和文件编码格式
file -b # 列出文件辨识结果时,不显示文件名称
| [
"1683751393@qq.com"
] | 1683751393@qq.com |
bb9898b19c06b48df34befa014ed0e706c793736 | c88e82e24d79d4e6eed5313883239ee5cf7fab8d | /untitled/manage.py | e2803dae0e77da757ffafab6cc275fa3a5728fad | [
"MIT"
] | permissive | SamuelPhases/Django-bootcamp | b0a5274eeef85b31362bf1577a13eea5fd7b6692 | ff6dbeb9e1470fe1d003c0e170426bfaf7912060 | refs/heads/master | 2020-08-05T16:45:09.751611 | 2019-10-22T16:14:52 | 2019-10-22T16:14:52 | 212,619,651 | 0 | 0 | MIT | 2019-10-22T16:14:53 | 2019-10-03T15:55:53 | Python | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'untitled.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Delegate to Django's CLI dispatcher (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"obajisamuelekene@gmail.com"
] | obajisamuelekene@gmail.com |
b6415094da921188a6c07160bf88440442a8f16d | 049e2fab5e9e8f248e537cbada15d60d60536990 | /environment/env_multi.py | be5637dedf9dd6ef8320973bbc255ebc9740da5c | [
"MIT"
] | permissive | RubenPants/RobotSimulator2D | adfd8c16ec48b34419cae096d16e5e6714410407 | 334d7b9cab0edb22d4670cfaf39fbed76c351758 | refs/heads/master | 2023-05-14T20:09:44.604695 | 2020-07-11T14:16:58 | 2020-07-11T14:16:58 | 223,198,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,070 | py | """
env_multi.py
Environment where a single genome gets evaluated over multiple games. This environment will be called in a process.
"""
import sys
from config import Config
from environment.entities.game import get_game
from population.population import Population
from utils.dictionary import D_DONE, D_SENSOR_LIST
class MultiEnvironment:
""" This class provides an environment to evaluate a single genome on multiple games. """
__slots__ = {
"batch_size", "game_config", "games", "make_net", "max_steps", "pop_config", "query_net",
}
def __init__(self,
             make_net,
             query_net,
             game_config: Config,
             pop_config: Config,
             ):
    """
    Create an environment in which the genomes get evaluated across different games.

    :param make_net: Method to create a network based on the given genome
    :param query_net: Method to evaluate the network given the current state
    :param game_config: Config file for game-creation
    :param pop_config: Config file specifying how genome's network will be made
    """
    # Number of games evaluated per genome; presumably updated together
    # with self.games by a setter elsewhere in the class -- confirm.
    self.batch_size = 0
    # Game identifiers to evaluate on (passed to get_game()).
    self.games = []
    self.make_net = make_net
    # Hard cap on simulation steps: duration (seconds) * frames per second.
    self.max_steps = game_config.game.duration * game_config.game.fps
    self.query_net = query_net
    self.game_config = game_config
    self.pop_config = pop_config
def eval_genome(self,
                genome,
                return_dict=None,
                ):
    """
    Evaluate a single genome in a pre-defined game-environment.

    :param genome: Tuple (genome_id, genome_class)
    :param return_dict: Dictionary used to return observations corresponding the genome
    """
    # Split up genome by id and genome itself
    genome_id, genome = genome
    # Only sensors attached to used connections need to be simulated.
    used_connections = set(genome.get_used_connections().keys())

    # Initialize the games on which the genome is tested
    games = [get_game(g, cfg=self.game_config) for g in self.games]
    for g in games: g.player.set_active_sensors(used_connections)  # Set active-sensors

    # Ask for each of the games the starting-state
    states = [g.reset()[D_SENSOR_LIST] for g in games]

    # Finished-state for each of the games is set to false
    finished = [False] * self.batch_size

    # Create the network used to query on, initialize it with the first-game's readings (good approximation)
    net = self.make_net(genome=genome,
                        genome_config=self.pop_config.genome,
                        game_config=self.game_config,
                        bs=self.batch_size,
                        initial_read=states[0],
                        )

    # Start iterating the environments
    step_num = 0
    while True:
        # Check if maximum iterations is reached
        if step_num == self.max_steps: break

        # Determine the actions made by the agent for each of the states
        actions = self.query_net(net, states)

        # Check if each game received an action
        assert len(actions) == len(games)

        for i, (g, a, f) in enumerate(zip(games, actions, finished)):
            # Ignore if game has finished
            if not f:
                # Proceed the game with one step, based on the predicted action
                # (a[0]/a[1] presumably left/right wheel commands -- confirm)
                obs = g.step(l=a[0], r=a[1])
                finished[i] = obs[D_DONE]

                # Update the candidate's current state
                states[i] = obs[D_SENSOR_LIST]

        # Stop if agent reached target in all the games
        if all(finished): break
        step_num += 1

    # Return the final observations (one close() result per game)
    if return_dict is not None: return_dict[genome_id] = [g.close() for g in games]
def trace_genome(self,
genome,
return_dict=None,
):
"""
Get the trace of a single genome for a pre-defined game-environment.
:param genome: Tuple (genome_id, genome_class)
:param return_dict: Dictionary used to return the traces corresponding the genome-game combination
"""
# Split up genome by id and genome itself
genome_id, genome = genome
used_connections = set(genome.get_used_connections().keys())
# Initialize the games on which the genome is tested
games = [get_game(g, cfg=self.game_config) for g in self.games]
for g in games: g.player.set_active_sensors(used_connections) # Set active-sensors
# Ask for each of the games the starting-state
states = [g.reset()[D_SENSOR_LIST] for g in games]
# Initialize the traces
traces = [[g.player.pos.get_tuple()] for g in games]
# Finished-state for each of the games is set to false
finished = [False] * self.batch_size
# Create the network used to query on, initialize it with the first-game's readings (good approximation)
net = self.make_net(genome=genome,
genome_config=self.pop_config.genome,
game_config=self.game_config,
bs=self.batch_size,
initial_read=states[0],
)
# Start iterating the environments
step_num = 0
while True:
# Check if maximum iterations is reached
if step_num == self.max_steps: break
# Determine the actions made by the agent for each of the states
actions = self.query_net(net, states)
# Check if each game received an action
assert len(actions) == len(games)
for i, (g, a, f) in enumerate(zip(games, actions, finished)):
# Do not advance the player if target is reached
if f:
traces.append(g.player.pos.get_tuple())
continue
# Proceed the game with one step, based on the predicted action
obs = g.step(l=a[0], r=a[1])
finished[i] = obs[D_DONE]
# Update the candidate's current state
states[i] = obs[D_SENSOR_LIST]
# Update the trace
traces[i].append(g.player.pos.get_tuple())
# Next step
step_num += 1
# Return the final observations
if return_dict is not None: return_dict[genome_id] = traces
# -----------------------------------------------> HELPER METHODS <----------------------------------------------- #
def set_games(self, games):
"""
Set the games-set with new games.
:param games: List of Game-IDs
"""
self.games = games
self.batch_size = len(games)
def get_game_params(self):
"""Return list of all game-parameters currently in self.games."""
return [get_game(i, cfg=self.game_config).game_params() for i in self.games]
def get_multi_env(pop: Population, game_config: Config):
"""Create a multi-environment used to evaluate a population on."""
if sys.platform == 'linux':
from environment.cy.env_multi_cy import MultiEnvironmentCy
return MultiEnvironmentCy(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
else:
return MultiEnvironment(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
| [
"broekxruben@gmail.com"
] | broekxruben@gmail.com |
84fcbd3b1daafb67a7a0524e66461a5381ad7ced | b10c133ad4dea2635d405a4c19d663f9ac0ccafe | /testrobot/test.py | 40967633caa040b45d3c30ffecb501d31f2d41cc | [] | no_license | andyandymike/pythonArea | 45da924a6286e617bfddb2cae7dab9b23df24f88 | bdbe86dc375981e9163571ca9efa1f69c7f56700 | refs/heads/master | 2020-04-10T05:33:30.390914 | 2017-12-21T06:36:55 | 2017-12-21T06:36:55 | 68,115,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | class Chain(object):
def __init__(self, path='GET '):
self._path = path
def __getattr__(self, path):
return Chain('%s/%s' % (self._path, path))
def __call__(self,path):
return Chain('%s/%s' % (self._path, path))
def __str__(self):
return self._path
__repr__ = __str__
print(Chain().users('michael').group('student').repos)
class test(Chain):
def __init__(self):
super(test,self).__init__() | [
"564458508@qq.com"
] | 564458508@qq.com |
3c3531fcca3bf7881653a8e2610025a878c3f1a6 | d6e455eea3b83eec62d90b9e604ef7edc1de9e82 | /convolve/convolve_image.py | 6914c742059f29b754c15703d599151fc4e007b8 | [
"Apache-2.0"
] | permissive | xcamilox/frastro | 5fbb2b0ed86a7d348b46ea899ebc11efc452c419 | 437236350277e986d1c1787d9ff35c9ebbe09513 | refs/heads/master | 2023-05-25T12:30:20.186121 | 2023-05-21T16:10:04 | 2023-05-21T16:10:04 | 143,422,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,703 | py | from astropy.wcs import wcs
from scipy import signal
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
from scipy.ndimage import gaussian_filter
from frastro import PSFexUtil, ImageUtils, FITSFile
from frastro.core.data.photometry.photometry_util import PhotometryUtil
from photutils import create_matching_kernel
from photutils import (HanningWindow, TukeyWindow, CosineBellWindow,
SplitCosineBellWindow, TopHatWindow)
from astropy.convolution import convolve
class ConvolveImage():
def __init__(self):
pass
def convolveImage_scipy(self,np_data,np_kernel,mode="same",method="auto"):
return signal.convolve(np_data, np_kernel, mode=mode, method=method)
def convolveImage_astropy(self,np_data,np_kernel):
return convolve(np_data,np_kernel)
#Convolved Image using both methods, astropy convolve and scipy
def convolveImage(self,np_data,np_kernel):
scipy = self.convolveImage_scipy(np_data,np_kernel)
astropy = self.convolveImage_astropy(np_data, np_kernel)
return scipy, astropy
#alpha and beta parameters only are required for turkey,cosinebell, splitcosinebell, and tophat functions
def createKernel(self,windows_funtion="cosinebell",alpha=0.5,beta=0.3):
wfuntion = windows_funtion.lower()
if wfuntion == "hanning":
w = HanningWindow()
if wfuntion == "tukey":
w = TukeyWindow(alpha=alpha)
if wfuntion == "cosinebell":
w = CosineBellWindow(alpha=alpha)
if wfuntion == "splitcosinebell":
w = SplitCosineBellWindow(alpha=alpha, beta=beta)
if wfuntion == "tophat":
w = TopHatWindow(beta=beta)
return w
def convolve_img(image, kernel, padding="same", stride=1):
image_h = image.shape[0]
image_w = image.shape[1]
kernel_h = kernel.shape[0]
kernel_w = kernel.shape[1]
h = kernel_h // 2
w = kernel_h // 2
if padding == "same":
padding = (kernel_w - 1) / 2
size = (int(image_h + padding), int(image_w + padding))
image_ref = np.zeros(size)
image_conv = np.zeros(image.shape)
for i in range(image_h):
for j in range(image_w):
image_ref[i + h][j + h] = image[i][j]
for i in range(image_h - h):
for j in range(image_w - w):
sum = 0
for m in range(kernel_h):
for n in range(kernel_w):
sum = sum + kernel[m][n] * image_ref[i + m][j + n]
image_conv[i][j] = sum
return image_conv
def convolve_image():
# SLACSJ0157-0056
ra = 29.495583333333332
dec = -0.9405833333333322
ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-09-19/science/stack_best.fits"
ref_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-09-19/science/stack_best_crop.fits"
psf_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-09-19/science/psf_best.fits"
cat_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-09-19/science/catalog_best_deep.fits"
zp = 29.81058550612531
mag = 19.549888610839844
flux_ref = 12713.88
# disaline
# ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-10-17/science/stack_best.fits"
# ref_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-10-17/science/stack_best_crop.fits"
# psf_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-10-17/science/psf_best.fits"
zp1 = 30.381520940420206
mag1 = 19.457260131835938
target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2019-01-01/science/stack_best.fits"
target_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2019-01-01/science/stack_best_crop.fits"
psf_target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2019-01-01/science/psf_best.fits"
zp2 = 30.304233586279405
mag2 = 19.44410514831543
flux_target = 22082.65
# BELLSJ1221+3806
ra = 185.466325546679
dec = 38.1029236161019
ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-03-05/science/stack_best.fits"
ref_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-03-05/science/stack_best_crop.fits"
psf_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-03-05/science/psf_best.fits"
zp = 30.2217960357666
mag = 20.398475646972656
flux_ref = 8498.216796875
seeing_ref = 1.439271068572998
target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-02-01/science/stack_best.fits"
target_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-02-01/science/stack_best_crop.fits"
psf_target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/BELLSJ1221+3806/observations/2019-02-01/science/psf_best.fits"
zp2 = 30.256074905395508
mag2 = 20.425241470336914
flux_target = 8557.240234375
seeing_target = 1.4405250549316406
psf = PSFexUtil()
ref_output_psf = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/SLACSJ0157-0056/observations/2018-09-19/science/psf_best_img.fits"
# S4TMJ1051+4439
ra = 162.78920833333333
dec = 44.65236388888888
ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/astrometry/good/swarp_2018-11-20_1.fits"
ref_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/astrometry/good/2018-11-20_1_crop.fits"
psf_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2018-11-20/science/psf_best.fits"
cat_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2018-11-20/science/catalog_best_deep.fits"
zp = 30.010107046117383
mag = 17.317863
flux_ref = 119370.54
seeing_ref = 1.3008779525756835
ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-04-23/science/stack_best.fits"
ref_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-04-23/science/stack_best_crop.fits"
psf_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-04-23/science/psf_best.fits"
cat_ref = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-04-23/science/catalog_best_deep.fits"
zp2 = 30.259170532226562
mag2 = 17.600826
flux_target = 115701.07
seeing_target = 1.0545900106430053
target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-05-01/science/stack_best.fits"
target_crop = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-05-01/science/stack_best_crop.fits"
psf_target = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/2019-05-01/science/psf_best.fits"
zp2 = 30.20319175720215
mag2 = 17.57571029663086
flux_target = 112458.59375
seeing_target = 0.8817330121994018
scale = calcScale(cat_ref, ref, target)
ref_file = fits.open(ref)[0].data
ref_file = ref_file - ref_file.mean()
sky_ref = ref_file.mean()
sky_max = ref_file.max()
sky_std_ref = ref_file.std()
target_file = fits.open(target)[0].data
target_file = target_file - target_file.mean()
target_sky = target_file.mean()
target_max = target_file.max()
target_std_ref = target_file.std()
print("ref", "sky", sky_ref, "max", sky_max, "std", sky_std_ref)
print("target", "sky", target_sky, "max", target_max, "std", target_std_ref)
psf = PSFexUtil()
ref_output_psf = psf_ref[:-5] + "_img.fits"
psf.saveSinglePSFFile(psf_ref, ref_output_psf)
target_output_psf = psf_target[:-5] + "_img.fits"
psf.saveSinglePSFFile(psf_target, target_output_psf)
psf_ref_org = fits.open(ref_output_psf)[0].data
psf_ref_img = psf_ref_org
psf_target_img = fits.open(target_output_psf)[0].data
fov = 80
pixel = ImageUtils.getPixelFromCoordinate(ref, ra, dec)
crop_ref = ImageUtils.imcopy(ref, ref_crop, (pixel[0][0], pixel[0][1]), (fov, fov))
pixel_target = ImageUtils.getPixelFromCoordinate(target, ra, dec)
crop_target = ImageUtils.imcopy(target, target_crop, (pixel_target[0][0], pixel_target[0][1]), (fov, fov))
# crop_ref=crop_ref-crop_ref.mean()
ref_total_flux = crop_ref.sum()
mag_ref = -2.5 * np.log10(ref_total_flux / zp)
crop_ref_mean = crop_ref.mean()
crop_ref_std = crop_ref.std()
# crop_target = crop_target-crop_target.mean()
target_total_flux = crop_target.sum()
mag_target = -2.5 * np.log10(target_total_flux / zp2)
crop_target_mean = crop_target.mean()
crop_target_std = crop_target.std()
ref_img = fits.open(ref_crop)[0].data
target_img = fits.open(target_crop)[0].data
sky_refcrop = crop_ref.mean()
sky_targetcrop = crop_target.mean()
std_refcrop = crop_ref.std()
std_targetcrop = crop_target.std()
print("STD ref", std_refcrop)
print("STD target", std_targetcrop)
w1 = HanningWindow()
w2 = TukeyWindow(alpha=0.5)
w3 = CosineBellWindow(alpha=0.5)
w4 = SplitCosineBellWindow(alpha=0.4, beta=0.3)
w5 = TopHatWindow(beta=0.4)
kernel_HanningWindow = create_matching_kernel(psf_ref_img, psf_target_img, window=w1)
kernel_TukeyWindow = create_matching_kernel(psf_ref_img, psf_target_img, window=w2)
kernel_CosineBellWindow = create_matching_kernel(psf_ref_img, psf_target_img, window=w3)
kernel_SplitCosineBellWindow = create_matching_kernel(psf_ref_img, psf_target_img, window=w4)
kernel_TopHatWindow = create_matching_kernel(psf_ref_img, psf_target_img, window=w5)
diff_simple = crop_ref - crop_target
sky_diff = sky_targetcrop - sky_refcrop
# crop_ref += sky_diff
psf_conv = signal.convolve(psf_ref_img, psf_target_img, mode='same', method="auto")
gaussian_filter_diff = seeing_target - seeing_ref
blur_conv = gaussian_filter(crop_ref, sigma=gaussian_filter_diff)
img_conv = signal.convolve(crop_ref, psf_conv , mode='same', method="auto")
target_conv = signal.convolve(crop_target, psf_conv, mode='same', method="auto")
header_ref_crop = FITSFile.header(ref_crop)[0]
header_target_crop = FITSFile.header(target_crop)[0]
conv_ref_path = ref_crop[:-5] + "_conv.fits"
conv_target_path = target_crop[:-5] + "_conv.fits"
# save crop reference image convolved with target psf
FITSFile.saveFile(conv_ref_path, img_conv, header_ref_crop)
# save crop target image convolved with ref psf
FITSFile.saveFile(conv_target_path, target_conv, header_target_crop)
scale = calcScale(cat_ref, conv_ref_path, conv_target_path)
print("scale",scale)
w_ref = wcs.WCS(header_ref_crop)
w_target = wcs.WCS(header_target_crop)
diff=np.zeros(img_conv.shape)
for i in range(img_conv.shape[0]):
for j in range(img_conv.shape[1]):
ra_dec = w_ref.wcs_pix2world([[i,j]],1)
pixcrd2 = w_target.wcs_world2pix(ra_dec, 1)[0]
#Dij=Iij-Mij
#print(i,j, int(round(pixcrd2[0])),int(round(pixcrd2[1])))
diff[i,j]=target_conv[int(round(pixcrd2[0])),int(round(pixcrd2[1]))]-(img_conv[i,j]*scale)
diference = img_conv - target_conv
diference_out = img_conv - crop_target
conv_ref_path_diff = conv_ref_path[:-5] + "_diff.fits"
# residuos from conv ref - conv target
FITSFile.saveFile(conv_ref_path_diff, diff, header_ref_crop)
psf_conv_path = "/Users/cjimenez/Documents/PHD/data/liverpool_lens/S4TMJ1051+4439/observations/astrometry/good/psf_conv.fits"
header_con_psf = FITSFile.header(psf_target)[0]
FITSFile.saveFile(psf_conv_path, psf_conv, header_con_psf)
print(conv_target_path)
print(conv_ref_path)
print(conv_ref_path_diff)
print(psf_conv_path)
#made plots
#def makePlot():
print(conv_ref_path, conv_target_path, conv_ref_path_diff)
plt.clf()
plt.imshow(psf_ref_img)
plt.title("psf ref")
print("sum", psf_ref_img.sum())
plt.colorbar()
plt.show()
plt.imshow(psf_target_img)
plt.title("psf target")
plt.colorbar()
plt.show()
# plt.imshow(psf_conv)
# plt.title("psf convo ref-target")
# plt.colorbar()
# plt.show()
plt.imshow(crop_ref)
plt.title("Reference Image")
plt.colorbar()
plt.show()
plt.imshow(blur_conv)
plt.title("Image gausean")
plt.colorbar()
plt.show()
plt.imshow(crop_target)
plt.title("Target Image")
plt.colorbar()
plt.show()
plt.imshow(img_conv)
plt.title("Ref convol psf-target")
plt.colorbar()
plt.show()
plt.imshow(diference_out)
plt.title("Diference ref-conv-psf-target - target")
plt.colorbar()
plt.show()
plt.imshow(diff)
plt.title("Diference ref-conv-psf-target - target-conv-psf-ref")
plt.colorbar()
plt.show()
"""
plt.imshow(diff_simple)
plt.colorbar()
plt.show()
"""
def readCatalog(cat_path):
hdul = FITSFile.open(cat_path)
catalog = hdul[1].data
return catalog
def calcScale(cat_ref, img_ref, img_target):
print(cat_ref)
ref_cat = readCatalog(cat_ref)
filter = ref_cat["CLASS_STAR"] > 0.8
catalog_ref = ref_cat[filter]
filter = catalog_ref["MAG_AUTO"] > 15.
catalog_ref = catalog_ref[filter]
filter = catalog_ref["BACKGROUND"] < 0.09
catalog_ref = catalog_ref[filter]
# image_ref=FITSFile.open(img_ref)
ref_photometry = PhotometryUtil.doPhotometry(ra=catalog_ref["ALPHA_J2000"], dec=catalog_ref["DELTA_J2000"],
image=img_ref, r_arcsec_aperture=5)
# image_target = FITSFile.open(img_target)
target_photometry = PhotometryUtil.doPhotometry(ra=catalog_ref["ALPHA_J2000"], dec=catalog_ref["DELTA_J2000"],
image=img_target, r_arcsec_aperture=5)
ra = 162.78920833333333
dec = 44.65236388888888
# image_ref=FITSFile.open(img_ref)
source_ref_photometry = PhotometryUtil.doPhotometry(ra=[ra], dec=[dec],
image=img_ref, r_arcsec_aperture=5)
# image_target = FITSFile.open(img_target)
source_target_photometry = PhotometryUtil.doPhotometry(ra=[ra], dec=[dec],
image=img_target, r_arcsec_aperture=5)
xcenter = ref_photometry['xcenter'] - target_photometry['xcenter']
ycenter = ref_photometry['ycenter'] - target_photometry['ycenter']
aperture_sum = target_photometry['aperture_sum'] / ref_photometry['aperture_sum']
print(aperture_sum)
print(np.mean(aperture_sum),np.median(aperture_sum),np.std(aperture_sum))
list =target_photometry['aperture_sum'] / ref_photometry['aperture_sum']
scale = np.mean(list)
scale_obj=source_target_photometry['aperture_sum'] / source_ref_photometry['aperture_sum']
print(np.mean(xcenter))
print(np.mean(ycenter))
print(np.mean(ref_photometry['aperture_sum']), np.mean(target_photometry['aperture_sum']), np.mean(aperture_sum))
return scale_obj
if __name__ == "__main__":
convolve_image()
| [
"camilojimenez@Camilos-MacBook-Pro.local"
] | camilojimenez@Camilos-MacBook-Pro.local |
8b270e8421c100145ef9f17d0acf3038abfbca70 | 8ee17f972d788231ea9f3ac40f1596e22834726a | /3rd/boost_1_66_0/tools/build/test/project_root_rule.py | a123d1ab31e980d911f4f9d869162f33a1ad617c | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tulingwangbo/cefMultiBrowser | 585a04ee2903aad5ade8b32decf4e28391b1b1f1 | 459a459021da080d39a518bfe4417f69b689342f | refs/heads/master | 2020-04-04T16:36:51.262515 | 2018-11-04T14:38:06 | 2018-11-04T14:38:06 | 156,085,167 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2005.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that we can declare a rule in Jamroot that will be can be called in
# child Jamfile to declare a target. Specifically test for use of 'glob' in that
# rule.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """
project : requirements <link>static ;
rule my-lib ( name ) { lib $(name) : [ glob *.cpp ] ; }
""")
t.write("sub/a.cpp", """
""")
t.write("sub/jamfile.jam", """
my-lib foo ;
""")
t.run_build_system(subdir="sub")
t.expect_addition("sub/bin/$toolset/debug/link-static*/foo.lib")
t.cleanup()
| [
"tulingwangbo@163.com"
] | tulingwangbo@163.com |
7a9f19d6dac53a9f4248d28a1a6b381ab89c9614 | 120adb67cd33c128025793c37de4ba7b2f2d9f3e | /todolist/todoapp/tests/test_views.py | 14544f6d209b374424eca2edc3e6cc85b2c9006d | [] | no_license | ga0125/todolist-2 | 5da0fd61a0a0332929c9559181133914173387cf | dd78879eacd1796a931c4f8ca4c0b6b523e17a88 | refs/heads/master | 2022-12-10T17:34:23.063327 | 2019-09-17T16:16:40 | 2019-09-17T16:16:40 | 209,096,283 | 0 | 0 | null | 2022-12-08T06:10:24 | 2019-09-17T15:54:00 | CSS | UTF-8 | Python | false | false | 1,509 | py | from django.test import TestCase, Client
from django.urls import reverse
from datetime import datetime
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.task_url = reverse('api')
self.add_task_url = reverse('add-task')
self.done_task_url = reverse('done-task')
self.filed_task_url = reverse('filed-task')
self.del_task_url = reverse('del-task')
def test_task_list_GET(self):
response = self.client.get(self.task_url)
self.assertEquals(response.status_code, 200)
def test_add_task_POST(self):
response = self.client.post(self.add_task_url, {
'title': 'Unit test',
'desc': 'Testing the APIs calls by unit test',
'deadline': '2019-09-14',
})
self.assertEquals(response.status_code, 302)
def test_done_task_POST(self):
date = datetime.now()
response = self.client.post(self.done_task_url, {
'id': 1,
'complete': True,
'completed_at': date
})
self.assertEquals(response.status_code, 201)
def test_filed_task_POST(self):
response = self.client.post(self.filed_task_url, {
'id': 1,
'filed': True
})
self.assertEquals(response.status_code, 201)
def test_del_task_DEL(self):
response = self.client.delete(self.del_task_url, {
'id': 1
})
self.assertEquals(response.status_code, 200)
| [
"gabriel.zbrg@gmail.com"
] | gabriel.zbrg@gmail.com |
45b817d4a75f46e4e626eb9c9fb88a7376806c4e | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/videoplayer/__init__.py | b55a1624352086133c05f65c066095386a59df16 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\videoplayer\__init__.py
from _videoplayer import * | [
"victorique.de.blois@asu.edu"
] | victorique.de.blois@asu.edu |
0fad881d4404f4193a1f47164fedb70cc2bd82b9 | 551a047ce33028e2d62d11cdd98dd9882ceed641 | /kic/kic/urls.py | 3e45e00a1753f7ea83aec940ec8e53cfc944319c | [] | no_license | AMfalme/Kids-In-Charge | 7b03f05d6979b85a797bed535bf15ecaff4bc7e4 | 73d617e931cee5f0813a7c7d4bf5b030b514fe1d | refs/heads/master | 2023-03-12T09:03:14.968311 | 2021-03-06T12:43:52 | 2021-03-06T12:43:52 | 288,979,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from search import views as search_views
from contact import views as contact_views
urlpatterns = [
url(r'^django-admin/', admin.site.urls),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', search_views.search, name='search'),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns = urlpatterns + [
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
url(r"", include(wagtail_urls)),
# Alternatively, if you want Wagtail pages to be served from a subpath
# of your site, rather than the site root:
# url(r"^pages/", include(wagtail_urls)),
]
| [
"gmfalme@mizizi.io"
] | gmfalme@mizizi.io |
16deb8404da8048f8c259386524f2b4355b34af3 | 075a9186a43041b062ce883604a125484db64c71 | /plot_tools/twitch_ca_overlay.py | 8f8493bfb2cd3e0f887354470a8a3922143e4717 | [] | no_license | mmoth-kurtis/MMotH-Vent | 0c4afa14f882e3d6fff6aa3c354d142bc00ab906 | b1caff62bfdc60000e429a35fb4a4327dfbed4ea | refs/heads/master | 2023-02-24T00:02:09.078654 | 2021-01-29T19:19:25 | 2021-01-29T19:19:25 | 233,081,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import numpy as np
import matplotlib.pyplot as plt
original_stress = np.load('og_stress.npy')
original_stress = original_stress[0:701,0]
#stress_loaded = np.load('stress_array.npy')
stress_loaded = np.loadtxt('active_stress.csv',delimiter=',')
stress = stress_loaded[:,0]
#ca = np.load('calcium.npy')
#ca = ca[:,0]
#time=np.load('tarray.npy')
time = np.loadtxt('time.csv',delimiter=',')
print np.shape(time)
time=time[0,:]
t_target = [0,30,96,220]
f_target = [0,0,80000,40000]
fig, ax1 = plt.subplots(nrows=1,ncols=1)
ax1.plot(original_stress[0:594])
ax1.plot(stress[0:594])
ax1.plot(t_target,f_target,marker='o',linestyle='none')
"""ax2 = ax1.twinx()
color='tab:gray'
ax2.set_ylabel('calcium',color=color)
ax2.plot(time,ca,color=color)"""
plt.show()
| [
"ckma224@g.uky.edu"
] | ckma224@g.uky.edu |
ea1f6012027177dfbab004ab8bf20e0541775ee1 | 540e4d8e4092f2418a683170759eafe7b4b84d94 | /get_difference.py | 1d6cb471b9cbce9c16126c5f09fc4c9ac9543f7b | [] | no_license | reshmarameshbabu/Cattle-Tracking | 3d314bc3d3da174cb069d446e9a20ba434713dcc | 908fe27091744a571dede44eb5d2a8030d235bbe | refs/heads/master | 2023-01-20T06:33:53.855912 | 2020-11-26T09:26:11 | 2020-11-26T09:26:11 | 274,412,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | rows = open("urls.txt").read().strip().split("\n")
rows1 = open("urls1.txt").read().strip().split("\n")
total = 0
print("urls",len(rows))
print("urls",len(rows1))
l = []
# loop the URLs
for url in rows1:
if url not in rows:
l.append(url)
print(len(l))
#with open('new_urls.txt', 'w') as f:
# for item in l:
# f.write("%s\n" % item)
| [
"reshmaraemshbabu@gmail.com"
] | reshmaraemshbabu@gmail.com |
fd1369c47b58abeec8559d0b4b7d8a05650b30d4 | fe5cce1d566dd35809f45e4b07c9cbb14d15eff6 | /lexemize.py | 3c23bbb397d2840d08ca52aa3daa25ea2615cd75 | [] | no_license | Mersanko/stringtokenizer | 40053117e735f2879a926081221bc28ee953fa3e | 96f5cb74ccb1b6697c8dcf845b8176ebd3dbe1f1 | refs/heads/master | 2023-01-08T22:16:29.293841 | 2020-11-15T13:10:22 | 2020-11-15T13:10:22 | 313,016,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
class GUI():
def __init__(self,title):
self.title = title
self.window =Tk()
self.inputVar = StringVar()
self.outputVar = StringVar()
self.window.geometry('600x350')
self.window.title(self.title)
self.inputLabel = Label(self.window,text="Input Arithmetic Expressions",font=('Arial',14,'bold'))
self.inputLabel.place(x=175,y=50)
self.inputEntry = Entry(self.window,textvariable=self.inputVar,width=40)
self.inputEntry.place(x=190,y=80)
self.outputLabel = Label(self.window,text="Output",font=('Arial',14,'bold'))
self.outputLabel.place(x=280,y=150)
self.outputEntry = Entry(self.window,textvariable=self.outputVar,width=60)
self.outputEntry.place(x=130,y=180)
#button for displaying the output
self.buttonLexemize = Button(self.window,text="Lexemize",command=self.lexemize,height=1,width=10,bg="green2")
self.buttonLexemize.place(x=200, y=120)
#button for clearing the input and output entry
self.buttonClear = Button(self.window,text="Clear",command=self.clear,height=1,width=10,bg="turquoise1")
self.buttonClear.place(x=350, y=120)
#button for closing the window
self.buttonClose = Button(self.window,text="Close",command=self.exit,height=1,width=10,bg="red")
self.buttonClose.place(x=275, y=220)
self.window.mainloop()
def lexemize(self):
expressions = self.inputEntry.get()
#list for Operation, Parentheses and Brackets.
symbols = ["+","-","*","/","(",")","[","]"]
newExpresion = ""
'''Scan every character of the input expression, if the character is in the symbols list it will concatenate
a string containing space in the left side, symbol/operation in between and another space in the right side
to the newExpresion which is also a string. Otherwise the character will be directly concatenated to newExpresion.
This method will help to easily split/tokenized the input string because you can use the space as separator
argument or delimiter of the python split function'''
for expression in expressions:
if expression in symbols:
newExpresion+=" "+expression+" "
else:
newExpresion+=expression
#splitting the string and initialized a list from it.
tokenizedStringList = newExpresion.split(" ")
#removing the blank element in the list
while("" in tokenizedStringList):
tokenizedStringList.remove("")
result = "{}".format(tokenizedStringList)
self.outputEntry.delete(0,END)
#inserting the result to the output entry
self.outputEntry.insert(0,result)
def clear(self):
#clear the input and output entry
self.inputEntry.delete(0,END)
self.outputEntry.delete(0,END)
def exit(self):
#close the window
quit()
gui = GUI("Lexemizer")
| [
"mersanjr.canonigo@g.msuiit.edu.ph"
] | mersanjr.canonigo@g.msuiit.edu.ph |
d0ab0caeb92a58b3bd990e5cd29d3c0615018d0d | 3591fb0c79787d8d8ef1185d4164ac1916c0746c | /PyCast3/test_PyCast3.py | 8d30967b8a312b159e7986986eb6481cd249ed8e | [
"MIT"
] | permissive | Arthelon/PyCast3 | a7b4c1f1e753d6386d26887d130e7602cdb2af5c | 03c17b4e614ba4387ad4fe0d9ef50ccb27b39c5f | refs/heads/master | 2021-01-13T00:53:55.150391 | 2016-04-22T10:22:59 | 2016-04-22T10:22:59 | 51,683,121 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import pytest, requests, json
from .PyCast3 import get_data
def test_get_data():
with pytest.raises(requests.exceptions.ConnectionError):
get_data('https://foo.bar')
with pytest.raises(SystemExit):
get_data('http://www.google.com/barfoo')
assert isinstance(get_data('http://jsonip.com/'), dict)
assert isinstance(get_data('https://google.com', json=False), bytes)
with pytest.raises(json.decoder.JSONDecodeError):
get_data('http://www.feedforall.com/sample.xml')
| [
"hsing.daniel@gmail.com"
] | hsing.daniel@gmail.com |
1c7565818774d402fe19003d4c5d53d5e779f919 | e770e46365ea19abd2d87d34f8f573a52312e823 | /proy_2.py | 39a2f572f5c2bb2a5dc73c0e3080e68addfbfa7a | [] | no_license | gusbeca/ProyectoHAR | d62b8d6eca5619a44cbbdca0f37c0c9d5f9c72e7 | d60d5cf8f23cdfaad0fbff40772c7cc2eb98a2c3 | refs/heads/master | 2020-06-13T17:37:25.498271 | 2019-07-01T20:07:26 | 2019-07-01T20:07:26 | 194,734,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 30 17:46:36 2019
@author: Edgar Alberto Cañón
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as plt
from scipy.stats import mode
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Script-level HAR pipeline: windowed accelerometer features -> random forest.
# NOTE(review): everything below depends on a machine-specific local CSV.
all_data = pd.DataFrame()
#Read data from a hard-coded local path.
path = 'C:/Users/Edgar Alberto Cañón/Documents/recognition/data.csv'
header = 'sensor_all_team'  # unused after assignment
sadl1 = pd.read_csv(path, sep=',', engine='python')
list(sadl1)   # NOTE(review): result discarded; looks like a leftover inspection call
sadl1.dtypes  # NOTE(review): result discarded as well
# Keep only the timestamp, the three axes and the label column.
sadl1 = sadl1[['time', 'x', 'y', 'z', 'l']]
data = sadl1.iloc[:, :243]  # only 5 columns exist here, so this keeps all of them
labels = sadl1.iloc[:,4]    # the 'l' label column
columns =['x', 'y', 'z', 'l']
filtered_data=data  # alias, not a copy: mutations below also affect `data`
filtered_data['time_2']=pd.to_datetime(filtered_data['time'])
filtered_data.index=filtered_data.time_2
filtered_data = filtered_data.sort_index()
#calculate mean over a rolling window ('3s' = 3 seconds);
#this first result is unused — it is recomputed below with the `keep` mask.
means = filtered_data[columns].rolling('3s').mean()
# Mark rows where the sub-second offset wraps past a 500 ms boundary,
# i.e. one sample per 500 ms bucket.
keep = filtered_data.time_2.dt.microsecond/1000%500
keep = keep -keep.shift() < 0
mode(['a','be','a','c'])  # NOTE(review): result discarded; appears to be a sanity check
# Windowed mean/variance features, sampled only at the `keep` rows.
means = filtered_data[columns].rolling('3S').mean()[keep]
means.columns = [str(col) + '_mean' for col in means.columns]
variances = filtered_data[columns].rolling('3S').var()[keep]
variances.columns = [str(col) + '_var' for col in variances.columns]
labels.index = filtered_data.time_2
# Most frequent label within each 3-second window.
mode_labels = labels.rolling('3S').apply(lambda x: mode(x)[0])[keep]
all_features = pd.concat([means, variances], axis=1)
all_features['label'] = mode_labels
all_data = all_features
all_data = all_data.dropna()
list(all_data)  # NOTE(review): result discarded
##### model: random forest on the windowed mean/variance features
x = all_data[['x_mean',
              'y_mean',
              'z_mean',
              'x_var',
              'y_var',
              'z_var']]
y = all_data[['label']]
X_train, X_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=42)
clf = RandomForestClassifier(n_estimators=100, max_depth=100, random_state=0)
clf.fit(X_train, y_train)
y_predict=clf.predict(X_test)
#######evaluation
##classification report
print(classification_report(y_test,y_predict))
#confusion matrix
print(confusion_matrix(y_test,y_predict))
| [
"noreply@github.com"
] | noreply@github.com |
0e30d4d43c35797513445fa870cd4464563789fd | b5e145c952b36029ce52d0ad7dec50cb3b9783bd | /MemoryPuzzle.py | a6f83048842c8a1c5db6b400bd05d69d93514b3e | [] | no_license | heidili19/Home-Projects | e41ce9837a9ce7585fe2faf2ee289f9e1d4ed168 | 2f04b349ca1f36696443fcbb90c02f67818b462f | refs/heads/master | 2021-01-13T12:50:49.915064 | 2017-01-10T15:46:01 | 2017-01-10T15:46:01 | 78,471,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,063 | py | # Memory Puzzle
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random
import pygame
import sys
from pygame.locals import *
FPS = 30 # frames per second, the general speed of the program
WINDOWWIDTH = 640 # size of window's width in pixels
WINDOWHEIGHT = 480 # size of windows' height in pixels
REVEALSPEED = 8 # speed boxes' sliding reveals and covers
BOXSIZE = 40 # size of box height & width in pixels
GAPSIZE = 10 # size of gap between boxes in pixels
BOARDWIDTH = 4 # number of columns of icons
BOARDHEIGHT = 4 # number of rows of icons
assert (BOARDWIDTH * BOARDHEIGHT) % 2 == 0, 'Board needs to have an even number of boxes for pairs of matches.'
XMARGIN = int((WINDOWWIDTH - (BOARDWIDTH * (BOXSIZE + GAPSIZE))) / 2)
YMARGIN = int((WINDOWHEIGHT - (BOARDHEIGHT * (BOXSIZE + GAPSIZE))) / 2)
# R G B
GRAY = (100, 100, 100)
NAVYBLUE = ( 60, 60, 100)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
CYAN = ( 0, 255, 255)
BGCOLOR = NAVYBLUE
LIGHTBGCOLOR = GRAY
BOXCOLOR = WHITE
HIGHLIGHTCOLOR = BLUE
DONUT = 'donut'
SQUARE = 'square'
DIAMOND = 'diamond'
LINES = 'lines'
OVAL = 'oval'
ALLCOLORS = (RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE, CYAN)
ALLSHAPES = (DONUT, SQUARE, DIAMOND, LINES, OVAL)
assert len(ALLCOLORS) * len(ALLSHAPES) * 2 >= BOARDWIDTH * BOARDHEIGHT, "Board is too big for the number of shapes/colors defined."
def main():
    """Run the Memory Game: set up pygame, then loop handling input and matches."""
    global FPSCLOCK, DISPLAYSURF
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    mousex = 0 # used to store x coordinate of mouse event
    mousey = 0 # used to store y coordinate of mouse event
    pygame.display.set_caption('Memory Game')
    mainBoard = getRandomizedBoard()
    revealedBoxes = generateRevealedBoxesData(False)
    firstSelection = None # stores the (x, y) of the first box clicked.
    DISPLAYSURF.fill(BGCOLOR)
    startGameAnimation(mainBoard)
    while True: # main game loop
        mouseClicked = False
        DISPLAYSURF.fill(BGCOLOR) # drawing the window
        drawBoard(mainBoard, revealedBoxes)
        for event in pygame.event.get(): # event handling loop
            if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEMOTION:
                mousex, mousey = event.pos
            elif event.type == MOUSEBUTTONUP:
                mousex, mousey = event.pos
                mouseClicked = True
        boxx, boxy = getBoxAtPixel(mousex, mousey)
        if boxx != None and boxy != None:
            # The mouse is currently over a box.
            if not revealedBoxes[boxx][boxy]:
                drawHighlightBox(boxx, boxy)
            if not revealedBoxes[boxx][boxy] and mouseClicked:
                revealBoxesAnimation(mainBoard, [(boxx, boxy)])
                revealedBoxes[boxx][boxy] = True # set the box as "revealed"
                if firstSelection == None: # the current box was the first box clicked
                    firstSelection = (boxx, boxy)
                else: # the current box was the second box clicked
                    # Check if there is a match between the two icons.
                    icon1shape, icon1color = getShapeAndColor(mainBoard, firstSelection[0], firstSelection[1])
                    icon2shape, icon2color = getShapeAndColor(mainBoard, boxx, boxy)
                    if icon1shape != icon2shape or icon1color != icon2color:
                        # Icons don't match. Re-cover up both selections.
                        pygame.time.wait(1000) # 1000 milliseconds = 1 sec
                        coverBoxesAnimation(mainBoard, [(firstSelection[0], firstSelection[1]), (boxx, boxy)])
                        revealedBoxes[firstSelection[0]][firstSelection[1]] = False
                        revealedBoxes[boxx][boxy] = False
                    elif hasWon(revealedBoxes): # check if all pairs found
                        gameWonAnimation(mainBoard)
                        pygame.time.wait(2000)
                        # Reset the board
                        mainBoard = getRandomizedBoard()
                        revealedBoxes = generateRevealedBoxesData(False)
                        # Show the fully unrevealed board for a second.
                        drawBoard(mainBoard, revealedBoxes)
                        pygame.display.update()
                        pygame.time.wait(1000)
                        # Replay the start game animation.
                        startGameAnimation(mainBoard)
                    firstSelection = None # reset firstSelection variable
        # Redraw the screen and wait a clock tick.
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def generateRevealedBoxesData(val):
    """Return a BOARDWIDTH x BOARDHEIGHT grid with every cell set to *val*."""
    return [[val] * BOARDHEIGHT for _ in range(BOARDWIDTH)]
def getRandomizedBoard():
    """Build a board filled with shuffled pairs of (shape, color) icons."""
    # Every possible shape/color combination is a candidate icon.
    icons = [(shape, color) for color in ALLCOLORS for shape in ALLSHAPES]
    random.shuffle(icons) # randomize the order of the icons list
    numIconsUsed = int(BOARDWIDTH * BOARDHEIGHT / 2) # calculate how many icons are needed
    icons = icons[:numIconsUsed] * 2 # make two of each
    random.shuffle(icons)
    # Lay the shuffled icons out column by column.
    icon_iter = iter(icons)
    return [[next(icon_iter) for _ in range(BOARDHEIGHT)]
            for _ in range(BOARDWIDTH)]
def splitIntoGroupsOf(groupSize, theList):
    """Split theList into consecutive chunks of at most groupSize items."""
    return [theList[start:start + groupSize]
            for start in range(0, len(theList), groupSize)]
def leftTopCoordsOfBox(boxx, boxy):
    """Convert board cell coordinates to the pixel position of the cell's top-left corner."""
    cell_span = BOXSIZE + GAPSIZE
    return (boxx * cell_span + XMARGIN, boxy * cell_span + YMARGIN)
def getBoxAtPixel(x, y):
    """Return the (boxx, boxy) cell under pixel (x, y), or (None, None) if none."""
    for boxx in range(BOARDWIDTH):
        for boxy in range(BOARDHEIGHT):
            left, top = leftTopCoordsOfBox(boxx, boxy)
            if pygame.Rect(left, top, BOXSIZE, BOXSIZE).collidepoint(x, y):
                return (boxx, boxy)
    return (None, None)
def drawIcon(shape, color, boxx, boxy):
    """Draw the given shape in the given color inside board cell (boxx, boxy)."""
    quarter = int(BOXSIZE * 0.25) # syntactic sugar
    half =    int(BOXSIZE * 0.5)  # syntactic sugar
    left, top = leftTopCoordsOfBox(boxx, boxy) # get pixel coords from board coords
    # Draw the shapes
    if shape == DONUT:
        # Outer ring, then an inner circle in the background color to make the hole.
        pygame.draw.circle(DISPLAYSURF, color, (left + half, top + half), half - 5)
        pygame.draw.circle(DISPLAYSURF, BGCOLOR, (left + half, top + half), quarter - 5)
    elif shape == SQUARE:
        pygame.draw.rect(DISPLAYSURF, color, (left + quarter, top + quarter, BOXSIZE - half, BOXSIZE - half))
    elif shape == DIAMOND:
        pygame.draw.polygon(DISPLAYSURF, color, ((left + half, top), (left + BOXSIZE - 1, top + half), (left + half, top + BOXSIZE - 1), (left, top + half)))
    elif shape == LINES:
        # Diagonal hatch: lines across the top-left and bottom-right halves.
        for i in range(0, BOXSIZE, 4):
            pygame.draw.line(DISPLAYSURF, color, (left, top + i), (left + i, top))
            pygame.draw.line(DISPLAYSURF, color, (left + i, top + BOXSIZE - 1), (left + BOXSIZE - 1, top + i))
    elif shape == OVAL:
        pygame.draw.ellipse(DISPLAYSURF, color, (left, top + quarter, BOXSIZE, half))
def getShapeAndColor(board, boxx, boxy):
    """Return the (shape, color) pair stored in board cell (boxx, boxy)."""
    shape, color = board[boxx][boxy]
    return shape, color
def drawBoxCovers(board, boxes, coverage):
    """Draw one animation frame of boxes being covered/revealed.

    "boxes" is a list of two-item lists, which have the x & y spot of the box.
    "coverage" is the pixel width of the white cover strip (0 = fully revealed).
    """
    for box in boxes:
        left, top = leftTopCoordsOfBox(box[0], box[1])
        # Erase the cell, redraw its icon, then draw the partial cover on top.
        pygame.draw.rect(DISPLAYSURF, BGCOLOR, (left, top, BOXSIZE, BOXSIZE))
        shape, color = getShapeAndColor(board, box[0], box[1])
        drawIcon(shape, color, box[0], box[1])
        if coverage > 0: # only draw the cover if there is an coverage
            pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, coverage, BOXSIZE))
    pygame.display.update()
    FPSCLOCK.tick(FPS)
def revealBoxesAnimation(board, boxesToReveal):
    """Animate the boxes opening by shrinking their cover strip frame by frame."""
    coverage = BOXSIZE
    while coverage >= -REVEALSPEED:
        drawBoxCovers(board, boxesToReveal, coverage)
        coverage -= REVEALSPEED
def coverBoxesAnimation(board, boxesToCover):
    """Animate the boxes closing by growing their cover strip frame by frame."""
    coverage = 0
    while coverage < BOXSIZE + REVEALSPEED:
        drawBoxCovers(board, boxesToCover, coverage)
        coverage += REVEALSPEED
def drawBoard(board, revealed):
    """Draw every box, covered or revealed according to the `revealed` grid."""
    for boxx in range(BOARDWIDTH):
        for boxy in range(BOARDHEIGHT):
            left, top = leftTopCoordsOfBox(boxx, boxy)
            if not revealed[boxx][boxy]:
                # Draw a covered box.
                pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, BOXSIZE, BOXSIZE))
            else:
                # Draw the (revealed) icon.
                shape, color = getShapeAndColor(board, boxx, boxy)
                drawIcon(shape, color, boxx, boxy)
def drawHighlightBox(boxx, boxy):
    """Outline the box at (boxx, boxy) to show the mouse is hovering over it."""
    left, top = leftTopCoordsOfBox(boxx, boxy)
    outline = (left - 5, top - 5, BOXSIZE + 10, BOXSIZE + 10)
    pygame.draw.rect(DISPLAYSURF, HIGHLIGHTCOLOR, outline, 4)
def startGameAnimation(board):
    """Briefly flash the icons at game start, revealing random groups of 8."""
    # Randomly reveal the boxes 8 at a time.
    coveredBoxes = generateRevealedBoxesData(False)
    boxes = []
    for x in range(BOARDWIDTH):
        for y in range(BOARDHEIGHT):
            boxes.append( (x, y) )
    random.shuffle(boxes)
    boxGroups = splitIntoGroupsOf(8, boxes)
    drawBoard(board, coveredBoxes)
    # Open and immediately re-close each group in turn.
    for boxGroup in boxGroups:
        revealBoxesAnimation(board, boxGroup)
        coverBoxesAnimation(board, boxGroup)
def gameWonAnimation(board):
    """Flash the background between two colors to celebrate a win."""
    coveredBoxes = generateRevealedBoxesData(True)
    flash_colors = (LIGHTBGCOLOR, BGCOLOR)
    for frame in range(13):
        # Alternate fill colors each frame, starting with the normal background.
        DISPLAYSURF.fill(flash_colors[(frame + 1) % 2])
        drawBoard(board, coveredBoxes)
        pygame.display.update()
        pygame.time.wait(300)
def hasWon(revealedBoxes):
    """Return True if every box in the grid has been revealed, otherwise False.

    Idiomatic replacement for the manual `False in column` scan: the grid
    holds only booleans, so truthiness testing with all() is equivalent.
    """
    return all(all(column) for column in revealedBoxes)
if __name__ == '__main__':
    # Start the game only when run as a script, not when imported.
    main()
| [
"heidili19@gmail.com"
] | heidili19@gmail.com |
09df25783a1779637e452c6e382451df52245c37 | bcd9da8c2a8489ed32ab6e28ae66f51d390a18c1 | /src/comment/tests.py | 413e4aeb2d8fb09168072afebf7837f15d8f8bf6 | [] | no_license | Rysbai/digiskills | 34b6b8e08882ee73f2016068ee122739880e947e | 39a205c5cb5d278b3a65a8ea4cd94660b2db97fc | refs/heads/master | 2022-12-13T16:01:01.220566 | 2020-01-30T11:36:06 | 2020-01-30T11:36:06 | 223,612,366 | 0 | 0 | null | 2022-12-08T03:18:38 | 2019-11-23T15:41:07 | API Blueprint | UTF-8 | Python | false | false | 1,712 | py | from django.urls import reverse
from rest_framework import status
from unittest import mock
from django.test import TestCase
from comment.serializers import CommentSerializers
from comment.factory import CommentFactory
class CommentAPITest(TestCase):
    """API tests for listing and creating comments.

    The create/notification tests previously duplicated the same payload
    literal; it is shared here as a class constant (DRY).
    """

    # Payload used by both the create test and the email-notification test.
    COMMENT_DATA = {
        'name': 'Example name',
        'phone': '+996779583738',
        'text': 'Some comments here'
    }

    def test_should_return_list_of_all_comments(self):
        """GET returns every comment, serialized like CommentSerializers."""
        path = reverse('comment:comment_list_and_create')
        comments = CommentFactory.create_many()

        response = self.client.get(path)
        body = response.json()

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(body, CommentSerializers(comments, many=True).data)

    def test_should_create_comment(self):
        """POST creates a comment and echoes the submitted fields."""
        path = reverse('comment:comment_list_and_create')

        response = self.client.post(path, data=self.COMMENT_DATA, content_type='application/json')
        body = response.json()

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(body['name'], self.COMMENT_DATA['name'])
        self.assertEqual(body['phone'], self.COMMENT_DATA['phone'])
        self.assertEqual(body['text'], self.COMMENT_DATA['text'])

    @mock.patch('comment.utils.send_comment_to_admin_email')
    def test_should_send_mail_to_admin(self, mocked_send_comment_to_admin_email):
        """Creating a comment triggers exactly one admin notification email."""
        path = reverse('comment:comment_list_and_create')

        self.client.post(path, data=self.COMMENT_DATA, content_type='application/json')

        mocked_send_comment_to_admin_email.assert_called_once()
| [
"rysbai.coder@gmail.com"
] | rysbai.coder@gmail.com |
0fce395d84bb32bd832c506593f6cb5c31c596aa | 128c4788a33fc51dd244badcfc2053eac3cc3b3b | /servos/controller.py | 1dc01f90aaabcd69ef85fed1bae686a08394c00b | [] | no_license | krixoalejo/backend_domotic_house | e66ad91d9a329bd56cedbf60b5c6945f4a4481e5 | 0a8fcb30e1c9ead3b757ae16bc3e6f63cc7c6e60 | refs/heads/master | 2020-04-07T19:17:02.454563 | 2018-11-22T04:30:24 | 2018-11-22T04:30:24 | 158,642,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | from flask import request, jsonify,Blueprint, abort
from flask.views import MethodView
from RPi import GPIO
from time import sleep
servosRoute = Blueprint('servosRoute', __name__)
from main import app
@servosRoute.route('/abrirPersiana1', methods=['GET'])
def abrirPersiana1():
    """Open blind #1 by pulsing the servo on BCM pin 12.

    Returns a JSON body {'estado': 'Abierta'} on completion.
    The original `while cant < 2` counter loop executed exactly once,
    so it is replaced by the straight-line pulse it performed.
    """
    PIN = 12
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM, the usual hobby-servo frequency
    servo.start(5)
    sleep(1)
    servo.ChangeDutyCycle(5)
    sleep(0.5)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Abierta'
    }
    return jsonify(res)
@servosRoute.route('/cerrarPersiana1', methods=['GET'])
def cerrarPersiana1():
    """Close blind #1 by pulsing the servo on BCM pin 12.

    Returns a JSON body {'estado': 'Cerrada'} on completion.
    The original manual counter (`cant`) loop ran a fixed 2 iterations;
    a range loop makes that explicit.
    """
    PIN = 12
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(7)
    sleep(1)
    for _ in range(2):
        servo.ChangeDutyCycle(10)
        sleep(0.5)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Cerrada'
    }
    return jsonify(res)
@servosRoute.route('/abrirPersiana2', methods=['GET'])
def abrirPersiana2():
    """Open blind #2 by driving the servo on BCM pin 25.

    Returns a JSON body {'estado': 'Abierta'} on completion.
    """
    #GPIO setup for this request; pins are released again in cleanup().
    PIN = 25
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(7)
    sleep(1)
    servo.ChangeDutyCycle(10)
    sleep(1)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Abierta'
    }
    return jsonify(res)
@servosRoute.route('/cerrarPersiana2', methods=['GET'])
def cerrarPersiana2():
    """Close blind #2 by driving the servo on BCM pin 25.

    Returns a JSON body {'estado': 'Cerrada'} on completion.
    """
    #GPIO setup for this request; pins are released again in cleanup().
    PIN = 25
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(5)
    sleep(1)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Cerrada'
    }
    return jsonify(res)
@servosRoute.route('/abrirPuertaGaraje', methods=['GET'])
def abrirPuertaGaraje():
    """Open the garage door by pulsing the servo on BCM pin 21.

    Returns a JSON body {'estado': 'Abierta'} on completion.
    The original `while cant < 2` counter loop executed exactly once,
    so it is replaced by the straight-line pulse it performed.
    """
    PIN = 21
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(5)
    sleep(1)
    servo.ChangeDutyCycle(5)
    sleep(0.5)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Abierta'
    }
    return jsonify(res)
@servosRoute.route('/cerrarPuertaGaraje', methods=['GET'])
def cerrarPuertaGaraje():
    """Close the garage door by pulsing the servo on BCM pin 21.

    Returns a JSON body {'estado': 'Cerrada'} on completion.
    The original manual counter (`cant`) loop ran a fixed 3 iterations;
    a range loop makes that explicit.
    """
    PIN = 21
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(7)
    sleep(1)
    for _ in range(3):
        servo.ChangeDutyCycle(10)
        sleep(0.5)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Cerrada'
    }
    return jsonify(res)
@servosRoute.route('/abrirPuertaPrincipal', methods=['GET'])
def abrirPuertaPrincipal():
    """Open the main door by driving the servo on BCM pin 20.

    Returns a JSON body {'estado': 'Abierta'} on completion.
    """
    #GPIO setup for this request; pins are released again in cleanup().
    PIN = 20
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(7)
    sleep(1)
    servo.ChangeDutyCycle(10)
    sleep(1)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Abierta'
    }
    return jsonify(res)
@servosRoute.route('/cerrarPuertaPrincipal', methods=['GET'])
def cerrarPuertaPrincipal():
    """Close the main door by driving the servo on BCM pin 20.

    Returns a JSON body {'estado': 'Cerrada'} on completion.
    """
    #GPIO setup for this request; pins are released again in cleanup().
    PIN = 20
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    servo = GPIO.PWM(PIN, 50)  # 50 Hz PWM
    servo.start(5)
    sleep(1)
    servo.stop()
    GPIO.cleanup()
    res = {
        'estado': 'Cerrada'
    }
    return jsonify(res)
| [
"krixoalejo@gmail.com"
] | krixoalejo@gmail.com |
68ef7ecd6ae99081df59c4cda272a675767819fa | 2c6ad479836272e776a72e0728185a81bc08a32f | /project/peer-review/Mseddi Mahdi/utils.py | 6928595029f67890060107dd760de35f93c8e57e | [] | no_license | lxwvictor/natural-language-processing | 7777b277dcd96cd3e2586f25a7c42b61bef795b6 | 94e5c7813269e1d4979e3639bf63b0a759919913 | refs/heads/master | 2021-06-29T21:57:20.931810 | 2021-02-01T09:26:07 | 2021-02-01T09:26:07 | 202,828,639 | 0 | 0 | null | 2019-08-17T03:08:56 | 2019-08-17T03:08:56 | null | UTF-8 | Python | false | false | 2,692 | py | import nltk
import pickle
import re
import numpy as np
import csv
nltk.download('stopwords')
from nltk.corpus import stopwords
# Paths for all resources for the bot.
RESOURCE_PATH = {
'INTENT_RECOGNIZER': 'intent_recognizer.pkl',
'TAG_CLASSIFIER': 'tag_classifier.pkl',
'TFIDF_VECTORIZER': 'tfidf_vectorizer.pkl',
'THREAD_EMBEDDINGS_FOLDER': 'thread_embeddings_by_tags',
'WORD_EMBEDDINGS': 'word_embeddings.tsv',
}
def text_prepare(text):
    """Lowercase the text, strip punctuation/bad symbols and drop stopwords."""
    replace_by_space_re = re.compile('[/(){}\[\]\|@,;]')
    bad_symbols_re = re.compile('[^0-9a-z #+_]')
    stopwords_set = set(stopwords.words('english'))

    cleaned = bad_symbols_re.sub('', replace_by_space_re.sub(' ', text.lower()))
    tokens = [token for token in cleaned.split()
              if token and token not in stopwords_set]
    return ' '.join(tokens).strip()
def load_embeddings(embeddings_path):
    """Loads pre-trained word embeddings from tsv file.

    Args:
      embeddings_path - path to the embeddings file (word \t v1 \t v2 ...).

    Returns:
      embeddings - dict mapping words to numpy.float32 vectors;
      embeddings_dim - dimension of the vectors (0 for an empty file).

    Fix: previously `dim` was only assigned inside the loop, so an empty
    file raised NameError on return; it is now initialized up front.
    """
    embeddings = {}
    dim = 0
    with open(embeddings_path, newline='') as embedding_obj:
        for line in csv.reader(embedding_obj, delimiter='\t'):
            word = line[0]
            embeddings[word] = np.array(line[1:], dtype=np.float32)
            dim = len(line) - 1
    return embeddings, dim
# Hint: you have already implemented a similar routine in the 3rd assignment.
# Note that here you also need to know the dimension of the loaded embeddings.
# When you load the embeddings, use numpy.float32 type as dtype
########################
#### YOUR CODE HERE ####
########################
def question_to_vec(question, embeddings, dim):
    """Transforms a string to an embedding by averaging its known word vectors.

    Words missing from `embeddings` are skipped; if no word is known,
    a zero vector of length `dim` is returned.
    """
    vectors = [embeddings[word] for word in question.split() if word in embeddings]
    if not vectors:
        return np.zeros(dim)
    return np.mean(np.array(vectors), axis=0)
def unpickle_file(filename):
    """Returns the result of unpickling the file content."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
| [
"lxwvictor@gmail.com"
] | lxwvictor@gmail.com |
2d8bdf85ce1a62bcce86f423043b095ebdfff032 | 851d55b8423069abef9e51bf8f41c522ab5b1be1 | /models/YOLO_small_tf.py | f645c280358ebc80469092fb2000b454cf5fd391 | [] | no_license | cid2105/car-counter | 8905f9e69bcb4ccd7412a89623814525a3b413eb | 5a0d02895ca49fd953c813268d1d9431fbe07383 | refs/heads/master | 2020-07-11T23:49:56.254873 | 2016-11-17T05:04:48 | 2016-11-17T05:05:27 | 73,991,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,381 | py | import urllib
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
class YOLO_TF:
    """YOLO-small object detector built on TensorFlow 1.x (Python 2 code).

    Builds the 28-layer YOLO-small graph, restores pre-trained weights
    (downloading them on first use), and exposes detection helpers that
    return per-class bounding boxes. NOTE(review): uses deprecated APIs
    (urllib.urlretrieve, tf.initialize_all_variables, np.fromstring,
    cv2.CV_LOAD_IMAGE_UNCHANGED) and Python 2 print statements.
    """
    disp_console = True                              # echo progress to stdout
    weights_file = 'models/weights/YOLO_small.ckpt'  # checkpoint path (auto-downloaded)
    alpha = 0.1                                      # leaky-ReLU negative slope
    threshold = 0.2                                  # min class-confidence to keep a box
    iou_threshold = 0.5                              # overlap above which boxes are suppressed
    num_class = 20
    num_box = 2
    grid_size = 7
    classes =  ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
    #classes = ["car"]
    # Image size; overwritten per-image in detect_from_cvmat.
    w_img = 640
    h_img = 480

    def __init__(self,argvs = []):
        """Download weights if missing, then build and restore the network.

        NOTE(review): `argvs` is unused and is a mutable default argument.
        """
        if not os.path.isfile(self.weights_file):
            print "downloading weights file"
            urllib.urlretrieve ("https://www.dropbox.com/s/bf9py7vyqvw1ltr/YOLO_small.ckpt?dl=1", self.weights_file)
        else:
            print "weights file found"
        self.disp_console = True
        self.build_networks()

    def predict(self, img_file):
        """Run detection on an open image file object; return the car summary dict."""
        img = cv2.imdecode(np.fromstring(img_file.read(), np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
        results = self.detect_from_cvmat(img)
        return self.parse_results(results)

    def test_predict(self, img_file):
        """Run detection on an image given by filename; return the car summary dict."""
        results = self.detect_from_file(img_file)
        return self.parse_results(results)

    def parse_results(self, results):
        """Filter raw detections to "car" entries and build the response dict.

        Returns {"cars": [{x, y, w, h, probability} ...], "car_count": N},
        where w/h are half-extents (full size // 2) as strings.
        """
        response = {"cars": list()}
        car_count = 0
        for i in range(len(results)):
            if results[i][0] == "car":
                x = int(results[i][1])
                y = int(results[i][2])
                w = int(results[i][3])//2
                h = int(results[i][4])//2
                probability = str(results[i][5])
                response["cars"] += [({"x":str(x),"y":str(y),"w":str(w),"h":str(h),"probability":str(probability)})]
                car_count += 1
        response["car_count"] = car_count
        return response

    def build_networks(self):
        """Construct the YOLO-small graph layer by layer and restore weights."""
        if self.disp_console : print "Building YOLO_small graph..."
        # Input: batch of 448x448 RGB images scaled to [-1, 1].
        self.x = tf.placeholder('float32',[None,448,448,3])
        self.conv_1 = self.conv_layer(1,self.x,64,7,2)
        self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
        self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
        self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
        self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
        self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
        self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
        self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
        self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
        self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
        self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
        self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
        self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
        self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
        self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
        self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
        self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
        self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
        self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
        self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
        self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
        self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
        self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
        self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
        self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
        self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
        self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
        self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
        self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
        self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
        #skip dropout_31
        # Output: 1470 = 7*7*(20 class probs) + 7*7*2 scales + 7*7*2*4 boxes.
        self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess,self.weights_file)
        if self.disp_console : print "Loading complete!" + '\n'

    def conv_layer(self,idx,inputs,filters,size,stride):
        """Add a convolution + bias + leaky-ReLU layer; returns its output tensor."""
        channels = inputs.get_shape()[3]
        weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[filters]))
        # Manual symmetric zero-padding, then VALID convolution.
        pad_size = size//2
        pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
        inputs_pad = tf.pad(inputs,pad_mat)
        conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
        conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
        if self.disp_console : print '    Layer  %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
        # Leaky ReLU: max(alpha*x, x).
        return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')

    def pooling_layer(self,idx,inputs,size,stride):
        """Add a max-pooling layer; returns its output tensor."""
        if self.disp_console : print '    Layer  %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
        return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')

    def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
        """Add a fully-connected layer (optionally flattening conv input first).

        flat=True transposes NHWC->NCHW before flattening; linear=True skips
        the leaky-ReLU activation.
        """
        input_shape = inputs.get_shape().as_list()
        if flat:
            dim = input_shape[1]*input_shape[2]*input_shape[3]
            inputs_transposed = tf.transpose(inputs,(0,3,1,2))
            inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
        else:
            dim = input_shape[1]
            inputs_processed = inputs
        weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
        if self.disp_console : print '    Layer  %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
        if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
        ip = tf.add(tf.matmul(inputs_processed,weight),biases)
        return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')

    def detect_from_cvmat(self,img):
        """Run the network on an OpenCV BGR image; returns interpret_output's list."""
        s = time.time()
        self.h_img,self.w_img,_ = img.shape
        # Resize to the network input size and rescale pixels to [-1, 1].
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray( img_RGB )
        inputs = np.zeros((1,448,448,3),dtype='float32')
        inputs[0] = (img_resized_np/255.0)*2.0-1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
        self.result = self.interpret_output(net_output[0])
        strtime = str(time.time()-s)
        if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'
        return self.result

    def detect_from_file(self,filename):
        """Load an image from disk and run detection on it."""
        if self.disp_console : print 'Detect from ' + filename
        img = cv2.imread(filename)
        #img = misc.imread(filename)
        return self.detect_from_cvmat(img)

    def interpret_output(self,output):
        """Decode the raw 1470-value network output into detections.

        Returns a list of [class_name, x, y, w, h, confidence] entries after
        thresholding and greedy non-maximum suppression.
        """
        probs = np.zeros((7,7,2,20))
        # Split the flat output into class probs, box scales and box coords.
        class_probs = np.reshape(output[0:980],(7,7,20))
        scales = np.reshape(output[980:1078],(7,7,2))
        boxes = np.reshape(output[1078:],(7,7,2,4))
        # Convert cell-relative box centers to absolute image coordinates.
        offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
        boxes[:,:,:,0] += offset
        boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
        boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
        # Width/height are predicted as square roots; square them back.
        boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
        boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
        boxes[:,:,:,0] *= self.w_img
        boxes[:,:,:,1] *= self.h_img
        boxes[:,:,:,2] *= self.w_img
        boxes[:,:,:,3] *= self.h_img
        # Per-box class confidence = class probability * box scale.
        for i in range(2):
            for j in range(20):
                probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
        filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
        # Sort detections by confidence, descending.
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]
        # Greedy non-maximum suppression: zero out lower-confidence overlaps.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0 : continue
            for j in range(i+1,len(boxes_filtered)):
                if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
                    probs_filtered[j] = 0.0
        filter_iou = np.array(probs_filtered>0.0,dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]
        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
        return result

    def show_results(self,img,results):
        """Print each detection to the console (img copy is made but unused)."""
        img_cp = img.copy()
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3])//2
            h = int(results[i][4])//2
            if self.disp_console : print '    class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])

    def iou(self,box1,box2):
        """Intersection-over-union of two center-format (x, y, w, h) boxes."""
        tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
        lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
        if tb < 0 or lr < 0 : intersection = 0
        else : intersection =  tb*lr
        return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
| [
"cole.ian.diamond@gmail.com"
] | cole.ian.diamond@gmail.com |
c2651c28d0497dedcdaabee35edb1d9456ca059e | 23dc8e94d88ebb45b5ec3bc6ab69d008042191b7 | /post/admin.py | 23c526414bf414b16c539dbf0220fbde975d6557 | [] | no_license | KorsPav/social-network-api | 220d04ffbe16f8f09a91e758f28ffb38136862c4 | 81a1c619c0f7bd8724daad44f9d700b9670b7e2d | refs/heads/main | 2023-05-31T19:05:31.041770 | 2021-06-22T17:02:11 | 2021-06-22T17:02:11 | 377,595,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.contrib import admin
from post.models import Post, Like
class PostAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
# Like needs no custom options, so it uses the default ModelAdmin.
admin.site.register(Like)
| [
"pavel.korshikov@gmail.com"
] | pavel.korshikov@gmail.com |
8a59a7dae5539de775b44ce14fd2617c4935163f | 32289f142a2e47bbb9470607886782036bdadc09 | /looping 7 - to find that number is a palindrome or not.py | 8a220f0f32916507321734d1321e4aab2b0dbd96 | [] | no_license | chakomash/python-3.0-basic-programs | a95178589bae432e9087cb28e3d6686b3e1510fb | 6dc5799d9518ffae6e544a6f1fe6d1b7665026d1 | refs/heads/master | 2022-08-03T11:28:00.977796 | 2020-05-29T18:45:29 | 2020-05-29T18:45:29 | 267,926,977 | 0 | 0 | null | 2020-05-29T18:22:36 | 2020-05-29T18:22:35 | null | UTF-8 | Python | false | false | 221 | py | n=int(input("enter a number"))
s=0
a=n
r=0
while(n!=0):
r=n%10
s=s*10+r
n=int(n/10)
if(a==s):
print("the number is a palindrome")
else:
print ("the number is not a palindrome")
| [
"noreply@github.com"
] | noreply@github.com |
4fe85ae61e7e304422ef59491436b0173b986aac | 149441474fc85adebd3137c1c12d6f6a50f9b160 | /docs/conf.py | 7f387c11630449d531a82ccf559262ec94dd1bd7 | [
"MIT"
] | permissive | kepsic/moitoi_docker_hive | c5374de083a215e2d5d1f64a067175a467875eee | 95b38795ecf3a69bb2199e6136264767761df6a3 | refs/heads/master | 2023-01-08T05:38:45.528048 | 2020-01-02T12:09:10 | 2020-01-02T12:09:10 | 230,942,542 | 0 | 0 | MIT | 2019-12-30T16:50:45 | 2019-12-30T15:58:12 | Python | UTF-8 | Python | false | false | 4,924 | py | #!/usr/bin/env python
#
# moitoi_docker_hive documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import moitoi_docker_hive
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MoiToi Docker Hive'
copyright = "2019, Andres Kepler"
author = "Andres Kepler"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = moitoi_docker_hive.__version__
# The full version, including alpha/beta/rc tags.
release = moitoi_docker_hive.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'moitoi_docker_hivedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'moitoi_docker_hive.tex',
'MoiToi Docker Hive Documentation',
'Andres Kepler', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'moitoi_docker_hive',
'MoiToi Docker Hive Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'moitoi_docker_hive',
'MoiToi Docker Hive Documentation',
author,
'moitoi_docker_hive',
'One line description of project.',
'Miscellaneous'),
]
| [
"andres@kepler.ee"
] | andres@kepler.ee |
4a13ba1319edbfe715b0595a65cffb4119942d5b | b84c89d0ade21bf8c2df9d0cf8f94d7a27c2824b | /test/integration/test_cursor.py | fc9dc209577a61eeb75a497eb6aa8552833b627a | [
"Apache-2.0"
] | permissive | srlabUsask/py2neo | 931b06678561201d56a36ec10da7ad4614ab6c87 | 80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f | refs/heads/master | 2022-11-16T21:17:42.319698 | 2020-07-12T23:00:29 | 2020-07-12T23:00:29 | 279,281,481 | 0 | 0 | Apache-2.0 | 2020-07-13T11:17:53 | 2020-07-13T11:17:50 | null | UTF-8 | Python | false | false | 5,453 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises
from py2neo import Record, Subgraph
def test_cannot_move_beyond_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward()
assert not cursor.forward()
def test_can_only_move_until_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(2) == 1
def test_moving_by_zero_keeps_same_position(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(0) == 0
def test_keys_are_populated_before_moving(graph):
cursor = graph.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_after_moving(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_before_moving_within_a_transaction(graph):
with graph.begin() as tx:
cursor = tx.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_stats_available(graph):
cursor = graph.run("CREATE (a:Banana)")
stats = cursor.stats()
assert stats["nodes_created"] == 1
assert stats["labels_added"] == 1
assert stats["contained_updates"] == 1
def test_current_is_none_at_start(graph):
cursor = graph.run("RETURN 1")
assert cursor.current is None
def test_current_updates_after_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert cursor.current == Record(zip(["n"], [n]))
def test_select_picks_next(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_cannot_select_past_end(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_selection_triggers_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
for i in range(1, 11):
n, n_sq = next(cursor)
assert n == i
assert n_sq == i * i
def test_can_use_next_function(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_raises_stop_iteration(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_can_get_data(graph):
cursor = graph.run("UNWIND range(1, 3) AS n RETURN n, n * n AS n_sq")
data = cursor.data()
assert data == [{"n": 1, "n_sq": 1}, {"n": 2, "n_sq": 4}, {"n": 3, "n_sq": 9}]
def test_stream_yields_all(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [1, 1])),
Record(zip(["n", "n_sq"], [2, 4])),
Record(zip(["n", "n_sq"], [3, 9])),
Record(zip(["n", "n_sq"], [4, 16])),
Record(zip(["n", "n_sq"], [5, 25])),
Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_stream_yields_remainder(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
cursor.forward(5)
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_can_evaluate_single_value(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate()
assert value == 1
def test_can_evaluate_value_by_index(graph):
cursor = graph.run("RETURN 1, 2")
value = cursor.evaluate(1)
assert value == 2
def test_can_evaluate_value_by_key(graph):
cursor = graph.run("RETURN 1 AS first, 2 AS second")
value = cursor.evaluate("second")
assert value == 2
def test_evaluate_with_no_records_is_none(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
value = cursor.evaluate()
assert value is None
def test_evaluate_on_non_existent_column_is_none(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate(1)
assert value is None
def test_to_subgraph(graph):
s = graph.run("CREATE p=(:Person {name:'Alice'})-[:KNOWS]->(:Person {name:'Bob'}) RETURN p").to_subgraph()
assert isinstance(s, Subgraph)
assert len(s.nodes) == 2
assert len(s.relationships) == 1
| [
"nigel@neo4j.com"
] | nigel@neo4j.com |
d62b71cee786178eddaf065c8d8850790282de38 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v36r1p3/InstallArea/x86_64-slc6-gcc48-opt/python/StrippingArchive/Stripping20r0p2/Beauty2Charm_Lb2XBuilder.py | c4d358fa0c53e8008b7e6ea45f0cfda3390b17fc | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,053 | py | #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
from copy import deepcopy
from Gaudi.Configuration import *
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from PhysSelPython.Wrappers import Selection
from Beauty2Charm_LoKiCuts import LoKiCuts
from Beauty2Charm_Utils import *
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
class LcBuilder(object):
'''Produces all Lambda_c baryons for the Beauty2Charm module.'''
def __init__(self,pions,kaons,protons,config,config_pid):
self.pions = pions
self.kaons = kaons
self.protons = protons
self.config = config
self.pkpi = [self._makeLc2pKpi()]
self.pkpi_pid = [filterPID('Lc2pKPiPID',self.pkpi,config_pid)]
self.xic_pkpi = [self._makeXic2pKpi()]
def _makeLc2pKpi(self):
'''Makes Lc -> p K pi + cc'''
dm,units = LoKiCuts.cutValue(self.config['MASS_WINDOW'])
comboCuts = [LoKiCuts(['ASUMPT'],self.config).code(),
"(ADAMASS('Lambda_c+') < %s*%s) " % (dm+10,units),
hasTopoChild()]
comboCuts.append(LoKiCuts(['AMAXDOCA'],self.config).code())
comboCuts = LoKiCuts.combine(comboCuts)
momCuts = ["(ADMASS('Lambda_c+') < %s*%s) " % (dm,units),
LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVDIRA'],
self.config).code()]
momCuts = LoKiCuts.combine(momCuts)
cp = CombineParticles(CombinationCut=comboCuts,MotherCut=momCuts,
DecayDescriptors=["[Lambda_c+ -> p+ K- pi+]cc"])
return Selection('Lc2PKPiBeauty2Charm',Algorithm=cp,
RequiredSelections=[self.pions,self.kaons,
self.protons])
def _makeXic2pKpi(self):
'''Makes Xic -> p K pi + cc'''
dm,units = LoKiCuts.cutValue(self.config['MASS_WINDOW'])
comboCuts = [LoKiCuts(['ASUMPT'],self.config).code(),
"(ADAMASS('Xi_c+') < %s*%s) " % (dm+10,units),
hasTopoChild()]
comboCuts.append(LoKiCuts(['AMAXDOCA'],self.config).code())
comboCuts = LoKiCuts.combine(comboCuts)
momCuts = ["(ADMASS('Xi_c+') < %s*%s) " % (dm,units),
LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVDIRA'],
self.config).code()]
momCuts = LoKiCuts.combine(momCuts)
cp = CombineParticles(CombinationCut=comboCuts,MotherCut=momCuts,
DecayDescriptors=["[Xi_c+ -> p+ K- pi+]cc"])
return Selection('Xic2PKPiBeauty2Charm',Algorithm=cp,
RequiredSelections=[self.pions,self.kaons,
self.protons])
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
class Lb2XBuilder(object):
'''Makes all Lambda_b -> X lines.'''
def __init__(self,lc,d,hh,topoPions,topoKaons,protons,hhh,dst,lambda0,config):
self.lc = lc.pkpi
self.lc_pid = lc.pkpi_pid
self.xic = lc.xic_pkpi
self.d = d
self.d0 = d.hh
self.hh = hh
self.hhh = hhh
self.dst = dst
self.lambda0 = lambda0
self.topoPions = [topoPions]
self.topoKaons = [topoKaons]
self.protons = [protons]
self.config = deepcopy(config)
self.config['AM_MIN'] = '5200*MeV'
self.lines = []
# Lb -> Lc+- H-+ (+WS)
self._makeLb2LcH()
# Lb -> Xic+- H-+ (+WS)
self._makeLb2XicH()
# Sb+- -> D0(HH) p+-
self._makeSb2D0P()
# Sb -> D-+(HHH) p+-
self._makeSb02DP()
# Lb -> D0(HH) p+- H-+
self._makeLb2D0PH()
# Lb -> Lc+- 3Pi, KPiPi, ppbarPi, ppbarK (+WS)
self._makeLb2LcHHH()
# Lb -> Lc D (+WS)
self._makeLb2LcD()
# Lb -> Lc D* (+WS)
self._makeLb2LcDst()
# X -> Lc Lc (+WS)
self._makeX2LcLc()
# Lb -> Lc 5pi
self._makeLb2Lc5Pi()
# Lb -> D0 Lambda0
self._makeLb2D0Lambda0()
def _makeLb2LcH(self):
'''Make RS and WS Lb -> Lc H (H=pi,K) + cc.'''
pions = self.topoPions
kaons = self.topoKaons
decays = {'Lb2LcPi': ["[Lambda_b0 -> Lambda_c+ pi-]cc"],
'Lb2LcK' : ["[Lambda_b0 -> Lambda_c+ K-]cc"]}
inputs = {'Lb2LcPi': self.lc+pions, 'Lb2LcK': self.lc+kaons}
rs = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiWS': ["[Lambda_b0 -> Lambda_c+ pi+]cc"],
'Lb2LcKWS' : ["[Lambda_b0 -> Lambda_c+ K+]cc"]}
inputs = {'Lb2LcPiWS':self.lc+pions, 'Lb2LcKWS':self.lc+kaons}
ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiNoIP': ["[Lambda_b0 -> Lambda_c+ pi-]cc"]}
inputs = {'Lb2LcPiNoIP': self.lc_pid+pions}
noip = makeB2XSels(decays,'Lc2PKPi',inputs,self.config,False)
decays = {'Lb2LcPiNoIPWS': ["[Lambda_b0 -> Lambda_c+ pi+]cc"]}
inputs = {'Lb2LcPiNoIPWS': self.lc_pid+pions}
noip_ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config,False)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
self.lines.append(ProtoLine(noip,1.0))
self.lines.append(ProtoLine(noip_ws,0.1))
def _makeLb2XicH(self):
'''Make RS and WS Lb -> Xi_c H (H=pi,K) + cc.'''
pions = self.topoPions
kaons = self.topoKaons
decays = {'Lb2XicPi': ["[Lambda_b0 -> Xi_c+ pi-]cc"],
'Lb2XicK' : ["[Lambda_b0 -> Xi_c+ K-]cc"]}
inputs = {'Lb2XicPi': self.xic+pions, 'Lb2XicK': self.xic+kaons}
rs = makeB2XSels(decays,'Xic2PKPi',inputs,self.config)
decays = {'Lb2XicPiWS': ["[Lambda_b0 -> Xi_c+ pi+]cc"],
'Lb2XicKWS' : ["[Lambda_b0 -> Xi_c+ K+]cc"]}
inputs = {'Lb2XicPiWS':self.xic+pions, 'Lb2XicKWS':self.xic+kaons}
ws = makeB2XSels(decays,'Xic2PKPi',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2LcHHH(self):
'''Make RS and WS Lb -> Lc HHH (H=pi,K) + cc.'''
pipipi = self.hhh.pipipi
kpipi = self.hhh.kpipi
kkpi = self.hhh.kkpi
ppbarpi = self.hhh.ppbarpi
ppbark = self.hhh.ppbark
decays = {'Lb2LcPiPiPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcKPiPi' : ["[Lambda_b0 -> Lambda_c+ K_1(1270)-]cc"],
'Lb2LcppbarPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcppbarK' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcKKPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"]}
inputs = {'Lb2LcPiPiPi' : self.lc_pid+pipipi,
'Lb2LcKPiPi' : self.lc_pid+kpipi,
'Lb2LcppbarPi' : self.lc_pid+ppbarpi,
'Lb2LcppbarK' : self.lc_pid+ppbark,
'Lb2LcKKPi' : self.lc_pid+kkpi}
rs = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiPiPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcKPiPiWS' : ["[Lambda_b0 -> Lambda_c+ K_1(1270)+]cc"],
'Lb2LcppbarPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcppbarKWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcKKPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"]}
inputs = {'Lb2LcPiPiPiWS' : self.lc_pid+pipipi,
'Lb2LcKPiPiWS' : self.lc_pid+kpipi,
'Lb2LcppbarPiWS' : self.lc_pid+ppbarpi,
'Lb2LcppbarKWS' : self.lc_pid+ppbark,
'Lb2LcKKPiWS' : self.lc_pid+kkpi}
ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2D0PH(self):
'''Makes RS Lb -> D0(HH) p+- H-+ + c.c. and WS lines'''
decs = ["Lambda_b0 -> D0 Lambda0","Lambda_b0 -> D0 Lambda~0"]
decays = {'Lb2D0PH': decs}
inputs = {'Lb2D0PH': self.d0+self.hh.ph_pid}
rs = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2D0PHWS': decs}
inputs = {'Lb2D0PHWS': self.d0+self.hh.ph_ws}
ws = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2D0Lambda0(self):
'''Makes RS Lb -> D0(HH) Lambda0 + c.c.'''
decs = ["Lambda_b0 -> D0 Lambda0","Lambda_b0 -> D0 Lambda~0"]
decays = {'Lb2D0Lambda0DD': decs}
inputs = {'Lb2D0Lambda0DD': self.d0 + self.lambda0["DD"]}
lb_dd = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(lb_dd,1.0))
decays = {'Lb2D0Lambda0LL': decs}
inputs = {'Lb2D0Lambda0LL': self.d0 + self.lambda0["LL"]}
lb_ll = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(lb_ll,1.0))
def _makeSb02DP(self):
'''Make RS and WS Sb0 -> D+- p-+ + cc.'''
protons = self.protons
decays = {'Sb02DP': ["[Sigma_b0 -> D- p+]cc"]}
inputs = {'Sb02DP': self.d.hhh_pid+protons}
rs = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
decays = {'Sb02DPWS': ["[Sigma_b0 -> D+ p+]cc"]}
inputs = {'Sb02DPWS': self.d.hhh_pid+protons}
ws = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeSb2D0P(self):
'''Make Sb+- -> D0 p+- + cc.'''
protons = self.protons
decays = {'Sb2D0P': ["Sigma_b+ -> D0 p+","Sigma_b- -> D0 p~-"]}
inputs = {'Sb2D0P': self.d.hh_pid+protons}
rs = makeB2XSels(decays,'D2HHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
def _makeLb2LcD(self):
'''Makes RS + WS Lb -> Lc D + c.c.'''
decays = {'Lb2LcD': ["[Lambda_b0 -> Lambda_c+ D-]cc"]}
inputs = {'Lb2LcD': self.d.hhh_pid+self.lc_pid}
rs = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2LcDWS': ["[Lambda_b0 -> Lambda_c+ D+]cc"]}
inputs = {'Lb2LcDWS': self.d.hhh_pid+self.lc_pid}
ws = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2LcDst(self):
'''Makes RS + WS Lb -> Lc D* + c.c.'''
decays = {'Lb2LcDst': ["[Lambda_b0 -> Lambda_c+ D*(2010)-]cc"]}
inputs = {'Lb2LcDst': self.dst.d0pi_pid+self.lc_pid}
rs = makeB2XSels(decays,'Dstar2D0PiPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2LcDstWS': ["[Lambda_b0 -> Lambda_c+ D*(2010)+]cc"]}
inputs = {'Lb2LcDstWS': self.dst.d0pi_pid+self.lc_pid}
ws = makeB2XSels(decays,'Dstar2D0PiPID',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeX2LcLc(self):
config = deepcopy(self.config)
config['AM_MIN' ] = '4800*MeV'
decays = {'X2LcLc': ["[B0 -> Lambda_c+ Lambda_c~-]cc"]}
inputs = {'X2LcLc': self.lc_pid}
rs = makeB2XSels(decays,'',inputs,config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'X2LcLcWS': ["[B0 -> Lambda_c+ Lambda_c+]cc"]}
inputs = {'X2LcLcWS': self.lc_pid}
ws = makeB2XSels(decays,'',inputs,config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2Lc5Pi(self):
decays = {'Lb2Lc5Pi':
["[Lambda_b0 -> Lambda_c+ a_1(1260)- rho(770)0]cc"]}
inputs = {'Lb2Lc5Pi': self.lc_pid + self.hhh.pipipi + self.hh.pipi_pid}
lb2lc5pi = makeB2XSels(decays,'Lc2PKPiPID',inputs,self.config)
self.lines.append(ProtoLine(lb2lc5pi,1.0))
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
a88f046366e59b9019ba58620dd77522a9c42a0a | 616cc6c05f525dd2cb67916601f6ecd2c8242f24 | /homework/hw01/problems/client/cli/ok.py | 66ac72252d323b4bd4a142ddda60f52f67f70359 | [] | no_license | cookieli/cs61a_li | 6f1d51aad7cd32fb27f64c855b3803bd2f8d9aad | 6ee0df9c64842bde9e30a0484e661abf04212358 | refs/heads/master | 2020-04-07T14:32:38.337554 | 2018-03-07T10:18:03 | 2018-03-07T10:18:03 | 124,218,933 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,952 | py | """ok is an autograder that you can use to run tests, back up your work, and
submit assignments.
You can run all tests with
python3 ok
There are several "options" you can give ok to modify its behavior. These
options generally have both a short form (preceded by a single dash, like -q)
or a long form (preceded by two dashes, like --question). This is similar to how
many other command line applications accept options. These options can be mixed
and matched in any order. The options are listed in full below, but we'll
describe some of the more common ones here.
To test a specific question, use the -q (or --question) option with the name of
the question:
python3 ok -q foo
python3 ok -q 12
By default, only tests that fail will appear. If you want to see the results
from all tests, you can use the -v (or --verbose) option:
python3 ok -q foo -v
To start an interactive interpreter after a failed test for debugging, use the
-i (or --interactive) option:
python3 ok -q foo -i
By default, after each test run ok will attempt to back up your work to the
server. To run the tests without any network access, use the --local option:
python3 ok -q foo --local
To submit the assignment after you're done, use the --submit option:
python3 ok --submit
Finally, to log out and log in under a different email, use --authenticate:
python3 ok --authenticate
Visit https://okpy.org to view your backups and submissions.
"""
from client import exceptions as ex
from client.api import assignment
from client.cli.common import messages
from client.utils import auth
from client.utils import output
from client.utils import software_update
from datetime import datetime
import argparse
import client
import logging
import os
import sys
import struct
LOGGING_FORMAT = '%(levelname)s | %(filename)s:%(lineno)d | %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger('client') # Get top-level logger
CLIENT_ROOT = os.path.dirname(client.__file__)
##########################
# Command-line Interface #
##########################
def parse_input(command_input=None):
"""Parses command line input."""
parser = argparse.ArgumentParser(
prog='python3 ok',
description=__doc__,
usage='%(prog)s [--help] [options]',
formatter_class=argparse.RawDescriptionHelpFormatter)
testing = parser.add_argument_group('running tests')
testing.add_argument('-q', '--question', type=str, action='append',
help="run tests for a specific question")
testing.add_argument('--suite', type=int, default=None,
help="run cases from a specific suite")
testing.add_argument('--case', type=int, action='append',
help="run specific cases")
testing.add_argument('-u', '--unlock', action='store_true',
help="unlock tests interactively")
testing.add_argument('-i', '--interactive', action='store_true',
help="start the Python interpreter after a failed test")
testing.add_argument('-v', '--verbose', action='store_true',
help="show all tests, not just passing tests")
testing.add_argument('--all', action='store_true',
help="run tests for all questions in config file")
testing.add_argument('--submit', action='store_true',
help="submit the assignment")
testing.add_argument('--backup', action='store_true',
help="attempt to reliably backup your work")
testing.add_argument('--revise', action='store_true',
help="submit composition revision")
testing.add_argument('--restore', action='store_true',
help="restore assignment from an earlier backup")
testing.add_argument('--timeout', type=int, default=10,
help="set the timeout duration (in seconds) for running tests")
# Experiments
experiment = parser.add_argument_group('experiment options')
experiment.add_argument('--no-hints', action='store_true',
help="do not give hints")
experiment.add_argument('--hint', action='store_true',
help="give a hint (if available)")
experiment.add_argument('--style', action='store_true',
help="run AutoStyle feedback system")
experiment.add_argument('--collab', action='store_true',
help="launch collaborative programming environment")
# Debug information
debug = parser.add_argument_group('debugging options')
debug.add_argument('--version', action='store_true',
help="print the version number and exit")
debug.add_argument('--tests', action='store_true',
help="display a list of all available tests")
debug.add_argument('--debug', action='store_true',
help="show debugging output")
# Grading
grading = parser.add_argument_group('grading options')
grading.add_argument('--lock', action='store_true',
help="lock the tests in a directory")
grading.add_argument('--score', action='store_true',
help="score the assignment")
grading.add_argument('--score-out', type=argparse.FileType('w'),
default=sys.stdout, help="write scores to a file")
grading.add_argument('--config', type=str,
help="use a specific configuration file")
# Server parameters
server = parser.add_argument_group('server options')
server.add_argument('--local', action='store_true',
help="disable any network activity")
server.add_argument('--server', type=str,
default='okpy.org',
help="set the server address")
server.add_argument('--authenticate', action='store_true',
help="authenticate, ignoring previous authentication")
server.add_argument('--get-token', action='store_true',
help="get ok access token")
server.add_argument('--insecure', action='store_true',
help="use http instead of https")
server.add_argument('--no-update', action='store_true',
help="do not check for ok updates")
server.add_argument('--update', action='store_true',
help="update ok and exit")
return parser.parse_args(command_input)
def main():
"""Run all relevant aspects of ok.py."""
args = parse_input()
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
log.debug(args)
# Checking user's Python bit version
bit_v = (8 * struct.calcsize("P"))
log.debug("Python {}bit".format(bit_v))
if args.version:
print("okpy=={}".format(client.__version__))
exit(0)
elif args.update:
print("Current version: {}".format(client.__version__))
did_update = software_update.check_version(
args.server, client.__version__, client.FILE_NAME, timeout=10)
exit(not did_update) # exit with error if ok failed to update
if args.get_token:
access_token = auth.authenticate(True)
print("Token: {}".format(access_token))
exit(not access_token) # exit with error if no access_token
assign = None
try:
if args.authenticate:
# Authenticate and check for success
if not auth.authenticate(True):
exit(1)
# Instantiating assignment
assign = assignment.load_assignment(args.config, args)
if args.tests:
print('Available tests:')
for name in assign.test_map:
print(' ' + name)
exit(0)
msgs = messages.Messages()
for name, proto in assign.protocol_map.items():
log.info('Execute {}.run()'.format(name))
proto.run(msgs)
msgs['timestamp'] = str(datetime.now())
except ex.LoadingException as e:
log.warning('Assignment could not load', exc_info=True)
print('Error loading assignment: ' + str(e))
except ex.AuthenticationException as e:
log.warning('Authentication exception occurred', exc_info=True)
print('Authentication error: {0}'.format(e))
except ex.OkException as e:
log.warning('General OK exception occurred', exc_info=True)
print('Error: ' + str(e))
except KeyboardInterrupt:
log.info('KeyboardInterrupt received.')
finally:
if not args.no_update:
try:
software_update.check_version(args.server, client.__version__,
client.FILE_NAME)
except KeyboardInterrupt:
pass
if assign:
assign.dump_tests()
if __name__ == '__main__':
main()
| [
"you@example.com"
] | you@example.com |
7722060d91d7e85a924b668264b02a69f3cc3e46 | d15286610e015856f8530850afbf39e08c59a4d8 | /manage.py | 0eb14eafbedc6c3794afdfbfb1bbf6d92b7fa143 | [] | no_license | GiaEla/kalmia | 4389d82e92e802a685c13c68b7ccd3c108884801 | a97432482976137fe05251772ae6fda78b52990c | refs/heads/master | 2021-07-13T07:31:33.845770 | 2017-10-19T21:12:37 | 2017-10-19T21:12:37 | 107,043,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kalmia.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"lavricevaspela@gmail.com"
] | lavricevaspela@gmail.com |
46bf955e07557ee8530320380cf68eb939581578 | 227539d0906cdfbb7cd19f16599c35d5bd09abfd | /Stepik_Adaptive_Python/adaptive-python-en-master/Step 070 Riddle.py | 5762e4cba5be9ed1142cc7c9eba781abb385451a | [] | no_license | solomonli/PycharmProjects | cceb92a11ec1f9e7fef25bca552d8264c75228a0 | 31673627487db1370424f5b0aeee3e20bb23b47a | refs/heads/master | 2021-06-24T11:59:36.365496 | 2019-07-08T09:53:18 | 2019-07-08T09:53:18 | 148,558,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | riddle = '''
{0} and {1} sat in the tree.
{0} had fallen, {1} was stolen.
What's remaining in the tree?
'''
print(riddle.format(input(), input()))
| [
"richdad.solomon@gmail.com"
] | richdad.solomon@gmail.com |
b3717813e643005c192685476f9ee92b411fe642 | 0c5cdf2e5bc2e3018945eec35bb2c3f4a97f218e | /dataset/create_dataset.py | 066e316f8c0a3488b6eec37c9fbd77722a6a0ed3 | [
"Apache-2.0"
] | permissive | Kuanch/tf2.0-model_lib | c3c570e5b38726bd86882f671d6f873255ffda3e | d131101cdc5b2de3fbe24f5f8276d43e93c5f5f2 | refs/heads/master | 2020-05-19T16:36:53.661785 | 2019-05-15T09:59:41 | 2019-05-15T09:59:41 | 185,114,963 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | from __future__ import absolute_import, division, print_function
import tensorflow as tf
def create_dataset(tfrecord_path, num_label, preprocess_fn, batch_size=32, num_epoch=1,
train_image_size=224, shuffle_buffer=100,
is_training=True, cifar10_mode=False):
if cifar10_mode:
if is_training:
(images, labels), _ = tf.keras.datasets.cifar10.load_data()
else:
_, (images, labels) = tf.keras.datasets.cifar10.load_data()
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
cifar10_mode = True
train_image_size = 32
num_label=10
tf.print('Cifar10 mode')
dataset = dataset.map(lambda image, label:preprocess_fn(image, label,
train_image_size, train_image_size,
num_label, is_training=is_training,
cifar10_mode=cifar10_mode),
num_parallel_calls=8)
# Set the number of datapoints you want to load and shuffle
dataset = dataset.shuffle(shuffle_buffer)
# This dataset will go on forever
dataset = dataset.repeat(num_epoch)
dataset = dataset.prefetch(buffer_size=batch_size*2)
# Set the batchsize
dataset = dataset.batch(batch_size, drop_remainder=False)
return dataset | [
"sixigma6@gmail.com"
] | sixigma6@gmail.com |
9b7334738193809ecf0fbf775654704d33b0a53f | e3ce8d42bf2afecc3ef94995386f0731610e2b5e | /exercicio4.py | c1b2820ff4fe33aafe6f037d0279920b17cd3c9e | [] | no_license | luanafortiz/aula3 | 26d150f3eba86cad5bf960b1ba050ad8407c987c | d521c9e711c86f828a6c8b5a9c9d2aca1cea9c91 | refs/heads/master | 2021-01-06T18:33:46.767696 | 2020-02-20T18:40:15 | 2020-02-20T18:40:15 | 241,441,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | x = 1
caracter = input('digite um caracter: ')
caracter1 = caracter
degrau = int(input ('digite a quantidade de degraus desejada: '))
while x <= degrau:
print(caracter1)
caracter1 = caracter1 + caracter
x += 1 | [
"luanadfortiz@gmail.com"
] | luanadfortiz@gmail.com |
c9480092605bb55570bbb1146231d8c1d3809ae8 | 02aa73d429b15d7369f558708dfd56007f0644cf | /csu-sample2/logan.py | 766d1166969e89e38f44c87a141258b4143a6915 | [] | no_license | ripom/pulumi-projects | 0107c3a2915e997238130bb1ab60f57f4853cab5 | 3f3384a28b6ceafddaae009f75dc895e81c85c28 | refs/heads/master | 2022-11-21T00:39:31.619753 | 2020-07-25T13:58:33 | 2020-07-25T13:58:33 | 281,229,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | import pulumi
import pulumi_azure as azure
def logan(rg_name,rg_location):
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("logworkspace",
location=rg_location,
resource_group_name=rg_name,
sku="PerGB2018",
retention_in_days=30)
return example_analytics_workspace.id
| [
"riccardopomato@hotmail.com"
] | riccardopomato@hotmail.com |
fb0325e25eb512db1670e504520644a0e3367d85 | d1cda1783411b2794608b5e8d36cf558cdb9be0a | /jtk/listfiles.py | f6ac7807dfe1125a5875d379d30df57cb93f510d | [] | no_license | joeledwards/jtk | 68a9ade47a736b4b6822878ba440585e8f39ae4f | 703857280ada8f5bc3d310de97afa828ff921fcd | refs/heads/master | 2020-05-23T15:07:10.855766 | 2014-11-11T21:53:28 | 2014-11-11T21:53:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,610 | py | #!/usr/bin/env python
import glob
import optparse
import os
import re
import stat
import sys
"""
A simple tool, similar to the ls command, which provides the biggest missing feature:
prefixing of listed files with their relative or canonical path
"""
PATHS_NONE = 0
PATHS_ABSOLUTE = 1
PATHS_CANONICAL = 2
class ListFiles(object):
    """Recursive directory lister (ls-like) with optional path prefixes.

    NOTE(review): parts of this class look copied from a permission-editing
    tool: ``_edit_permissions`` reads ``self._type`` and TYPE_FILE/TYPE_DIR/
    TYPE_ALL, none of which are defined in this file, and it is only reached
    from the unreadable-directory error path -- so regular files are never
    printed at all. Confirm intent before relying on this class.
    """
    def __init__(self, depth=-1, path_mode=PATHS_NONE, ignore_symlinks=False, verbosity=0):
        # depth: recursion budget (-1 = unlimited); path_mode: a PATHS_*
        # module constant; verbosity: higher prints more "skipping" notices.
        object.__init__(self)
        self._depth = depth
        self._path_mode = path_mode
        self._ignore_symlinks = ignore_symlinks
        self._verbosity = verbosity
        # TODO: track canonical paths of symlinked directories to detect loops
        # TODO: determine whether a symlink steps outside of the source
        # directory, and only recurse into those which do not
    def process(self, path_list):
        # Entry point: normalise each argument to an absolute path and walk it.
        for path in path_list:
            path = os.path.abspath(path)
            if not os.path.exists(path):
                print "%s: path not found" % path
            else:
                self._process_path(path, self._depth)
    def _process_path(self, path, depth):
        # Depth-first walk; depth counts down to 0 (so -1 never terminates it).
        if depth == 0: return
        if not os.path.exists(path):
            print "%s: path not found" % path
        if os.path.isdir(path):
            # NOTE(review): the .ssh branch only prints a warning -- it does
            # NOT skip the directory; listing continues below. Confirm intent.
            if os.path.basename(path) == ".ssh":
                print "%s: skipping SSH config directory " % path
                print "    (if you want this changed, do it yourself)"
            try:
                for extension in os.listdir(path):
                    self._process_path(os.path.abspath(path + '/' + extension), depth - 1)
            except OSError, e:
                print "%s: cannot read directory contents, permission denied" % path
                self._edit_permissions(path)
    def _edit_permissions(self, path):
        # NOTE(review): despite the name, this only classifies the file type
        # and forwards to _print_details; it never edits permissions.
        try:
            file_stat = os.stat(path)
        except:
            print "%s: cannot get permissions, permission denied" % path
            return
        mode = file_stat[stat.ST_MODE]
        # NOTE(review): self._type and TYPE_FILE/TYPE_DIR/TYPE_ALL below are
        # undefined in this module -- every branch here raises NameError /
        # AttributeError when taken. Leftover from another tool; fix or remove.
        if stat.S_ISDIR(mode):
            if (self._type == TYPE_FILE):
                if self._verbosity > 0:
                    print "skipping directory '%s'" % path
                return
        if stat.S_ISREG(mode):
            if (self._type == TYPE_DIR):
                if self._verbosity > 0:
                    print "skipping regular file '%s'" % path
                return
        if stat.S_ISCHR(mode):
            if (self._type != TYPE_ALL):
                if self._verbosity > 0:
                    print "skipping character device '%s'" % path
                return
        if stat.S_ISBLK(mode):
            if (self._type != TYPE_ALL):
                if self._verbosity > 0:
                    print "skipping block device '%s'" % path
                return
        if stat.S_ISFIFO(mode):
            if (self._type != TYPE_ALL):
                if self._verbosity > 0:
                    print "skipping fifo '%s'" % path
                return
        if stat.S_ISLNK(mode):
            if (self._type != TYPE_ALL):
                if self._verbosity > 0:
                    print "skipping symbolic link '%s'" % path
                return
        if stat.S_ISSOCK(mode):
            if (self._type != TYPE_ALL):
                if self._verbosity > 0:
                    print "skipping socket '%s'" % path
                return
        self._print_details(path, mode)
    def _print_details(self, path, mode):
        # TODO: add prefix (per self._path_mode) and details
        print path
class Main:
def __init__(self):
option_list = []
option_list.append(optparse.make_option("-a", "--absolute-paths", dest="absolute_paths", action="store_true", help="Will prefix every listed file/directory with its absolute path (not compatible with the -c option)."))
option_list.append(optparse.make_option("-c", "--canonical-paths", dest="canonical_paths", action="store_true", help="Will prefix every listed file/directory with its canonical path (not compatible with the -a option)."))
option_list.append(optparse.make_option("-d", "--depth", dest="depth", action="store", type="int", help="Recurse this many levels (includes current level). Must be an integer greater than zero."))
option_list.append(optparse.make_option("-i", "--ignore-symlinks", dest="ignore_symlinks", action="store_true", help="Ignore symlinks to directories when recursing."))
option_list.append(optparse.make_option("-r", "--recursive", dest="rescursive", action="store_true", help="Recursive directory traversal with no depth limit (see --depth option for limited depth recursion)."))
option_list.append(optparse.make_option("-v", dest="verbosity", action="count", help="specify multiple times to increase verbosity"))
self.parser = optparse.OptionParser(option_list=option_list)
self.parser.set_usage("""Usage: %prog [options] [path1 [path2 ...]]
Lists out .""")
def usage(self, message=''):
if message != '':
print "E:", message
self.parser.print_help()
sys.exit(1)
def start(self):
self.options, self.args = self.parser.parse_args()
regex_mask = re.compile('^[DEIP]{3}$')
depth = 1 # No limit
arg_paths = PATHS_NONE
verbosity = 0
ignore_symlinks = False
if self.options.absolute_paths:
if arg_paths > 0:
self.usage("You may only supply one of -a, -c")
arg_paths = PATHS_ABSOLUTE
if self.options.canonical_paths:
if arg_paths > 0:
self.usage("You may only supply one of -a, -c")
arg_paths = PATHS_CANONICAL
mask = arg_user + arg_group + arg_other
if self.options.depth:
if depth != 1:
self.usage("You may only supply one of -d, -r")
if self.options.depth < 1:
self.usage("Depth must be greater than zero")
depth = self.options.depth
if self.options.recursive:
if depth != 1:
self.usage("You may only supply one of -d, -r")
depth = -1
if self.options.ignore_symlinks:
ignore_symlinks = self.options.ignore_symlinks
if self.options.verbosity:
verbosity = self.options.verbosity
ListFiles(depth, path_mode, ignore_symlinks, verbosity).process(self.args)
if __name__ == '__main__':
    try:
        Main().start()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C, terminating the current output line.
        print
| [
"joeledwards@customercentrix.com"
] | joeledwards@customercentrix.com |
53502d11c495fd0841e4a3d9985b568cf2ef3613 | 5cfe4ee704f2362f33e9310a7344911c921ae41b | /src/app.py | b1fd8d6299c8a01b29adf2061fed1633557afb95 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Azure-Samples/azurite-tox-automation | 7136fe733b8188c0780d301d35115729eedbe502 | b124ccc1030c79500564f9ac322877796c8d93ff | refs/heads/main | 2023-09-02T05:49:09.159454 | 2021-11-20T15:40:08 | 2021-11-20T15:40:08 | 426,317,514 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | from typing import List
from azure.storage.blob.aio import BlobClient, BlobServiceClient
from fastapi import APIRouter
import os
from dotenv import load_dotenv
# Resolve .env relative to this file so the app works from any working dir.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
load_dotenv(dotenv_path=os.path.join(BASE_DIR, ".env"))
# All routes below are mounted under the /api prefix.
router = APIRouter(
    prefix = "/api",
    tags=["data"]
)
@router.get("/create")
async def upload_blob():
    """Upload ../data/data.txt to the configured container as "data.txt".

    Creates the container first when it does not exist yet; returns False on
    any failure (the error is printed).
    """
    try:
        blob_service_client = BlobServiceClient.from_connection_string(os.environ.get("STORAGE_CONNECTION_STRING"))
        container = blob_service_client.get_container_client(os.environ.get("STORAGE_CONTAINER"))
        # BUG FIX: was `account_name == "" or None`, which parses as
        # `(account_name == "") or None` -- the None case never triggered
        # container creation.
        if container.account_name == "" or container.account_name is None:
            await blob_service_client.create_container(os.environ.get("STORAGE_CONTAINER"))
        with open("../data/data.txt", "rb") as data:
            blob_client = container.get_blob_client("data.txt")
            await blob_client.upload_blob(data, overwrite=True)
    except Exception as e:
        print(e)
        return False
@router.get("/download", response_model=List[str])
async def download_blob():
    """Download the blob into ./data/download.txt.

    Returns False on any failure (the error is printed).
    NOTE(review): uploads write "data.txt" but this reads blob_name="data" --
    confirm which name is intended.
    """
    try:
        blob = BlobClient.from_connection_string(conn_str=os.environ.get("STORAGE_CONNECTION_STRING"),\
                                    container_name=os.environ.get("STORAGE_CONTAINER"),
                                    blob_name="data")
        file_download_path = "./data/download.txt"
        with open(file_download_path, "wb") as file_download:
            # BUG FIX: on the aio client download_blob() is a coroutine; the
            # missing `await` meant readinto() was called on a coroutine
            # object and always raised.
            blob_data = await blob.download_blob()
            await blob_data.readinto(file_download)
    except Exception as e:
        print(e)
        return False
"gldnblgty@hotmail.com"
] | gldnblgty@hotmail.com |
2ceaf520d25be23c194e7703c0676f8517cb5e3a | 1b307344a0dd5590e204529b7cc7557bed02d2b9 | /eng/mgmt/automation/sdk_generate.py | 832ab0a93ce28a66d728da5804309b2c53731cd4 | [
"LGPL-2.1-or-later",
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"LicenseRef-scancode-generic-cla"
] | permissive | alzimmermsft/azure-sdk-for-java | 7e72a194e488dd441e44e1fd12c0d4c1cacb1726 | 9f5c9b2fd43c2f9f74c4f79d386ae00600dd1bf4 | refs/heads/main | 2023-09-01T00:13:48.628043 | 2023-03-27T09:00:31 | 2023-03-27T09:00:31 | 176,596,152 | 4 | 0 | MIT | 2023-03-08T18:13:24 | 2019-03-19T20:49:38 | Java | UTF-8 | Python | false | false | 10,599 | py | #!/usr/bin/env python3
import os
import re
import sys
import json
import glob
import logging
import argparse
from typing import List
pwd = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
from parameters import *
from utils import set_or_increase_version
from generate_data import (
get_or_update_sdk_readme,
sdk_automation_readme,
update_readme,
sdk_automation_cadl,
)
from generate_utils import (
compare_with_maven_package,
compile_package,
generate,
get_and_update_service_from_api_specs,
get_suffix_from_api_specs,
update_spec,
)
os.chdir(pwd)
def update_parameters(suffix):
    """Recompute the module-level naming templates for the given suffix.

    Mutates the globals imported from parameters.py; an empty/None suffix
    yields the plain (non-suffixed) names.
    """
    # update changeable parameters in parameters.py
    global SUFFIX, NAMESPACE_SUFFIX, ARTIFACT_SUFFIX, NAMESPACE_FORMAT, ARTIFACT_FORMAT, OUTPUT_FOLDER_FORMAT
    SUFFIX = suffix
    NAMESPACE_SUFFIX = '.{0}'.format(SUFFIX) if SUFFIX else ''
    ARTIFACT_SUFFIX = '-{0}'.format(SUFFIX) if SUFFIX else ''
    # {{0}} survives .format() as a literal {0} placeholder for the service.
    NAMESPACE_FORMAT = 'com.azure.resourcemanager.{{0}}{0}'.format(
        NAMESPACE_SUFFIX)
    ARTIFACT_FORMAT = 'azure-resourcemanager-{{0}}{0}'.format(ARTIFACT_SUFFIX)
    OUTPUT_FOLDER_FORMAT = 'sdk/{{0}}/{0}'.format(ARTIFACT_FORMAT)
def parse_args() -> argparse.Namespace:
    """Build and run the CLI argument parser for manual SDK generation.

    The trailing positional `config` (two values: input json, output json)
    switches main() into SDK-automation mode.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--spec-root',
        default='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/',
        help = 'Spec root folder',
    )
    parser.add_argument(
        '-r',
        '--readme',
        help='Readme path, Sample: "storage" or "specification/storage/resource-manager/readme.md"',
    )
    parser.add_argument('-t', '--tag', help = 'Specific tag')
    parser.add_argument('-v', '--version', help = 'Specific sdk version')
    parser.add_argument(
        '-s',
        '--service',
        help = 'Service Name if not the same as spec name',
    )
    parser.add_argument(
        '-u',
        '--use',
        default = AUTOREST_JAVA,
        help = 'Autorest java plugin',
    )
    parser.add_argument(
        '--autorest',
        default = AUTOREST_CORE_VERSION,
        help = 'Autorest version',
    )
    parser.add_argument(
        '--autorest-options',
        default = '',
        help = 'Additional autorest options',
    )
    parser.add_argument('--suffix', help = 'Suffix for namespace and artifact')
    parser.add_argument(
        '--auto-commit-external-change',
        action = 'store_true',
        help = 'Automatic commit the generated code',
    )
    parser.add_argument('--user-name', help = 'User Name for commit')
    parser.add_argument('--user-email', help = 'User Email for commit')
    parser.add_argument(
        'config',
        nargs = '*',
    )
    return parser.parse_args()
def codegen_sdk_automation(config: dict) -> List[dict]:
    """Run data-plane codegen for one automation request.

    Returns a (possibly empty) list of generated package descriptors; returns
    [] when the request has no readme or the readme is not a data-plane one.
    """
    # priority:
    # 1. autorestConfig from input
    # 2. swagger/README.md in sdk repository that matches readme from input
    base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    sdk_root = os.path.abspath(os.path.join(base_dir, SDK_ROOT))
    spec_root = os.path.abspath(config['specFolder'])
    packages = []
    # find readme.md in spec repository
    if not config['relatedReadmeMdFile']:
        return packages
    readme_file_path = config['relatedReadmeMdFile']
    match = re.search(
        r'(specification)?/?([^/]+)/data-plane(/.*)*/readme.md',
        readme_file_path,
        re.IGNORECASE,
    )
    if not match:
        logging.info(
            '[Skip] readme path:%s does not format as specification/([^/]+)/data-plane(/.*)*/readme.md',
            readme_file_path
        )
        return packages
    logging.info('[RESOLVE] README from specification %s', readme_file_path)
    sdk_readme_abspath = get_or_update_sdk_readme(config, readme_file_path)
    if sdk_readme_abspath:
        # Sync the SDK-side readme from the spec repo, then generate from it.
        spec_readme_abspath = os.path.join(spec_root, readme_file_path)
        update_readme(sdk_readme_abspath, spec_readme_abspath)
        sdk_automation_readme(sdk_readme_abspath, packages, sdk_root)
    return packages
def sdk_automation(input_file: str, output_file: str):
    """Entry point for one SDK-automation request.

    Loads the JSON config from *input_file*, generates packages (cadl
    pipeline first, autorest as a fallback) and writes a ``{'packages': ...}``
    JSON document to *output_file*.
    """
    with open(input_file, 'r') as request_fp:
        request = json.load(request_fp)
    logging.info(f"sdk_automation input: {request}")
    # The cadl pipeline wins when it yields anything; otherwise try autorest.
    packages = sdk_automation_cadl(request)
    if not packages:
        packages = sdk_automation_autorest(request)
    result = {
        'packages': packages,
    }
    with open(output_file, 'w') as response_fp:
        json.dump(result, response_fp)
def sdk_automation_autorest(config: dict) -> List[dict]:
    """Generate a mgmt-plane (resource-manager) SDK package via autorest.

    Falls back to data-plane codegen when the readme is not a
    resource-manager one; always normalises `packageFolder` to the first
    path entry before returning.
    """
    base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    sdk_root = os.path.abspath(os.path.join(base_dir, SDK_ROOT))
    api_specs_file = os.path.join(base_dir, API_SPECS_FILE)
    packages = []
    if 'relatedReadmeMdFile' not in config or not config['relatedReadmeMdFile']:
        return packages
    readme = config['relatedReadmeMdFile']
    match = re.search(
        '(specification)?/?([^/]+)/resource-manager(/.*)*/readme.md',
        readme,
        re.IGNORECASE,
    )
    if not match:
        logging.info(
            '[Skip] readme path does not format as */resource-manager/*/readme.md'
        )
    else:
        spec = match.group(2)
        spec = update_spec(spec, match.group(3))
        service = get_and_update_service_from_api_specs(
            api_specs_file, spec)
        # Temporarily switch the naming globals to this spec's suffix; the
        # previous value is restored below via update_parameters(pre_suffix).
        pre_suffix = SUFFIX
        suffix = get_suffix_from_api_specs(api_specs_file, spec)
        if suffix is None:
            suffix = SUFFIX
        update_parameters(suffix)
        # TODO: use specific function to detect tag in "resources"
        tag = None
        if service == 'resources':
            with open(os.path.join(config['specFolder'], readme)) as fin:
                tag_match = re.search(r'tag: (package-resources-\S+)',
                                      fin.read())
                if tag_match:
                    tag = tag_match.group(1)
                else:
                    tag = 'package-resources-2021-01'
        module = ARTIFACT_FORMAT.format(service)
        output_folder = OUTPUT_FOLDER_FORMAT.format(service)
        namespace = NAMESPACE_FORMAT.format(service)
        stable_version, current_version = set_or_increase_version(
            sdk_root,
            GROUP_ID,
            module
        )
        succeeded = generate(
            sdk_root,
            service,
            spec_root = config['specFolder'],
            readme = readme,
            autorest = AUTOREST_CORE_VERSION,
            use = AUTOREST_JAVA,
            output_folder = output_folder,
            module = module,
            namespace = namespace,
            tag = tag,
        )
        if succeeded:
            compile_package(sdk_root, module)
        # A descriptor is appended even on failure ('result' records it).
        packages.append({
            'packageName':
                '{0}'.format(ARTIFACT_FORMAT.format(service)),
            'path': [
                output_folder,
                CI_FILE_FORMAT.format(service),
                POM_FILE_FORMAT.format(service),
                'eng/versioning',
                'pom.xml',
            ],
            'readmeMd': [readme],
            'artifacts': ['{0}/pom.xml'.format(output_folder)] +
                         [jar for jar in glob.glob('{0}/target/*.jar'.format(output_folder))],
            'apiViewArtifact': next(iter(glob.glob('{0}/target/*-sources.jar'.format(output_folder))), None),
            'language': 'Java',
            'result': 'succeeded' if succeeded else 'failed',
            'packageFolder': output_folder,
        })
        update_parameters(pre_suffix)
    if not packages:
        # try data-plane codegen
        packages = codegen_sdk_automation(config)
    for package in packages:
        if len(package['path']) > 0:
            package['packageFolder'] = package['path'][0]
    return packages
def main():
    """Manual CLI flow: resolve the spec/service, generate, compile, commit.

    When the positional `config` pair is supplied, delegates entirely to
    sdk_automation() instead.
    """
    args = vars(parse_args())
    if args.get('config'):
        return sdk_automation(args['config'][0], args['config'][1])
    base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    sdk_root = os.path.abspath(os.path.join(base_dir, SDK_ROOT))
    api_specs_file = os.path.join(base_dir, API_SPECS_FILE)
    readme = args['readme']
    match = re.match(
        'specification/([^/]+)/resource-manager/readme.md',
        readme,
        re.IGNORECASE,
    )
    # A bare service name ("storage") is expanded to the full readme path.
    if not match:
        spec = readme
        readme = 'specification/{0}/resource-manager/readme.md'.format(spec)
    else:
        spec = match.group(1)
        # NOTE(review): the pattern above has only one group -- match.group(2)
        # here raises IndexError whenever a full readme path is passed; confirm.
        spec = update_spec(spec, match.group(2))
    args['readme'] = readme
    args['spec'] = spec
    # update_parameters(
    #     args.get('suffix') or get_suffix_from_api_specs(api_specs_file, spec))
    update_parameters(args.get('suffix'))
    service = get_and_update_service_from_api_specs(api_specs_file, spec,
                                                    args['service'])
    args['service'] = service
    module = ARTIFACT_FORMAT.format(service)
    stable_version, current_version = set_or_increase_version(sdk_root, GROUP_ID, module, **args)
    args['version'] = current_version
    # NOTE(review): trailing comma makes output_folder a 1-tuple -- looks
    # accidental; confirm generate() tolerates it.
    output_folder = OUTPUT_FOLDER_FORMAT.format(service),
    namespace = NAMESPACE_FORMAT.format(service)
    succeeded = generate(
        sdk_root,
        module = module,
        output_folder = output_folder,
        namespace = namespace,
        **args
    )
    if succeeded:
        succeeded = compile_package(sdk_root, module)
    if succeeded:
        compare_with_maven_package(sdk_root, service, stable_version,
                                   current_version, module)
    if args.get('auto_commit_external_change') and args.get('user_name') and args.get('user_email'):
        pwd = os.getcwd()
        try:
            os.chdir(sdk_root)
            os.system('git add eng/versioning eng/mgmt pom.xml {0} {1}'.format(
                CI_FILE_FORMAT.format(service),
                POM_FILE_FORMAT.format(service)))
            os.system(
                'git -c user.name={0} -c user.email={1} commit -m "[Automation] External Change"'
                .format(args['user_name'], args['user_email']))
        finally:
            os.chdir(pwd)
    if not succeeded:
        raise RuntimeError('Failed to generate code or compile the package')
if __name__ == '__main__':
    # Timestamped INFO logging on stdout so CI captures the pipeline output.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %X',
    )
    main()
| [
"noreply@github.com"
] | noreply@github.com |
42443cb6499ce86d0a03310e71cda7aa7d0de443 | 04ede512c840b6b65362f5b3d952ffd52a213d7a | /Basics/pernicious.py | c289bb96e4430128ce3bcf21533e3b9b1d78b5f9 | [] | no_license | not-an-anagram-lover/Python-Hello-World | 758407ede47a5e89f14d744c19a25dfb79c60017 | 689127b25aef9207a168723065b03fbccf575ab2 | refs/heads/master | 2021-09-17T13:10:58.337069 | 2018-07-02T07:07:29 | 2018-07-02T07:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | #https://stackoverflow.com/questions/699866/python-int-to-binary?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# A number is "pernicious" when the count of 1-bits in its binary
# representation (its binary digit sum) is a prime number.
b = int(input('enter another'))
binary = format(b, 'b')
print(binary)
# Population count == sum of the binary digits.
bit_sum = binary.count('1')
print(bit_sum)
print('now checking if prime or not')
# BUG FIX: the original trial-division loop ran over range(2, sum//2 + 1),
# which is empty for sum == 1, so powers of two (popcount 1) were wrongly
# reported as pernicious; it also printed 'not pernicious' once per divisor.
is_prime = bit_sum >= 2
for i in range(2, int(bit_sum ** 0.5) + 1):
    if bit_sum % i == 0:
        is_prime = False
        break
if is_prime:
    print('pernicious')
else:
    print('not pernicious')
"noreply@github.com"
] | noreply@github.com |
8933ca2ec3a2423458fe5b42177d75cc4c418d06 | 160372edc77dee6664e9030d693eab21c0125e2f | /utils.py | 2ec2dc89de7fc30121c4b3b60a381595b7f0f085 | [] | no_license | juju-w/Image-Denoise-using-Wasserstein-GAN | 2c8ebd894071fea6d27cfac6716b70317d5ea4d7 | ba912daaa858e6aef5067cc3ce5fa660763daa90 | refs/heads/main | 2023-08-29T12:21:03.335661 | 2021-09-10T14:19:46 | 2021-09-10T14:19:46 | 363,647,564 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,302 | py | # -*- coding: utf-8 -*-
"""
@Time : 2020/11/21 16:05
@Auth : kuiju_wang
@File :utils.py.py
@IDE :PyCharm
"""
import tensorflow as tf
import os
import cv2
import numpy as np
from tensorflow.keras.losses import MSE
from skimage.filters import gaussian
import random
from config import *
def batch_ssim_psnr_sum(batch_turth, batch_fake):
    """Weighted sum of batch-mean SSIM and PSNR (pixel range 1.0)."""
    weighted_ssim = tf.multiply(
        tf.reduce_mean(tf.image.ssim(batch_turth, batch_fake, 1.0)), SSIM_FACTOR)
    weighted_psnr = tf.multiply(
        tf.reduce_mean(tf.image.psnr(batch_turth, batch_fake, 1.0)), PSNR_FACTOR)
    return tf.add(weighted_ssim, weighted_psnr)
def batch_ssim_psnr_show(batch_turth, batch_fake):
    """Return the unweighted batch-mean (SSIM, PSNR) pair for reporting."""
    mean_ssim = tf.reduce_mean(tf.image.ssim(batch_turth, batch_fake, 1.0))
    mean_psnr = tf.reduce_mean(tf.image.psnr(batch_turth, batch_fake, 1.0))
    return mean_ssim, mean_psnr
def img_cut_resize():
    """Crop/rotate BSDS500 images to a square and save 256x256 copies."""
    root_dir = './data/bsds500/origal'
    goal_dir = './data/bsds500/256_cut'
    dir_list = os.listdir(root_dir)
    for name in dir_list:
        flag = 0
        img = cv2.imread(os.path.join(root_dir, name))
        imshape = img.shape
        # Portrait BSDS images are 321x481: rotate them, crop, rotate back.
        if [imshape[0], imshape[1]] == [321, 481]:
            flag = 1
        if flag:
            img = cv2.flip(cv2.transpose(img), 0)
        # Keep the central 321-row band so the crop is square-ish.
        img = img[80:401, :, :]
        if flag:
            img = cv2.flip(cv2.transpose(img), 1)
        img = cv2.resize(img, (256, 256))
        cv2.imwrite(os.path.join(goal_dir, name), img)
def zero_one_scale(array_high_dim, tenor=False):
    """Min-max normalise every (image, channel) slice of an NHWC batch to [0, 1].

    The input array is modified in place.  When *tenor* is True the result is
    wrapped in a TensorFlow tensor, otherwise the mutated array is returned.
    NOTE(review): a constant channel divides by zero -- assumes inputs vary.
    """
    batch_size = array_high_dim.shape[0]
    channels = array_high_dim.shape[-1]
    for image_idx in range(batch_size):
        for channel_idx in range(channels):
            channel = array_high_dim[image_idx][:, :, channel_idx]
            lo = np.min(channel)
            hi = np.max(channel)
            array_high_dim[image_idx][:, :, channel_idx] = (channel - lo) / (hi - lo)
    return tf.convert_to_tensor(array_high_dim) if tenor else array_high_dim
def tensor_normalization(tenor):
    """Graph-mode per-channel min-max normalisation of a BATCH_SIZE batch.

    Rebuilds the batch with tf.concat: channels are normalised one at a time
    and concatenated on the last axis, then images on the batch axis.
    NOTE(review): uses the global BATCH_SIZE rather than tenor's own batch
    dimension -- confirm callers never pass a differently sized batch.
    """
    num = BATCH_SIZE
    dim = tenor.shape[-1]
    for n in range(num):
        for d in range(dim):
            array = tenor[n][:, :, d]
            max_num = tf.reduce_max(array)
            min_num = tf.reduce_min(array)
            if d == 0:
                x = tf.divide(tf.subtract(array, min_num), (max_num - min_num))
                x = tf.reshape(x, (x.shape[0], x.shape[1], 1))
                xshape = x.shape
            else:
                x = tf.concat([x, tf.reshape(tf.divide(tf.subtract(array, min_num), (max_num - min_num)), xshape)], -1)
        if n == 0:
            y = x
            y = tf.reshape(y, (1, y.shape[0], y.shape[1], y.shape[2]))
            yshape = y.shape
        else:
            y = tf.concat([y, tf.reshape(x, yshape)], 0)
    return y
def gausseimg(tenor):
    """High-pass sharpen each image (unsharp mask + soft-light blend).

    NOTE(review): requires a TF1 session (uses .eval()), and
    `image[n] = ...` assigns into a tensor, which tensors do not support --
    this function likely raises as written; confirm it is still used.
    """
    image = tensor_normalization(tenor)
    for n in range(BATCH_SIZE):
        img = image[n].eval()
        gauss_out = gaussian(img, sigma=5, multichannel=True)
        # High-frequency residual re-centred around mid grey, then scaled.
        img_out = img - gauss_out + 127.0
        img_out = img_out / 255.0
        # Clamp to [0, 1] (saturation handling).
        mask_1 = img_out < 0
        mask_2 = img_out > 1
        img_out = img_out * (1 - mask_1)
        img_out = img_out * (1 - mask_2) + mask_2
        image[n] = tf.convert_to_tensor(Soft_light(img_out, img))
    return image
def Soft_light(img_1, img_2):
    """Photoshop-style soft-light blend of *img_1* (blend) over *img_2* (base).

    Values are expected in [0, 1]; each pixel uses the dark branch when the
    blend value is below 0.5 and the light branch otherwise.
    """
    strength = 2 * img_1 - 1
    dark_branch = strength * (img_2 - img_2 * img_2) + img_2
    light_branch = strength * (np.sqrt(img_2) - img_2) + img_2
    below_half = img_1 < 0.5
    return dark_branch * below_half + light_branch * (1 - below_half)
def add_gauss_noise2img(mean, var, image=None):
    """Add Gaussian noise to images.

    With image=None: batch mode -- reads every image under a hard-coded
    Windows path, adds seeded noise with stddev (var/255)**2, and writes the
    noisy copies (returns None).  With an array: adds noise with stddev
    sqrt(var) and returns the result rescaled to [0, 1] per channel.
    NOTE(review): the two branches use different noise scales and only the
    batch branch seeds the RNG -- confirm this asymmetry is intended.
    """
    if image is None:
        root_dir = 'S:/study/graduatio_design/code_work/data/data_use/256_cut_origal'
        goal_dir = 'S:/study/graduatio_design/code_work/data/data_use/var_15'
        dir_list = os.listdir(root_dir)
        for name in dir_list:
            img = cv2.imread(os.path.join(root_dir, name))
            img = np.array(img / 255, dtype=float)
            np.random.seed(666)
            noise = np.random.normal(mean, (var / 255.0) ** 2, img.shape)
            img = img + noise
            if img.min() < 0:
                low_clip = -1.
            else:
                low_clip = 0.
            img = np.clip(img, low_clip, 1.0)
            img = np.uint8(img * 255)
            cv2.imwrite(os.path.join(goal_dir, name), img)
    else:
        img = image.astype(np.float32)
        noise = np.random.normal(mean, var ** 0.5, img.shape)
        img = img + noise
        return zero_one_scale(img)
def split(arr, size):
    """Split *arr* into consecutive chunks of at most *size* elements.

    Mirrors the original behaviour: an input no longer than *size*
    (including an empty one) comes back as a single chunk.
    """
    if len(arr) <= size:
        return [arr]
    return [arr[start:start + size] for start in range(0, len(arr), size)]
def dataset_truth_build():
    """Load every clean training image as one float32 array scaled to [0, 1]."""
    print("reading raw image dataset")
    origin_dir = TRAIN_CLEAN_PATH
    dir_list = os.listdir(origin_dir)
    batch = (np.array([np.array(cv2.imread(os.path.join(origin_dir, name))) for name in dir_list]).astype(
        np.float32)) / 255.0
    print('done')
    return batch
def dataset_noise_build():
    """Load every noisy training image as one float32 array scaled to [0, 1]."""
    print("reading noise dataset")
    origin_dir = TRAIN_NOISE_PATH
    dir_list = os.listdir(origin_dir)
    batch = (np.array([np.array(cv2.imread(os.path.join(origin_dir, name))) for name in dir_list]).astype(
        np.float32)) / 255.0
    print('done')
    return batch
def get_patch(raw, noise, patch_num=PATCH_NUM, patch_size=PATCH_SHAPE[1]):
    """Cut random (optionally flipped) paired patches from clean/noisy batches.

    Each source image yields *patch_num* patches; the same crop window and
    flip are applied to the clean and noisy image so the pair stays aligned.
    Returns (clean_patches, noisy_patches) shuffled in the same random order.
    """
    out_raw = []
    out_noise = []
    max_x_y = raw.shape[1] - patch_size
    print("generating patches")
    for n in range(raw.shape[0]):
        for pn in range(patch_num):
            rx = random.randint(0, max_x_y)
            ry = random.randint(0, max_x_y)
            # cv2.flip code: 0 vertical, 1 horizontal, -1 both, None = no flip.
            rf = random.choice([-1, 0, 1, None])
            if rf is not None:
                out_raw.append(cv2.flip(raw[n], rf)[rx:rx + patch_size, ry:ry + patch_size, :])
                out_noise.append(cv2.flip(noise[n], rf)[rx:rx + patch_size, ry:ry + patch_size, :])
            else:
                out_raw.append(raw[n][rx:rx + patch_size, ry:ry + patch_size, :])
                out_noise.append(noise[n][rx:rx + patch_size, ry:ry + patch_size, :])
    # Re-seeding with the same value keeps both shuffles identical, so
    # clean/noisy pairs remain matched after shuffling.
    SEED = np.random.randint(0, 10000)
    np.random.seed(SEED)
    np.random.shuffle(out_raw)
    np.random.seed(SEED)
    np.random.shuffle(out_noise)
    print('done')
    return np.array(out_raw), np.array(out_noise)
def loss_ones(logits):
    """Mean binary cross-entropy of *logits* against an all-ones (real) target."""
    target = tf.ones_like(logits)
    bce = tf.keras.losses.binary_crossentropy(y_true=target, y_pred=logits, from_logits=True)
    return tf.reduce_mean(bce)
def loss_zeros(logits):
    """Mean binary cross-entropy of *logits* against an all-zeros (fake) target."""
    target = tf.zeros_like(logits)
    bce = tf.keras.losses.binary_crossentropy(y_true=target, y_pred=logits, from_logits=True)
    return tf.reduce_mean(bce)
def d_loss_fn(generator, discriminator, batch_noise, batch_truth):
    """Discriminator loss: denoised outputs labelled fake, clean images real."""
    fake_term = loss_zeros(discriminator(generator(batch_noise)))
    real_term = loss_ones(discriminator(batch_truth))
    return tf.multiply(tf.add(fake_term, real_term), D_LOSS_FACTOR)
def g_loss_fn(generator, discriminator, batch_noise, batch_truth):
    """Generator loss: adversarial + pixel + smoothness + SSIM/PSNR terms."""
    batch_fake = generator(batch_noise)
    quality_term = tf.multiply(batch_ssim_psnr_sum(batch_truth, batch_fake), SP_LOSS_FACTOR)
    adversarial_term = tf.multiply(loss_ones(discriminator(batch_fake)), ADVERSARIAL_LOSS_FACTOR)
    pixel_term = tf.multiply(get_pixel_loss(batch_truth, batch_fake), PIXEL_LOSS_FACTOR)
    smooth_term = tf.multiply(get_smooth_loss(batch_fake), SMOOTH_LOSS_FACTOR)
    return tf.add(tf.add(adversarial_term, pixel_term), tf.add(smooth_term, quality_term))
def RGB_TO_BGR(img):
    """Swap colour channel order by reversing the last axis (RGB <-> BGR)."""
    return img[..., ::-1]
def get_pixel_loss(target, prediction):
    """Summed per-pixel mean-squared error between target and prediction."""
    per_pixel_mse = MSE(target, prediction)
    return tf.reduce_sum(per_pixel_mse)
def get_smooth_loss(image):
    """Total-variation-style smoothness: L2 of horizontal and vertical
    neighbour differences over an NHWC batch with 3 channels."""
    batch_count = tf.shape(image)[0]
    image_height = tf.shape(image)[1]
    image_width = tf.shape(image)[2]
    # Pairs of slices offset by one pixel along each spatial axis.
    horizontal_normal = tf.slice(image, [0, 0, 0, 0], [batch_count, image_height, image_width - 1, 3])
    horizontal_one_right = tf.slice(image, [0, 0, 1, 0], [batch_count, image_height, image_width - 1, 3])
    vertical_normal = tf.slice(image, [0, 0, 0, 0], [batch_count, image_height - 1, image_width, 3])
    vertical_one_right = tf.slice(image, [0, 1, 0, 0], [batch_count, image_height - 1, image_width, 3])
    smooth_loss = tf.nn.l2_loss(horizontal_normal - horizontal_one_right) + tf.nn.l2_loss(
        vertical_normal - vertical_one_right)
    return smooth_loss
def read_img_2_array(path):
    """Load every image under *path* as one float32 array scaled to [0, 1]."""
    dir_list = os.listdir(path)
    batch = (np.array([np.array(cv2.imread(os.path.join(path, name))) for name in dir_list]).astype(
        np.float32)) / 255.0
    return batch
def val_truth():
    """Load the clean validation set as a float32 array scaled to [0, 1]."""
    root_dir = VAL_CLEAN_PATH
    dir_list = os.listdir(root_dir)
    batch = (np.array([np.array(cv2.imread(os.path.join(root_dir, name))) for name in dir_list]).astype(
        np.float32)) / 255.0
    return batch
def val_noise():
    """Load the noisy validation set, scaled to [0, 1], as a TF tensor."""
    root_dir = VAL_NOISE_PATH
    dir_list = os.listdir(root_dir)
    batch = (np.array([np.array(cv2.imread(os.path.join(root_dir, name))) for name in dir_list]).astype(
        np.float32)) / 255.0
    return tf.convert_to_tensor(batch)
def MaxMinNormalization(x, Max, Min):
    """Linearly map *x* from the range [Min, Max] onto [0, 1]."""
    return (x - Min) / (Max - Min)
| [
"847459455@qq.com"
] | 847459455@qq.com |
2fff3390b23f34ecccaa20ba3b41671bdfaebfa5 | e3cd9de7d7e68e5995680a297fa25652487b0d02 | /tests/sum_squares_test.py | b2ef648f012073ee2f9ded722f3ce60b17d76950 | [
"Apache-2.0"
] | permissive | bsaghafi/erdos | 2293993bb336d0a9466a17cc15236390c379d8f8 | ac27a9607f2550bbac999a0c5fb36c84c2860d2e | refs/heads/master | 2020-08-21T02:11:06.982785 | 2019-06-26T23:55:44 | 2019-06-26T23:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
from erdos.data_stream import DataStream
from erdos.message import Message
import erdos.graph
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import frequency
FLAGS = flags.FLAGS
# Selects the execution backend the dataflow graph runs on.
flags.DEFINE_string('framework', 'ros',
                    'Execution framework to use: ros | ray.')
class IntegerOp(Op):
    """Source operator that publishes a fixed integer once per second."""

    def __init__(self, name, number):
        super(IntegerOp, self).__init__(name)
        # Stored as np.int64 so downstream arithmetic stays 64-bit.
        self.number = np.int64(number)

    @staticmethod
    def setup_streams(input_streams):
        # Single output stream; no inputs are consumed.
        return [DataStream(name="integer_out")]

    @frequency(1)
    def publish_random_number(self):
        # Emit the constant with a fixed timestamp, then log the send.
        msg = Message(self.number, Timestamp(coordinates=[0]))
        out_stream = self.get_output_stream("integer_out")
        out_stream.send(msg)
        print("%s sent %d" % (self.name, self.number))

    def execute(self):
        self.publish_random_number()
        self.spin()
class SquareOp(Op):
    """Operator that republishes the square of every received value."""

    def __init__(self, name):
        super(SquareOp, self).__init__(name)

    @staticmethod
    def setup_streams(input_streams):
        # Every incoming message is routed to on_next.
        input_streams.add_callback(SquareOp.on_next)
        return [DataStream(name="square_output")]

    def on_next(self, msg):
        squared = msg.data ** 2
        out_stream = self.get_output_stream("square_output")
        out_stream.send(Message(squared, msg.timestamp))
        print("%s received: %d ^ 2 = %d" % (self.name, msg.data, squared))

    def execute(self):
        self.spin()
class SumOp(Op):
    """Operator that accumulates every received value and publishes the
    running total once per second."""

    def __init__(self, name):
        super(SumOp, self).__init__(name)
        self.sum = 0  # running total of all values received so far

    @staticmethod
    def setup_streams(input_streams):
        input_streams.add_callback(SumOp.add)
        return [DataStream(name="sum_output")]

    @frequency(1)
    def publish_sum(self):
        # Snapshot the total and emit it with a fixed timestamp.
        snapshot = self.sum
        self.get_output_stream("sum_output").send(
            Message(snapshot, Timestamp(coordinates=[0])))

    def add(self, msg):
        previous = self.sum
        self.sum = previous + msg.data
        print("%s: %d (original) + %d (received) = %d (result)"
              % (self.name, previous, msg.data, self.sum))

    def execute(self):
        self.publish_sum()
        self.spin()
def main(argv):
    """Sums the squares of 2 numbers. """
    # Build the dataflow graph.
    graph = erdos.graph.get_current_graph()

    # Operators: two integer sources, two squarers, one accumulator.
    int_op_a = graph.add(IntegerOp, name='int1', init_args={'number': 1})
    int_op_b = graph.add(IntegerOp, name='int2', init_args={'number': 2})
    square_op_a = graph.add(SquareOp, name='square')
    square_op_b = graph.add(SquareOp, name='square2')
    sum_op = graph.add(SumOp, name='sum')

    # Wire each source through its squarer, then both into the sum.
    graph.connect([int_op_a], [square_op_a])
    graph.connect([int_op_b], [square_op_b])
    graph.connect([square_op_a, square_op_b], [sum_op])

    # Run on the configured framework (ros | ray).
    graph.execute(FLAGS.framework)
if __name__ == "__main__":
    # absl's app.run parses the flags and invokes main(argv).
    app.run(main)
| [
"gogionel@gmail.com"
] | gogionel@gmail.com |
7700fffdae40275608253f363c62056ca68e7520 | fbe5a5796c6caccf506220fd31da9ab2123cf70b | /hello_app/views.py | ee95d7678b713bd567458039562e2a7a7af3853c | [] | no_license | bhavanapamulaparthi/travello | af7d756151d993a70b5be2a8e7751407a7293172 | 318800aa1fb3f22bf1b4fa9e7b7c0bc88001adb6 | refs/heads/master | 2022-11-22T10:30:41.331128 | 2020-07-23T13:59:50 | 2020-07-23T13:59:50 | 281,964,890 | 0 | 1 | null | 2020-07-23T13:59:52 | 2020-07-23T13:51:47 | Python | UTF-8 | Python | false | false | 138 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the landing page with a fixed display name in the context."""
    context = {'name': 'bhavana'}
    return render(request, 's.html', context)
| [
"harish@Harishs-MacBook-Air.local"
] | harish@Harishs-MacBook-Air.local |
2777ad6217765b315c2d108e6d524be00b797a6b | 8a1fe2825d030710e85e9cf9e8f96e962f0f11af | /2.1_svm_mnist.py | efc647fc22ba271a5702815471cd3eaa151bc259 | [] | no_license | LeoSf/svm_py | 3e14ead2d57e3d0e94489b41b7a0556dee925961 | c897808e2be843badd6854ec1c93ae0801233f05 | refs/heads/master | 2022-11-16T06:27:02.314318 | 2020-07-14T06:33:40 | 2020-07-14T06:33:40 | 275,107,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | # sklearn.svm: Support Vector Machines
# svm.SVC(*[, C, kernel, degree, gamma, …])
# C-Support Vector Classification.
from __future__ import print_function, division
from builtins import range
from sklearn.svm import SVC
from util import getKaggleMNIST
from datetime import datetime
# get the data: https://www.kaggle.com/c/digit-recognizer
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
#model = SVC()
# RBF-kernel SVC with tuned C/gamma (defaults kept above for comparison).
model = SVC(C=5., gamma=.05)
# Time the fit and both scoring passes separately (scoring is also slow).
t0 = datetime.now()
model.fit(Xtrain, Ytrain)
print("train duration:", datetime.now() - t0)
t0 = datetime.now()
print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0)
t0 = datetime.now()
print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0)
# Outputs:
# model = SVC()
# (svm) D:\Repos\courses\svm>python svm_mnist.py
# train duration: 0:04:18.895413
# train score: 0.9891219512195122 duration: 0:07:47.970313
# test score: 0.985 duration: 0:00:11.319317
# model = SVC(C=5., gamma=.05)
# (svm) D:\Repos\courses\svm>python 2.1_svm_mnist.py
# train duration: 0:14:40.831508
# train score: 1.0 duration: 0:12:51.490385
# test score: 0.974 duration: 0:00:18.497260 | [
"leomedus@gmail.com"
] | leomedus@gmail.com |
bf6422eb78f6c700211eaab310ce54a6a70d1a4b | 22c56d6cb744a0b7a5879376bed0f8e12abbf357 | /14_xi/04_ParallelogramVOn4Lines.py | 3137178f0acbda5f06e2778f3972f981a83f2fb7 | [
"MIT"
] | permissive | mirefek/py_euclidea | 8854bd648e4e5cbadaca9d48fffb6f31d5a3447e | 8e400cbf36e3c8919fcc0032b7a95ce55012416e | refs/heads/master | 2023-08-30T14:12:28.195003 | 2021-11-16T21:02:20 | 2021-11-16T21:02:20 | 215,083,101 | 7 | 3 | null | 2021-10-05T15:56:38 | 2019-10-14T15:45:21 | Python | UTF-8 | Python | false | false | 1,105 | py | from constructions import *
def init(env):
    """Set up the Euclidea level: four hidden construction lines, the free
    centre point M, and the tool palette available to the player."""
    # Hidden free points spanning the three base lines of a triangle.
    A = env.add_free(263.0, 116.0, hidden = True)
    B = env.add_free(488.5, 335.0, hidden = True)
    C = env.add_free(140.0, 335.0, hidden = True)
    # X anchors the fourth line, parallel to BC.
    X = env.add_free(280.0, 181.5, hidden = True)
    l1 = env.add_line(A,B)
    l2 = env.add_line(A,C)
    l3 = env.add_line(B,C)
    # l4 is constructed as the parallel to l3 through X.
    l4 = env.add_constr(parallel_tool, (l3,X), Line)
    # M is the visible free point: the required parallelogram centre.
    M = env.add_free(296.5, 235.5)
    env.set_tools(
        "move", "point", "line", "circle",
        "perp_bisector", "angle_bisector",
        "perpendicular", "parallel",
        "compass", "intersection",
    )
    env.goal_params(l1,l2,l3,l4,M)
def construct_goals(l1,l2,l3_in,l4_in,M):
    """Return the accepted goal figures: parallelograms centred at M whose
    vertices lie on the four given lines.

    Both assignments of (l3, l4) are tried, so either of the two matching
    parallelograms counts as a solution.
    """
    result = []
    for (l3,l4) in (l3_in,l4_in), (l4_in,l3_in):
        # A is on l1, and on the reflection of l3 through M, so A's
        # opposite vertex C = reflect(A, M) lands on l3; same idea for B/D.
        A = intersection_tool(l1, reflect_by_point(l3, M))
        B = intersection_tool(l2, reflect_by_point(l4, M))
        C = reflect_by_point(A, M)
        D = reflect_by_point(B, M)
        # The goal is the four sides of parallelogram ABCD.
        result.append((
            segment_tool(A,B),
            segment_tool(B,C),
            segment_tool(C,D),
            segment_tool(D,A),
        ))
    return result
| [
"mirek@olsak.net"
] | mirek@olsak.net |
6716dd63b9b62ad8b5ceac1c7b269df4a58df92d | 595c9c7aeacbe536525356207ae1ef1d85f9f12f | /bert/bm25_top100_test_write_results.py | e917d774b142cb15edab0ead66a76a11e0bfd3b0 | [
"BSD-3-Clause"
] | permissive | berkayalkan/cmpe493-term | 8d529f8123be6fbcbcf92de744b6f6c9c91894cf | 51662c1cf54fbb287c2547e74ac9066084d19812 | refs/heads/master | 2023-03-13T14:10:27.257833 | 2021-03-07T23:47:10 | 2021-03-07T23:47:10 | 325,397,556 | 0 | 0 | null | 2021-03-07T23:45:52 | 2020-12-29T22:13:15 | Python | UTF-8 | Python | false | false | 601 | py | import pickle
from file_operation import write_results
if __name__=="__main__":
    # Load the precomputed BM25 scores: {query_id: {doc_id: score}}.
    # Context manager guarantees the pickle file is closed even on error
    # (the original opened/closed it by hand).
    with open("input/bm25_all.pickle", "rb") as f:
        bm25_dict = pickle.load(f)

    bm25_top100_test_dict = {}
    # Even-numbered query ids 2..50 form the test split.
    for i in range(2, 51, 2):
        query_scores = bm25_dict[str(i)]
        # Keep the 100 highest-scoring documents for this query. Sorting
        # the (doc_id, score) pairs directly avoids the original's
        # build-a-full-dict-just-to-read-its-keys detour; dict() preserves
        # the descending-score insertion order.
        top100 = sorted(query_scores.items(), key=lambda item: item[1], reverse=True)[:100]
        bm25_top100_test_dict[str(i)] = dict(top100)
    write_results(bm25_top100_test_dict)
| [
"brkyalkn13@gmail.com"
] | brkyalkn13@gmail.com |
957ec6a92fdd50434b0a0f700c5efd39369d088b | 9607703f72032511f8d4ef3aae780fef487ce832 | /speechsyn/speaker.py | 6b9dcbcddbfc74a87991dd8580acf71196e51cb2 | [
"Apache-2.0"
] | permissive | sokolegg/speechsyn | 991879705c0714961d021650a6fd2b9915f1894b | 7d63d052b3d0b8dddf16f367594989ead5e35d9a | refs/heads/master | 2020-09-22T12:41:16.824864 | 2020-02-04T12:38:40 | 2020-02-04T12:38:40 | 225,198,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,056 | py | from __future__ import print_function
from speechsyn import hyperparams
from speechsyn.hyperparams import hyperparams as hp
import tqdm
from speechsyn.data_load import load_from_lines, load_vocab
import tensorflow as tf
from speechsyn.train import Graph
from speechsyn.utils import spectrogram2wav, plot_test_alignment
from scipy.io.wavfile import write
import os
import numpy as np
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
SEC_PER_CHAR = float(10) / 180 # [sec/char]
SEC_PER_ITER = float(12) / 200 # [sec/iter]
def get_EOS_index(text):
    """Return the position of the EOS character inside the encoded *text*.

    NOTE(review): np.argmax returns 0 when hp.EOS_char never occurs,
    which is indistinguishable from EOS at index 0 — confirm inputs
    always contain the EOS marker.
    """
    # Load vocab
    char2idx, idx2char = load_vocab()
    # Decode the integer-encoded text back to characters for comparison.
    _text = np.array([idx2char[t] for t in text])
    return np.argmax(_text == hp.EOS_char)
def get_EOS_fire(alignment, text):
    """Return the last decoder step whose attention peak lands on the EOS
    character of *text*, or None when EOS is never the attended character.

    *alignment* is indexed [text_position, decoder_step]; argmax over
    axis 0 gives, per decoder step, the text position attended most.
    """
    eos_pos = get_EOS_index(text)
    peak_positions = np.argmax(alignment, axis=0)
    hits = [step for step, pos in enumerate(peak_positions) if pos == eos_pos]
    return max(hits) if hits else None
def synthesize(phrases):
    """Synthesize speech for *phrases* with the trained Tacotron-style graph.

    Restores the latest checkpoint from hp.syn_logdir, decodes mel frames
    autoregressively, converts the magnitude spectrogram to audio and
    writes one numbered .wav per input under hp.sampledir. Returns the
    path of the last file written.
    """
    if not os.path.exists(hp.sampledir): os.mkdir(hp.sampledir)
    # Load graph
    g = Graph(mode="synthesize");
    print("Graph loaded")
    # Load data
    # NOTE(review): `phrases is str` compares against the *type object*,
    # so it is effectively always False — both branches are identical.
    if phrases is str:
        texts, max_len = load_from_lines([phrases])
    else:
        texts, max_len = load_from_lines([phrases])
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.syn_logdir));
        print("Restored!")
        # Feed Forward
        ## mel: decode autoregressively, one frame-group per iteration,
        ## feeding the partially filled y_hat back in each step.
        size = 10
        y_hat = np.zeros((texts.shape[0], size, hp.n_mels * hp.r), np.float32)
        for j in tqdm.tqdm(range(size)):
            _y_hat = sess.run(g.y_hat, {g.x: texts, g.y: y_hat})
            y_hat[:, j, :] = _y_hat[:, j, :]
        ## alignments (attention matrices) for EOS detection below
        alignments = sess.run([g.alignments], {g.x: texts, g.y: y_hat})[0]
        ## mag: magnitude spectrograms from the decoded mels
        mags = sess.run(g.z_hat, {g.y_hat: y_hat})
        print('Len of mags', len(mags))
        for i, mag in enumerate(mags):
            print("File {}.wav is being generated ...".format(i + 1))
            text, alignment = texts[i], alignments[i]
            print(alignment.shape)
            print("len text", float(len(text)))
            # Minimum audio duration estimated from the text length.
            min_sample_sec = float(get_EOS_index(text)) * SEC_PER_CHAR
            print("min sec ", min_sample_sec)
            al_EOS_index = get_EOS_fire(alignment, text)
            # NOTE(review): the line below overrides the attention-based EOS
            # detection, so the trim branch is dead code — looks like a
            # debugging override that was left in. Confirm before removing.
            al_EOS_index = None
            if not al_EOS_index == None:
                # trim the audio at the EOS attention step
                audio = spectrogram2wav(mag[:al_EOS_index * hp.r, :])
            else:
                audio = spectrogram2wav(mag, min_sample_sec)
            audio_path = os.path.join(hp.sampledir, '{}.wav'.format(i + 1))
            write(audio_path, hp.sr, audio)
            print(audio_path)
    return audio_path
if __name__ == '__main__':
    # Usage: speaker.py <lang> <text>
    args = sys.argv[1:]
    lang = args[0]
    # Install the language-specific hyperparameters globally before
    # building the synthesis graph.
    hyperparams._H =hyperparams.Hyperparams(lang)
    text = args[1]
    synthesize(text)
    print("Done")
"sokolegg@yandex.ru"
] | sokolegg@yandex.ru |
ac8fab18e142bacb7e320a1eb8d9591f98d2df8b | fcc87644263c563a36e19a76ec7cbad168fd36ed | /dataset/stemToFile.py | a8e08816dec979ced694d39a6e41d4fd9191684a | [] | no_license | theonlydo/naive-bayes | f1a20226f4ae063d508f5a4367e10f3a42fd638b | c59deaf9a87a59b028ff11dbe50a07256048dc78 | refs/heads/master | 2022-07-04T18:49:35.384608 | 2020-05-16T12:47:11 | 2020-05-16T12:47:11 | 264,302,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | import csv
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary
# Build the Sastrawi Indonesian stemmer.
factory = StemmerFactory()
stemmer = factory.create_stemmer()
# Default Indonesian stop-word list from Sastrawi.
factory = StopWordRemoverFactory()
stopwords = factory.get_stop_words()
# Extra stop words come from the first column of stopword.csv.
more_stopword = []
with open('stopword.csv') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    for row in readCSV:
        more_stopword.append(row[0])
# print() call form works on both Python 2 and 3 (the original used the
# Python-2-only statement form).
print(more_stopword)
dictionary = ArrayDictionary(more_stopword)
# Renamed from `str`, which shadowed the builtin.
remover = StopWordRemover(dictionary)
# Stem and stop-word-strip each line of marketing.csv, appending results to
# marketing.txt. The output file is opened once (the original reopened it
# for every row and never closed it) and is closed deterministically.
with open('marketing.csv') as csvfile, open("marketing.txt", "a") as outfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    for row in readCSV:
        out = stemmer.stem(row[0])
        out = remover.remove(out)
        print(out)
        outfile.write(out + "\n")
| [
"ridho.adep@gmail.com"
] | ridho.adep@gmail.com |
6e4322ec20aaed33338dac27baee5d5754f94362 | e04496844a7ba69196da845c965a3c04c297eacc | /power_bayes3.py | a80211b5b56329ed03603227976c53067c09440b | [] | no_license | jiangzhongkai/python_1 | 909857ac3b688421debb5c901b1d0ab6f8778f48 | 4289de470762c112f489bbc7b62046c67af12193 | refs/heads/master | 2020-03-17T05:07:44.938770 | 2018-05-22T05:13:39 | 2018-05-22T05:13:39 | 133,303,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,171 | py | # coding: utf-8
import numpy as np
import power_demand4 as pod4
import matplotlib.pyplot as plt
import math
import random
from sklearn.metrics import roc_curve
'''
This file performs classification on the prediction-error data; it is the
Bayes half of the LSTM + Bayes anomaly detector.
1. Load the error datasets produced earlier by power_demand4 and concatenate
   the training-set and test-set errors.
2. Merge the raw errors with their labels, and likewise merge the normal
   (training) vehicle data with the abnormal (test) vehicle data.
3. To make the model more robust, shuffle the data: generate a random
   permutation of the error-dataset indices.
4. Use that same permutation to shuffle the error data and the raw sensor
   data in lockstep.
5. Split the shuffled error data into a training set and a test set.
6. Treat the error at each time step as an attribute and, from the training
   samples, compute each attribute's mean and standard deviation.
7. Build a Gaussian probability density function from those statistics.
8. For each sample, evaluate the Gaussian density of every attribute,
   multiply the per-attribute probabilities together, and obtain the
   probability that the sample belongs to each class.
'''
# When the data was fed to the model, the first `timesteps` values of each
# sequence were dropped, so when recovering the predictions we also discard
# the remaining (original sequence length - timesteps) values to keep the
# ground truth and the predictions aligned.
def data_recovery(dataset, timesteps=10, length=18):
    """Drop the LSTM warm-up samples and fold the flat stream into rows.

    The first (length - timesteps) values never entered the model, so they
    are discarded before reshaping the remainder into shape (-1, length).
    """
    trimmed = dataset[length - timesteps:]
    return trimmed.reshape([-1, length])
'''
# When the data was fed to the model, the first `timesteps` values were not
# input (they were discarded), so when recovering the predictions we also
# drop the leading (sequence length - timesteps) values to keep the real
# and predicted data aligned.
# The data is then de-normalised back to its original scale.
'''
def inverseNorm(data, norm, timesteps=20, length=84):
    """Align predictions with the ground truth and undo the normalisation.

    Drops the first (length - timesteps) values (they never entered the
    model), folds the rest into rows of one sequence each, and maps the
    values back to the original scale via norm['MaxMin'] / norm['Min'].

    Fix: the original ignored its *timesteps* parameter and hardcoded
    `84-20`; both the window and the sequence *length* are now honoured
    (defaults preserve the old behaviour).

    Args:
        data: flat 1-D array of normalised values.
        norm: dict with 'MaxMin' and 'Min' arrays; entry 0 is skipped, so
            each must hold length+1 values.
        timesteps: LSTM input window size.
        length: samples per sequence (row width after reshape).
    """
    data = data[length - timesteps:]
    data = data.reshape([-1, length])
    # De-normalise: x = x_norm * (max - min) + min, skipping column 0.
    data_inversed = data * norm['MaxMin'][1:] + norm['Min'][1:]
    return data_inversed
# Build the prediction-error dataset
def createErrDataset():
    """Build the labelled prediction-error dataset from saved LSTM results.

    Loads ground truth and the stored model predictions for the normal
    (training) and abnormal (test) power sequences, de-normalises them,
    and returns per-sample absolute errors tagged with an anomaly label.

    Returns:
        (error_dataset, exception_label):
            error_dataset  -- errors with the 0/1 anomaly label appended
                              as the last column.
            exception_label -- column vector of the same labels
                               (0 = normal, 1 = abnormal).
    """
    _, train_y, _, test_y, train_norm, test_norm = pod4.rnn_data('./power_data.txt')
    # Load the earlier model outputs; train_result holds the predictions
    # obtained by feeding the training set through the model.
    train_result = np.loadtxt('./train_result3.txt')
    test_result = np.loadtxt('./test_result3.txt')
    train_true = inverseNorm(train_y, train_norm)
    train_predict = inverseNorm(train_result, train_norm)
    test_true = inverseNorm(test_y, test_norm)
    test_predict = inverseNorm(test_result, test_norm)
    # Absolute error between ground truth and prediction.
    error_normal = np.abs(train_true - train_predict)
    error_abnormal = np.abs(test_true - test_predict)
    # Replicate the abnormal errors 4x along axis 0 — presumably to
    # balance the scarce anomaly class; confirm against the data sizes.
    error_abnormal = np.tile(error_abnormal, [4,1])
    # print error_abnormal.shape
    # Concatenate the two error datasets.
    error = np.concatenate((error_normal, error_abnormal), axis=0)
    # print error.shape
    # Anomaly labels: 1 marks an abnormal sample.
    exception_label = [0 for i in range(len(error_normal))] + [1 for i in range(len(error_abnormal))]
    exception_label = np.array(exception_label).reshape([len(exception_label), 1])
    # Append the anomaly-label column to the raw error data.
    error_dataset = np.concatenate((error, exception_label), axis=1)
    # print error_dataset.shape
    # # Merge the normal training data with the abnormal test data
    # power_data = np.concatenate((train_true, test_true), axis=0)
    # # then append the anomaly labels to the combined dataset
    # power_data = np.concatenate((power_data, exception_label), axis=1)
    # print power_data.shape # (51,83)
    return error_dataset, exception_label
# Split the error dataset into a training set and a test set
def splitErrDataset(dataset, ratio=0.8):
    """Split *dataset* into a leading training slice and the trailing
    test slice, cutting at ratio * len(dataset) (truncated)."""
    cut = int(len(dataset) * ratio)
    return dataset[:cut], dataset[cut:]
def mean(numbers):
    """Arithmetic mean of a non-empty sequence, always as a float."""
    total = 0.0
    for value in numbers:
        total += value
    return total / len(numbers)
def stdev(numbers):
    """Sample standard deviation of *numbers* (N-1 denominator, i.e. the
    unbiased-variance form; requires at least two values)."""
    center = sum(numbers) / float(len(numbers))
    squared_devs = [(value - center) ** 2 for value in numbers]
    return math.sqrt(sum(squared_devs) / float(len(numbers) - 1))
# Gaussian probability density model built from a mean and standard deviation
def calculateProbability(x, mean, stdev):
    """Gaussian probability density of *x* under N(mean, stdev**2)."""
    z = (x - mean) / stdev
    coefficient = 1.0 / (math.sqrt(2.0 * math.pi) * stdev)
    return coefficient * math.exp(-0.5 * z * z)
# Mean and standard deviation of every attribute in the dataset
def summarize(dataset):
    """Per-attribute (mean, stdev) pairs for *dataset*, with the trailing
    class-label column excluded from the summaries."""
    # zip(*dataset) transposes rows into attribute columns; drop the last
    # column, which holds the class label.
    columns = list(zip(*dataset))[:-1]
    return [(mean(column), stdev(column)) for column in columns]
# Partition the data by class label
def separateByClass(dataset):
    """Group samples by their trailing class label: {label: [sample, ...]},
    preserving each sample's original order within its group."""
    grouped = {}
    for sample in dataset:
        grouped.setdefault(sample[-1], []).append(sample)
    return grouped
# Extract per-class attribute statistics
def summarizeByClass(dataset):
    """Per-class attribute statistics: {label: [(mean, stdev), ...]}.

    Groups samples by their trailing class label, then summarises the
    attributes of each group (summarize() drops the label column itself).
    Fix: `.items()` replaces the Python-2-only `.iteritems()`, which made
    the function crash on Python 3; iteration behaviour is identical.
    """
    separated = separateByClass(dataset)
    summaries = {}
    for classValue, attrset in separated.items():
        summaries[classValue] = summarize(attrset)
    return summaries
# Compute each sample's probability of belonging to a class using the
# Gaussian density: evaluate the density per attribute from that class's
# mean/stdev, then multiply the per-attribute probabilities together.
def calculateClassProbabilities(summaries, inputVector):
    """Naive-Bayes class likelihoods for a single sample.

    For each class, multiplies the Gaussian density of every attribute
    (under that class's per-attribute mean/stdev), assuming attribute
    independence.

    Args:
        summaries: {label: [(mean, stdev), ...]} per-attribute statistics.
        inputVector: one sample; only the first len(stats) entries are
            read, so a trailing class label is ignored.
    Returns:
        {label: unnormalised likelihood}.

    Fix: `.items()` replaces the Python-2-only `.iteritems()`.
    """
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        likelihood = 1
        for i, (mean, stdev) in enumerate(classSummaries):
            likelihood *= calculateProbability(inputVector[i], mean, stdev)
        probabilities[classValue] = likelihood
    return probabilities
# Predict the class of a single sample
def predict(summaries, inputVector):
    """Return the maximum-likelihood class label for one sample.

    Fixes:
    - The original unpacked `probabilities, _ = calculateClassProbabilities(...)`,
      but that function returns a single dict; unpacking it bound a dict
      *key* to `probabilities` and then failed when iterating it.
    - `.items()` replaces the Python-2-only `.iteritems()`.
    - `bestLabel` is initialised so an empty summaries dict returns None
      instead of raising UnboundLocalError.
    """
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel = None
    bestProb = -1
    for classValue, probability in probabilities.items():
        if probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel
# Predict the class of a single sample (posterior-probability version)
def predict2(summaries, inputVector, prob_y1):
    """Bayes decision between anomaly (1) and normal (0) for one sample.

    Combines the class likelihoods with the prior P(y=1)=*prob_y1* via
    Bayes' rule and returns (label, posterior) for the winning class.
    """
    likelihoods = calculateClassProbabilities(summaries, inputVector)
    prior_1 = prob_y1
    prior_0 = 1 - prob_y1
    evidence = likelihoods[1] * prior_1 + likelihoods[0] * prior_0
    posterior_1 = (likelihoods[1] * prior_1) / evidence
    posterior_0 = 1 - posterior_1
    if posterior_1 > posterior_0:
        return 1, posterior_1
    return 0, posterior_0
# Predict classes for the whole test set
def getPreditions(summaries, testset, prob_y1):
    """Classify every sample in *testset*.

    Each full row (trailing label included) is handed to predict2, which
    only reads the attribute positions, so the label is harmless.
    Returns (predicted_labels, posterior_probabilities) as parallel lists.
    """
    pairs = [predict2(summaries, sample, prob_y1) for sample in testset]
    predictions = [label for label, _ in pairs]
    probs = [posterior for _, posterior in pairs]
    return predictions, probs
# Compute accuracy
def getAccuracy(testset, predictions):
    """Percentage of samples whose trailing class label matches the
    corresponding prediction."""
    hits = sum(1 for sample, guess in zip(testset, predictions) if sample[-1] == guess)
    return hits / float(len(testset)) * 100.0
# Compute precision
# Precision measures exactness: the fraction of samples predicted positive
# that are actually positive
def getPrecision(testset, predictions):
    """Precision in percent: of all samples predicted positive (1), the
    share whose true (trailing) label is also 1.

    Fix: returns 0.0 when nothing was predicted positive instead of
    raising ZeroDivisionError.
    """
    true_positives = 0
    predicted_positives = 0
    for sample, guess in zip(testset, predictions):
        if guess == 1:
            predicted_positives += 1
            if sample[-1] == guess:
                true_positives += 1
    if predicted_positives == 0:
        return 0.0
    return true_positives / float(predicted_positives) * 100.0
# Compute recall
# Recall measures coverage: the fraction of actual positives that were
# predicted positive
def getRecall(testset, predictions):
    """Recall in percent: of all truly positive samples (trailing label 1),
    the share that was predicted positive.

    Fix: returns 0.0 when the test set contains no positives instead of
    raising ZeroDivisionError.
    """
    true_positives = 0
    actual_positives = 0
    for sample, guess in zip(testset, predictions):
        if sample[-1] == 1:
            actual_positives += 1
            if guess == 1:
                true_positives += 1
    if actual_positives == 0:
        return 0.0
    return true_positives / float(actual_positives) * 100.0
def getF1(precision, recall):
    """F1 score: harmonic mean of *precision* and *recall*.

    Fix: returns 0.0 when both inputs are zero instead of raising
    ZeroDivisionError.
    """
    if precision + recall == 0:
        return 0.0
    return (2 * precision * recall) / (precision + recall)
def plotROC(predStrengths, classLabels):
    """Draw the ROC curve for the given classifier scores.

    Walks the samples from weakest to strongest score, starting from
    (1, 1): each positive steps down by 1/#positives, each negative steps
    left by 1/#negatives. y_sum accumulates the curve height so
    y_sum * x_step is the AUC estimate (only printed in the commented-out
    line at the bottom).

    Args:
        predStrengths: numpy array of per-sample scores (must support
            .argsort()).
        classLabels: true 0/1 labels, same length as predStrengths.
    """
    # print predStrengths
    cur = [1.0, 1.0]
    y_sum = 0.0
    nums_postives = np.sum(np.array(classLabels)==1)
    y_step = 1/float(nums_postives)
    x_step = 1/float(len(classLabels) - nums_postives)
    sorted_indicies = predStrengths.argsort()
    # print sorted_indicies
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sorted_indicies.tolist():
        if classLabels[index] == 1.0:
            # true positive: move down
            del_x = 0
            del_y = y_step
        else:
            # false positive: move left; add the current height for AUC
            del_x = x_step
            del_y = 0
            y_sum += cur[1]
        ax.plot([cur[0], cur[0]-del_x], [cur[1], cur[1]-del_y], c='b')
        cur = (cur[0]-del_x, cur[1]-del_y)
    # diagonal reference line (random classifier)
    ax.plot([0,1],[0,1], 'b--')
    plt.title("power dataset classifyer's ROC with LSTM and Bayes")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    ax.axis([0,1,0,1])
    plt.show()
    # print 'the Area Under the Curve is:',y_sum*x_step
if __name__ == '__main__':
    # Get the error dataset and the loop-sensor dataset
    error_dataset, exception_label = createErrDataset()
    # for i in range(error_dataset.shape[1]-1):
    #     print error_dataset[:,i]
    #     plt.hist(error_dataset[:,i])
    #     plt.show()
    # Prior probability of the anomaly class, P(y=1).
    prob_y1 = np.sum(exception_label)/float(len(exception_label))
    # print error_dataset.shape
    # print power_data.shape
    acc_sum = 0.0
    epoch = 1
    split_ratio = 0.6
    for i in range(epoch):
        # Generate a random permutation of the error-dataset indices,
        # used to shuffle the error data and the real vehicle data in
        # the same order.
        index = np.random.permutation(len(error_dataset))
        error_dataset = error_dataset[index]
        # power_data = power_data[index]
        # Split the error dataset into a training set and a test set
        trainset, testset = splitErrDataset(error_dataset, split_ratio)
        # Gather per-attribute statistics (mean, stdev) for each class
        summaries = summarizeByClass(trainset)
        # predictions = getPreditions(summaries, testset)
        predictions, probs = getPreditions(summaries, testset, prob_y1)
        # print 'truly:',testset[:,-1]
        # print 'predict:',predictions
        accuracy = getAccuracy(testset, predictions)
        precision = getPrecision(testset, predictions)
        recall = getRecall(testset, predictions)
        F1 = getF1(precision, recall)
        # NOTE(review): these print lines are Python-2-only — in Python 2
        # they parse as `print(expr.format(...))`, but under Python 3
        # print() returns None and `.format` raises AttributeError.
        print('Accuracy: {:.2f}%').format(accuracy)
        print('Precision: {:.2f}%').format(precision)
        print('Recall: {:.2f}%').format(recall)
        print('F1: {:.2f}%').format(F1)
        acc_sum = acc_sum + accuracy
        fpr, tpr, thresholds = roc_curve(testset[:,-1], np.array(probs))
        plotROC(np.array(probs), testset[:,-1])
        print(thresholds)
        # plt.plot([0, 1], [0, 1], 'k--')
        # plt.plot(fpr, tpr)
        # plt.show()
    print('Mean Accuracy: {:.2f}%').format(acc_sum/epoch)
    # errLocate = detectErrLocate(predictions, summaries, testset)
    # print errLocate
    # _, powerTest = splitErrDataset(powerDataset, 0.67)
| [
"865268033@qq.com"
] | 865268033@qq.com |
a3f7c14e2c8ac3418bce0802b0f6f7996aba29cc | 84d210947c6928ca347a6254670edd516ff8df4a | /Python/Grapher.py | b9015c968d3392236a5ada1954e0fecade5536bf | [] | no_license | husinthewei/SHT75-Humidity | 1e3bff63a92e0f294ae109defae07301af3b56c6 | b6cf55933d4ebff70b35532e5e9609f7e2d009e6 | refs/heads/master | 2021-01-11T05:29:12.122266 | 2016-10-21T21:27:25 | 2016-10-21T21:27:25 | 71,508,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | import collections
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import time
import pyqtgraph as pg
import matplotlib.pyplot as plt
import FileWriter
import datetime
from matplotlib.backends.backend_pdf import PdfPages
class Grapher:
    """Live pyqtgraph plot of temperature / humidity / dew point, plus a
    matplotlib PDF export of a finished CSV log.

    Curve naming convention: no suffix = temperature, suffix 1 = humidity,
    suffix 2 = dew point.
    """
    #Displays the past 8640 samples.
    #8640 samples represents 24 hours of data taken every 10 seconds
    #Once the deque's are filled, they start replacing the oldest elements
    #Therefore, runs for more than 24 hours and only shows last 24 hours.
    #ending of 1 means humidity and 2 means dewpoint
    # NOTE(review): the default below is evaluated once at import time, so
    # every Grapher created without an argument shares the module-load
    # timestamp — confirm that is intended.
    def __init__(self, ProgramStartTime = time.strftime("%Y%m%dT%H%M%S")):
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        # Rolling 24h windows (8640 samples at one every 10 s).
        self.xData = collections.deque(maxlen=8640)
        self.yData = collections.deque(maxlen=8640)
        self.yData1 = collections.deque(maxlen=8640)
        self.yData2 = collections.deque(maxlen=8640)
        # Current upper bound of the y axis; grown on demand by updateMaxY.
        self.maxy = 32
        self.Program_Start_Time = ProgramStartTime
        self.app = QtGui.QApplication([])
        self.p = pg.plot()
        self.p.addLegend()
        self.curve = self.p.plot(pen=pg.mkPen('g', width=3), name = "Temp")
        self.curve1 = self.p.plot(pen=pg.mkPen('r', width=3), name = "Hmdty")
        self.curve2 = self.p.plot(pen=pg.mkPen('b', width=3), name = "Dwpnt")
        self.initializeGraph()
    #Setting how the plot looks
    def initializeGraph(self):
        """Apply titles, axis labels, range and grid to the live plot."""
        self.p.setRange(yRange=[-20,self.maxy])
        self.p.setTitle('Temp/Hmdty/Dwpnt vs. Time')
        self.p.setLabel(axis = 'left', text = 'Temp (C) Hmdty(%)')
        self.p.setLabel(axis = 'bottom', text = "Hours since %s"%self.Program_Start_Time)
        self.p.showGrid(x=True, y=True, alpha=None)
    def updateMaxY(self, y, y1, y2):
        """Grow the y-axis upper bound so the new sample values stay
        visible (the bound never shrinks back)."""
        if y > self.maxy:
            self.maxy = y
            self.p.setRange(yRange=[-20,self.maxy])
        if y1 > self.maxy:
            self.maxy = y1
            self.p.setRange(yRange=[-20,self.maxy])
        if y2 > self.maxy:
            self.maxy = y2
            self.p.setRange(yRange=[-20,self.maxy])
    def plotData(self,x,y,y1,y2):
        """Append one sample (time x, temp y, humidity y1, dew point y2)
        to the rolling windows and redraw all three curves."""
        self.updateMaxY(y, y1, y2)
        self.xData.append(x)
        self.yData.append(y)
        self.yData1.append(y1)
        self.yData2.append(y2)
        self.curve.setData(list(self.xData),list(self.yData)) #Plotting the data
        self.curve1.setData(list(self.xData),list(self.yData1))
        self.curve2.setData(list(self.xData),list(self.yData2))
    def processEvents(self):
        """Pump the Qt event loop so the window stays responsive."""
        self.app.processEvents()
    #Produce a "good looking" graph with matplotlib
    #Also, export it to a PDF file
    #Creates using the CSV file
    def produceGraph(self, path):
        """Read a logged CSV at *path* and export a matplotlib chart to
        Graphs\\<name>.pdf (Windows-style output path)."""
        File_Writer = FileWriter.FileWriter()
        data = File_Writer.getCsvData(path)
        # data[0]: timestamps, data[1..3]: temp / humidity / dew point.
        startTime = data[0][0]
        plt.figure()
        plt.clf()
        plt.ylim(-20, self.maxy)
        xData = self.extractTimeElapsed(data[0], startTime)
        tmp = plt.plot(xData,data[1], "g", label = "temp")
        hmdty = plt.plot(xData, data[2], "r", label = "hmdty")
        dwpnt = plt.plot(xData, data[3], "b", label = "dwpnt")
        plt.legend(loc = "lower right")
        #plt.legend(handles=[tmp, hmdty, dwpnt])
        plt.ylabel('Temp(C) Hmdty(%)')
        plt.xlabel('Hours since %s'%startTime)
        plt.title('Temp/Hmdty/Dwpnt vs. Time')
        fname = self.extractFileName(path)
        # NOTE(review): relies on '\%' not being a string escape; the
        # literal is a Windows-style relative path.
        pp = PdfPages('Graphs\%s.pdf'%fname)
        pp.savefig()
        pp.close()
    #Extract the file name from the path
    def extractFileName(self, path):
        """Return the base name of a backslash-separated *path*, without
        its extension."""
        fname = path.split('\\')[-1]
        fname = fname.split('.')[0]
        return fname
    def extractTimeElapsed(self, data, t0):
        """Convert ISO timestamps in *data* to hours elapsed since *t0*.

        Mutates *data* in place and also returns it.
        """
        t0 = datetime.datetime.strptime(t0,"%Y-%m-%dT%H:%M:%S")
        for i in range(len(data)):
            t = datetime.datetime.strptime(data[i],"%Y-%m-%dT%H:%M:%S")
            t = ((t-t0).total_seconds())/3600 #hours elapsed
            data[i] = t
        return data
"wae3wae@yahoo.com"
] | wae3wae@yahoo.com |
69093d96a03fc2ddc7f4fd1fb870114f283018ca | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03574/s625136603.py | 8c1ddbc3ce968e40601728c96995e7838eb37d66 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | h,w=map(int, input().split())
w1=['.'*(w+2)]
s=w1+['.'+input()+'.' for _ in range(h)]+w1
for i in range(1,h+1):
for j in range(1,w+1):
if s[i][j]=='.':
t=s[i-1][j-1:j+2]+s[i][j-1:j+2]+s[i+1][j-1:j+2]
s[i]=s[i][:j]+str(t.count('#'))+s[i][j+1:]
print(s[i][1:-1]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c2df6d6c9916fde341abb0d235790b8577ee05b0 | 2c838d3ffee6e357014dd0cd543ef841503d6647 | /src/Watcher/transforms/client2manufact.py | 96618403f6e2beb9ec4a0730b0ab2fe62594ced8 | [] | no_license | catalyst256/Watcher | 079bb0ffead77c46a814e01e851cf1b6a33b2678 | 14123f501643475fc97b64093284c1b509897550 | refs/heads/master | 2021-01-25T10:29:18.110796 | 2015-01-16T07:43:44 | 2015-01-16T07:43:44 | 14,232,782 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/env python
import sqlite3 as lite
from common.entities import WirelessClient, Vendor
from canari.maltego.message import UIMessage
from canari.framework import configure #, superuser
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2013, Watcher Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = 'catalyst256@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
#@superuser
@configure(
    label='Watcher - MAC Address Lookup',
    description='Tries to work out the vendor from the MAC address',
    uuids=[ 'Watcher.v2.client_2_manufacturer' ],
    inputs=[ ( 'Watcher', WirelessClient ) ],
    debug=True
)
def dotransform(request, response):
    """Look up the OUI vendor(s) for a wireless client's MAC address and
    attach a Vendor entity to the Maltego response for each match.

    The MAC comes from untrusted transform input, so the database query is
    parameterised rather than built by string concatenation (the original
    spliced the value straight into the SQL).
    """
    # Keep only the OUI prefix: drop the trailing 9 chars (':xx:xx:xx'),
    # upper-case it, and strip the separators.
    mac_addr = request.value[:-9].upper()
    mac_addr = mac_addr.replace(':', '')
    mac_db = 'Watcher/resources/databases/macaddr.db'
    mac_vendor = []
    con = lite.connect(mac_db)
    with con:
        cur = con.cursor()
        # Parameterised query — never interpolate external input into SQL.
        cur.execute('SELECT * FROM macaddr WHERE mac LIKE ?', (mac_addr,))
        # Collect distinct vendor names (column 1), preserving order.
        for row in cur:
            if row[1] not in mac_vendor:
                mac_vendor.append(row[1])
    for vendor_name in mac_vendor:
        response += Vendor(vendor_name)
    return response
"catalyst256@gmail.com"
] | catalyst256@gmail.com |
a639b163a4c5627eae8fd869de652c166d73a57b | c3661f55954d4255424ce4e6b5c26e0f6b69be6d | /gfx2/pycv/raster_ops.py | d216c4e6f0dbde6e68c5315c5e17fa393f4b08bb | [] | no_license | keithlegg/pyrender | 58413b0f9ee664d399f8570f30d737a6dc79c386 | 2ee1a66fab3d5bdc05a55c0c552fdbdc0d887393 | refs/heads/master | 2021-01-16T18:26:28.480423 | 2018-12-06T00:44:59 | 2018-12-06T00:44:59 | 100,080,373 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,563 | py | #!/usr/local/bin/python3
import os, sys, math
from PIL import Image, ImageOps
from pycv.constants import *
from pycv.point_ops import PointGen2D
class RasterObj(object):
    """Thin wrapper around a PIL image ("framebuffer") with load/save,
    mode-conversion, rotation and cropping helpers.

    State:
        ptgen     -- PointGen2D helper for bbox/extent geometry.
        debug_msg -- when True, log_msg() prints its arguments.
        res_x/res_y -- cached width/height of the framebuffer.
        bitmode   -- PIL mode used when creating new buffers.
        fb        -- the PIL Image being operated on.
    """
    def __init__(self):
        self.ptgen = PointGen2D()
        self.debug_msg = False
        self.res_x = None
        self.res_y = None
        self.bitmode = 'RGBA' #PIL mode
        self.fb = None #main framebuffer
    def log_msg(self, *args ):
        """Print the space-joined *args*, but only when debug_msg is set."""
        if self.debug_msg:
            msg=''
            for value in args :
                msg+=( "%s " % str(value) )
            print(msg)
    def image_info(self):
        """Print a small stats dict (currently just the resolution)."""
        stats = {}
        stats['resolution']='W:'+str(self.res_x)+' H:'+str(self.res_y)
        print( stats )
    def save_file(self, name):
        """Save the framebuffer to *name* (format inferred by PIL)."""
        print("Saving file to: " + name)
        self.fb.save(name)
    def load_file(self, name):
        """Load an image file into the framebuffer and cache its size."""
        self.fb = Image.open(name)
        self.res_x = self.fb.size[0]
        self.res_y = self.fb.size[1]
    def set_res(self, rx, ry):
        """Set the cached resolution without touching the framebuffer."""
        self.res_x = rx
        self.res_y = ry
    def create_buffer(self, rx, ry):
        """Allocate a fresh rx-by-ry framebuffer in self.bitmode."""
        self.res_x = rx
        self.res_y = ry
        self.fb = Image.new(self.bitmode, (self.res_x, self.res_y) )
    def read_buffer(self, pilBuffer):
        """Adopt an existing PIL Image as the framebuffer."""
        #make sure you pass a PIL Image object
        self.fb = pilBuffer
        self.res_x = pilBuffer.size[0]
        self.res_y = pilBuffer.size[1]
        #print("debug raster op buffer read ", self.fb.show() )
    @property
    def extents(self):
        # Full-image extents as computed by the PointGen2D helper.
        return self.ptgen.calc_square_diag( (0,0), (self.size[0],self.size[1]) )
    @property
    def center(self):
        # Integer pixel coordinates of the image centre.
        return ( int(self.res_x/2), int(self.res_y/2) )
    @property
    def size(self):
        # (width, height) of the current framebuffer.
        return ( self.fb.size )
    def invert(self):
        """Invert the image tonally; converts through 'L' (PIL cannot
        invert an image with alpha) and back to 'RGBA'."""
        if self.fb.mode != 'L':
            self.fb = self.fb.convert('L')
        self.fb= ImageOps.invert(self.fb)
        if self.fb.mode != 'RGBA':
            self.fb = self.fb.convert('RGBA')
    def cvt_1bit(self):
        """Convert the framebuffer to 1-bit (via 8-bit grayscale)."""
        #img.point(lambda x: bool(x))
        self.fb = self.fb.convert('L') # convert 8 bit
        self.fb = self.fb.convert('1') # convert 1 bit
    def cvt_24bit_alpha(self):
        """Convert the framebuffer to RGBA."""
        self.fb = self.fb.convert("RGBA")
    def cvt_24bit(self):
        """Convert the framebuffer to RGB (drops alpha)."""
        self.fb = self.fb.convert("RGB")
    def get_pix(self, pt ):
        # NOTE(review): the pixel value is fetched but never returned —
        # callers always get None. Looks like a missing `return`.
        self.fb.getpixel(pt)
    def set_pix(self, pt , color ):
        """Write *color* at pixel coordinate *pt* via PixelAccess."""
        dpix = self.fb.load()
        dpix[pt[0], pt[1]] = color
    def rotate_pil_raw(self, rotation):
        """Rotate by *rotation* degrees, expanding the canvas; empty
        corners keep PIL's default fill."""
        #rotate and expand - nothing else
        self.fb = self.fb.rotate(rotation, expand=1)
        self.res_x = self.fb.size[0]
        self.res_y = self.fb.size[1]
    def rotate_pil(self, rotation):
        """Rotate by *rotation* degrees, expanding the canvas and
        compositing white into the exposed empty corners."""
        #rotate, expand and composite white in the empty areas
        if self.fb.mode != 'RGBA':
            self.fb = self.fb.convert('RGBA')
        rot = self.fb.rotate(rotation, expand=1)
        self.res_x = rot.size[0]
        self.res_y = rot.size[1]
        fff = Image.new('RGBA', rot.size, (255,)*4) #white mask to composite
        self.fb = Image.composite(rot, fff, rot)
    def add_margins(self, size):
        """Show (not store) the image centred on a canvas *size* pixels
        larger in each dimension.

        NOTE(review): Image.paste expects the offset as a single (x, y)
        box tuple; passing two positional offsets (and Python-3 float
        division) makes this call raise — confirm this method is unused.
        """
        old_size = self.fb.size
        new_size = (old_size[0]+size, old_size[1]+size)
        #new_im = Image.new(self.fb.mode, new_size) #for black
        new_im = Image.new(self.fb.mode, new_size, (255,)*4) #for white
        new_im.paste(self.fb, (new_size[0]-old_size[0])/2, (new_size[1]-old_size[1])/2 )
        new_im.show()
    def get_island(self, offset=None):
        """Extents of the non-white content ("island") of the image.

        Inverts a temporary RGB copy so PIL.getbbox can exclude the empty
        (white) borders, then maps the bbox through the PointGen2D helper;
        the framebuffer is round-tripped RGB -> RGBA as a side effect.
        """
        self.cvt_24bit()
        tmp_fb = ImageOps.invert(self.fb)
        inside_data = tmp_fb.getbbox()
        self.cvt_24bit_alpha()
        if not offset:
            return self.ptgen.extents_fr_bbox(inside_data)
        if offset:
            return self.ptgen.extents_fr_bbox(inside_data, offset)
    def crop_island(self, margin=None):
        """ crop out the image borders with no data in them
            optional margin will buffer the image borders with white
            nagative margins will trim the image edges
        """
        #you cant invert an image with alpha in PIL
        self.cvt_24bit() #first we convert to RGB
        tmp_fb = ImageOps.invert(self.fb)
        inside_data = tmp_fb.getbbox() #crops out black pixels around edges
        if not margin:
            self.fb = self.fb.crop( inside_data )
            self.cvt_24bit_alpha()#convert back to RGBA
        if margin:
            if margin <0:
                # negative margin: shrink the bbox before cropping
                inside_data=self.ptgen.add_margin_bbox(inside_data, margin)
                self.fb = self.fb.crop( inside_data )
                self.cvt_24bit_alpha()#convert back to RGBA
            if margin >0:
                # positive margin: crop tight, then paste onto a larger
                # white canvas so the content gains a white border
                self.fb = self.fb.crop( inside_data )
                self.cvt_24bit_alpha()#convert back to RGBA
                double = int(margin*2)
                bgimg = Image.new('RGBA', (self.fb.size[0]+double, self.fb.size[1]+double), (255,)*4) #white mask to composite
                img_w, img_h = self.fb.size
                bg_w, bg_h = bgimg.size
                bgimg.paste(self.fb, (margin, margin ) )
                ##
                self.fb = bgimg
                self.cvt_24bit_alpha()#convert back to RGBA
                self.res_x = bg_w
                self.res_y = bg_h
    def crop_pt(self, pt_coord, size):
        """Crop a *size* region centred on *pt_coord*; prints an error
        (but still crops) when the region leaves the image bounds."""
        #crop area from point
        xtntx = tuple(self.ptgen.calc_bbox( size, pt_coord) )
        if xtntx[0]<0 or xtntx[1]<0 or xtntx[2]>self.res_x or xtntx[3]>self.res_y:
            print('# ERROR raster_ops.crop_pt - out of image bounds')
        return self.fb.crop( xtntx )
    def crop_corner(self, size, mode):
        """Crop a size-by-size square from the corner named by *mode*
        ('bl', 'tl', 'tr' or 'br'); other values raise via unbound xtntx."""
        #crop the corners in a square
        if mode == 'bl':
            xtntx = (0, self.res_y - size, size, self.res_y)
        if mode == 'tl':
            xtntx = (0, 0, size, size)
        if mode == 'tr':
            xtntx = (self.res_x - size, 0, self.res_x, size)
        if mode == 'br':
            xtntx = (self.res_x - size, self.res_y - size, self.res_x, self.res_y)
        return self.fb.crop( xtntx )
class PixelOp (RasterObj):
"""
Pixel operator with raster goodies for drawing and sampling pixels
TODO:
deal with cases where the sampling runs off the page
"""
def __init__(self):
super(PixelOp , self).__init__()
self.filter = pixelFilter()
## ## ## ## ##
def graticule(self, spacing=10, scale=1):
""" make a graticule grid
start at center and go out from there based on spacing value
spacing is in pixels
"""
clr_backg = (0,50,90)
clr_lines = (0,150,190)
clr_dots = (0,255,0)
gridcolor = (75,100,80)
#draw a dot in the center
cen_x = self.center[0]
cen_y = self.center[1]
#flood fill back ground
self.fill_color( clr_backg )
#optional zoom
spacing = spacing*scale
res_x = self.res_x*scale
res_y = self.res_y*scale
x = cen_x
while(x<self.res_x):
self.connect_the_dots( [(x, 0), (x, res_y)],
clr_lines, 1 )
x+=spacing
x = cen_x
while(x>0):
self.connect_the_dots( [(x, 0), (x, res_y)],
clr_lines, 1 )
x-=spacing
y = cen_y
while(y>0):
self.connect_the_dots( [(0, y), (res_x, y)],
clr_lines, 1 )
y-=spacing
y = cen_y
while(y<self.res_y):
self.connect_the_dots( [(0, y), (res_x, y)],
clr_lines, 1 )
y+=spacing
#draw lines from center across image
self.vert_line(self.center[0], gridcolor)
self.horiz_line(self.center[1], gridcolor)
#put a dot at the center
self.draw_fill_circle(self.center[0],self.center[0], 2, (200,255,0) )
## ## ## ## ##
def draw_cntr_line(self, points, color=(0,255,200), size=1, mag=1, framebuffer=None):
""" DEBUG use offset feature of connect_the_dots """
if mag >1:
tmp = []
for pt in points:
tmp.append( (pt[0]*mag, pt[1]*mag, pt[2]*mag ) )
points = tmp
else:
tmp = points
if framebuffer==None:
framebuffer = self.fb
self.connect_the_dots( tmp, color, size, origin=(self.center[0] ,self.center[1]), framebuffer=framebuffer)
## ## ## ## ##
def draw_cntr_pt(self, dot, size=1, origin=(0,0), color=(255,0,0), framebuffer=None):
""" draw a point relative to center of image """
sp = (self.center[0]+origin[0]) + dot[0]
ep = (self.center[1]+origin[1]) + dot[1] #make y negative to flip "UP" -PIL uses top left origin
#put a dot at the center
self.draw_fill_circle(sp, ep, size, color )
## ## ## ## ##
def draw_vector_2d(self, vec, invert_y=True, origin=(0,0)):
#make y negative to flip "UP" -PIL uses top left origin
#-1 will flip , 1 will NOT flip
if invert_y:
invert = -1
else:
invert = 1
scale = 10 #pixels to grid size ratio
ex = (self.center[0]+origin[0]) + (vec[0]*scale)
ey = (self.center[1]+origin[1]) + (vec[1]*scale) * invert
self.graticule(10)
#args are ( points, color, thickness, framebuffer=None):
self.connect_the_dots([ ((self.center[0]+origin[0]),(self.center[1]+origin[1])),
(ex,ey)], (0,200,0), 2 )
self.connect_the_dots([ ((self.center[0]+origin[0]),(self.center[1]+origin[1])),
(ex,ey)], (0,230,0), 1 )
print("ANGLE OF VECTOR FROM VERTICAL (UP) %s"%self.ptgen.old_calc_theta_vert( ((self.center[0]+origin[0]),(self.center[1]+origin[1])), (ex,ey) ) )
## ## ## ## ##
def normal_to_color(self, norml):
out = [0,0,0]
out[0]=int(norml[0]*255)
out[1]=int(norml[1]*255)
out[2]=int(norml[2]*255)
if out[0]>255:
out[0]=255
if out[1]>255:
out[1]=255
if out[2]>255:
out[2]=255
return tuple(out)
## ## ## ## ##
def tint(self, color, com):
""" i needed a way to procedurally tweak color
used for the glowing neon line effect to darken linear borders
"""
amt = 120
clamp_low = amt #poor - make this better!
clamp_high = amt #poor - make this better!
tmp = 0
tl = [0,0,0]
#minus_red
if com == 'drkr':
t = color
if t[0]>clamp_low:
tl[0]=t[0]-amt
if t[1]>clamp_low:
tl[1]=t[1]-amt
if t[2]>clamp_low:
tl[2]=t[2]-amt
return ( tl[0], tl[1], tl[2] )
#minus_red
if com == 'mr':
t = color
if t[0]>clamp_low:
tmp=t[0]-amt
return ( tmp, t[1], t[2] )
#minus_green
if com == 'mg':
t = color
if t[1]>clamp_low:
tmp =t[1]-amt
return ( t[0], tmp, t[2] )
#minus_blue
if com == 'mb':
t = color
if t[2]>clamp_low:
tmp =t[2]-amt
return ( t[0], t[1], tmp )
def pretty_much_yellow(self, pixel):
if pixel[0]>250 and pixel[1]>250 and pixel[2]<15:
return True
return False
def insert_image(self, px, py, foregroundfile, backgroundfile, outfile):
""" Filename1 and 2 are input files; outfile is a path where results are saved (with extension)."""
img = Image.open(foregroundfile ,'r')
img_w, img_h = img.size
#background = Image.new('RGBA', (1024,512), (255, 255, 255, 255))
bgimg = Image.open(backgroundfile ,'r')
bg_w, bg_h = bgimg.size
bgimg.paste(img, (px, py ) )
bgimg.save(outfile)
def fill_color(self, color, framebuffer=None):
""" fills image with solid color """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
dpix = framebuffer.load()
for x in range(self.res_x):
for y in range(self.res_y):
dpix[ x, y ] = color
    def center_square(self, tl, br, color, framebuffer=None):
        """ fills a centered square from the top left to bottom right corner """
        # NOTE(review): `tl` acts as both the left AND right inset, and `br`
        # as both the top AND bottom inset, so the painted region is
        # x in [tl, res_x - tl), y in [br, res_y - br) -- a symmetrically
        # inset rectangle.  Confirm that independent corners were not intended.
        if framebuffer==None:
            framebuffer = self.fb
        dpix = framebuffer.load()
        for x in range(tl, self.res_x):
            for y in range(br, self.res_y):
                # Upper bound enforced per-pixel rather than in the range().
                if x <self.res_x-tl and y <self.res_y-br:
                    dpix[ x, y ] = color
        #if y <self.res_y-br:
        #    dpix[ x, y ] = color
def vert_line(self, xloc, color, framebuffer=None):
""" draw vertical line across entire image """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
dpix = framebuffer.load()
for x in range(self.res_x):
if x == xloc:
for y in range(self.res_y):
dpix[ x, y ] = color
def horiz_line(self, yloc, color, framebuffer=None):
""" draw horizontal line across entire image """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
dpix = framebuffer.load()
for y in range(self.res_y):
if y == yloc:
for x in range(self.res_x):
dpix[ x, y ] = color
def vert_line_thick(self, xloc, width, color, framebuffer=None):
""" draw horizontal line with thickness """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
dpix = framebuffer.load()
for x in range(self.res_x):
if x == xloc:
for w in range(x, x+width):
for y in range(self.res_y):
dpix[ w, y ] = color
    def batch_draw_pixels(self, data, framebuffer=None):
        """ draw scanned data back into an image [ (value, (x,y)) .. ] """
        # NOTE(review): `red` and `green` are names not defined in this
        # method -- presumably module-level color constants; verify they
        # exist before calling.  Values of 1 are painted `red`, values of 0
        # `green`; any other value is silently skipped.
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        dpix = framebuffer.load()
        for px in data:
            #dpix[px[1][0], px[1][1]] = px[0]
            if px[0] ==1:
                dpix[px[1][0], px[1][1]] = red
            if px[0] ==0:
                dpix[px[1][0], px[1][1]] = green
def draw_fill_circle(self, x_orig, y_orig, dia, color, framebuffer=None):
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
for x in range(dia):
self.draw_circle( x_orig, y_orig, x, color, framebuffer)
    def draw_circle(self, x_orig, y_orig, dia, color, framebuffer=None):
        """Draw a circle outline of diameter `dia` centered at (x_orig, y_orig).

        Pixels falling outside the image are silently skipped.
        """
        # NOTE(review): plot_x / plot_y are never used.
        plot_x = 0;plot_y = 0;
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        dpix = framebuffer.load()
        # Palette ('P') images take a single index, not an RGB tuple:
        # any nonzero channel maps to 128, otherwise 0.
        # NOTE(review): draw_line uses the OPPOSITE mapping (nonzero -> 0);
        # confirm which one is intended.
        if framebuffer.mode =='P':
            if color[0] or color[1] or color[2]:
                color = 128
            else:
                color = 0
        for i in self.ptgen.calc_circle(x_orig, y_orig, dia):
            try:
                dpix[ i[0], i[1] ]= color
            except IndexError:
                # Off-canvas points are ignored so circles may overlap edges.
                pass
def draw_points_batch(self, points, color, dia, framebuffer=None):
""" debug - add check to make sure it doesnt go off edge of page """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
for pt in points:
self.draw_fill_circle(pt[0], pt[1], dia, color, framebuffer)
def connect_the_dots(self, points, color, thickness, origin=(0,0), framebuffer=None):
""" debug - add check to make sure it doesnt go off edge of page """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
##
#count = 0
for pt in range(len(points)-1):
p1 = list(points[pt])
p2 = list(points[pt+1])
#shift to another place before drawing
if origin[0]!=0 or origin[1]!=0:
p1[0] = p1[0]+origin[0]
p1[1] = p1[1]+origin[1]
p2[0] = p2[0]+origin[0]
p2[1] = p2[1]+origin[1]
#if count>0:
color=color
self.draw_line(tuple(p1), tuple(p2), color, thickness, framebuffer)
#count += 1
def draw_vector(self, vec, color, thickness=0, framebuffer=None):
self.draw_line(vec[0] , vec[1] , color, thickness, framebuffer )
    def draw_line(self, pt1, pt2, color, thickness=0, framebuffer=None):
        """Draw a line from pt1 to pt2.

        Args:
            pt1, pt2: (x, y) endpoints.
            thickness: 0 draws single pixels; > 0 stamps a "plus sign" of
                arms up to `thickness` pixels around each point.
            framebuffer: optional buffer; defaults to self.fb.
        """
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        pts = self.ptgen.calc_line(pt1[0], pt1[1], pt2[0], pt2[1])
        #attempt to make it work at different bit depths
        # NOTE(review): for palette ('P') images any nonzero channel maps to
        # 0 ("black") -- draw_circle uses the OPPOSITE mapping (nonzero ->
        # 128); confirm which is intended.
        if framebuffer.mode =='P':
            if color[0] or color[1] or color[2]:
                color = 0 #black
            else:
                color = 128 #white
        dpix = framebuffer.load()
        for pt in pts:
            if not thickness:
                dpix[ pt[0], pt[1] ] = color
            #really crappy way to add line thickness - makes a point a "plus sign"
            # pthick starts at 0, so the center pixel is written on the first pass.
            if thickness:
                for pthick in range(0, thickness):
                    try:
                        dpix[ pt[0], pt[1] ] = color
                        dpix[ pt[0], pt[1]+pthick ] = color
                        dpix[ pt[0], pt[1]-pthick ] = color
                        dpix[ pt[0]+pthick, pt[1] ] = color
                        dpix[ pt[0]-pthick, pt[1] ] = color
                    except IndexError:
                        # Off-canvas pixels are ignored.
                        pass
def draw_pt_along_vector(self, pt1, pt2, num, color, dia=1, framebuffer=None):
""" draw any number of points along a vector """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
pts = self.ptgen.locate_pt_along( pt1[0], pt1[1], pt2[0], pt2[1], num )
dpix = framebuffer.load()
for pt in range(len(pts)):
self.draw_fill_circle( pts[pt][0], pts[pt][1], 5, color, framebuffer)
############################################################
#these are old remnants of the computer vision code - consider new class for this?
############################################################
    def line_scan(self, pt1, pt2 , filterNoise=False, framebuffer=None):
        """Scan the pixels along a line and threshold each to 1 (dark) or 0.

        Each sample keeps its cartesian location, giving
        [(pixel_bit, (x, y)), ...].

        Args:
            pt1, pt2: (x, y) endpoints of the scan line.
            filterNoise: False, or a tuple (#places to look forward/back,
                replacement value) passed to self.filter.filter_noise.
            framebuffer: optional buffer; defaults to self.fb.
        """
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        x_1 = pt1[0]
        y_1 = pt1[1]
        x_2 = pt2[0]
        y_2 = pt2[1]
        pts = self.ptgen.calc_line( x_1, y_1, x_2, y_2 )
        output = []
        for pt in pts:
            pixel_bit = 0
            # scanner_darkly thresholds the pixel: True means "black".
            if self.scanner_darkly( framebuffer.getpixel(pt)):
                pixel_bit = 1
            output.append( (pixel_bit,pt) ) #( (color,coordinate),.. )
        if filterNoise:
            output = self.filter.filter_noise(output, filterNoise[0], filterNoise[1], True, False)
        return output
    def get_luminance(self, point, framebuffer=None):
        """ Returns the perceived luminance, from 0 - 1, of given point.

            Works with 'RGBA', 'RGB', 'L' and '1' image modes; any other
            mode trips the assertion below.  For RGB(A) the ITU-R 601-2
            luma transform is used; the alpha channel is ignored.  A point
            off the edge of the image is treated as black (returns 0).
        """
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        mode = framebuffer.mode
        # NOTE(review): assert is stripped under `python -O`; consider
        # raising ValueError for unexpected modes instead.
        assert mode in ('RGBA', 'RGB', 'L', '1')
        # Running off the edge of the page shall be black
        try:
            color = framebuffer.getpixel(point)
        except IndexError:
            return 0
        if mode == 'RGBA':
            brightness = (0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2]) # * (color[3] / 255) # ignore alpha
            brightness = brightness / 255
        elif mode == 'RGB':
            brightness = 0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2]
            brightness = brightness / 255
        elif mode == 'L':
            # 'L' pixels are single 0-255 ints.
            brightness = color / 255
        elif mode == '1':
            # 1-bit pixels are already 0 or 1 (or 0/255 -- see PIL docs).
            brightness = color
        return brightness
def scanner_darkly(self, pixel):
""" quantize a 1/8/24 bit color pixel into a 1 bit boolean value """
#1 bit
if isinstance( pixel, int ):
if pixel==0:
return True
else:
return False
#24 or 32 bit
else:
#avg = sum([val for val in pixel]) / len(pixel)
if pixel[0]<15 and pixel[1]<15 and pixel[2]<15:
return True
return False
    def line_scan_simple(self, pt1, pt2, calc_brightness=True, framebuffer=None):
        """ Returns a list of tuples of (color, (x, y)) for each pt from pt1 to pt2.

            calc_brightness - If True, returns color as a scalar 0 - 1
            representing brightness.  Otherwise, returns the color as
            represented by PIL.
        """
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        pts = self.ptgen.calc_line(pt1[0], pt1[1], pt2[0], pt2[1])
        output = []
        for pt in pts:
            # NOTE(review): pixel_bit is assigned but never used.
            pixel_bit = 0
            if calc_brightness:
                # NOTE(review): framebuffer is not forwarded here, so
                # get_luminance falls back to self.fb -- presumably
                # read_buffer() above syncs the two; verify.
                color = self.get_luminance(pt)
            else:
                color = framebuffer.getpixel(pt)
            output.append((color, pt))
        return output
    def line_scan_frames(self, pt1, pt2, filterNoise=False, framebuffer=None):
        """Scan a line and group the thresholded pixels into contiguous runs.

        For example 11110010101110000 becomes
        [ [1111], [00], [1], [0], [1], [0], [111], [0000] ], where each
        element of a run is the (pixel_bit, (x, y)) tuple from line_scan.

        Args:
            filterNoise: False, or a tuple (#places to look forward/back,
                replacement value) forwarded to line_scan.
        """
        if framebuffer:
            self.read_buffer(framebuffer)
        else:
            framebuffer= self.fb
        if filterNoise:
            self.log_msg('# scanning with noise filter active')
            pixels = self.line_scan( pt1, pt2, filterNoise )
        else:
            pixels = self.line_scan( pt1, pt2 )
        changecount = 0
        lastpix = None
        data_frames = [] #sort into groups of changes
        buff = []
        for p in pixels:
            if p[0] == lastpix:
                buff.append(p)
            if p[0] != lastpix:
                if buff!=[]:
                    data_frames.append(buff)
                buff = []
                buff.append(p)
                changecount += 1
            lastpix = p[0]
        #grab the last block if it is different
        # NOTE(review): this compares a list (`buff`) to a pixel VALUE
        # (`lastpix`), which is always unequal -- presumably `buff != []`
        # was intended; with the current code an empty trailing buffer
        # would still be appended.  Verify.
        if buff != lastpix:
            data_frames.append(buff)
        return data_frames
def circle_scan(self, x_orig, y_orig, dia, framebuffer=None):
""" orignial scoring tool, looks in a circle for dark pixles from a center point """
if framebuffer:
self.read_buffer(framebuffer)
else:
framebuffer= self.fb
pts = self.ptgen.calc_circle(x_orig, y_orig, dia)
pts.append((x_orig, y_orig)) #add the center too
is_checked = False
for pt in pts:
if ( self.scanner_darkly( framebuffer.getpixel(pt) ) ):
is_checked = True
return is_checked
class pixelFilter(object):
    """
    home for various image filters for serialized data, pixels, etc
    """
    def mean_pix(self, listv):
        """ average the sampled pixel data [(Value, (X,Y))] """
        value = 0
        for v in listv:
            value+=v[0]
        return round( value/len(listv) )
    def filter_noise(self, scandata, filterSize, repval, shift_data=False, bookend=False):
        """
        TODO - whatever the first and last value is gets "carried"
        (like the openCV healing feature - grapefruit example in book)
        basically averages a linear buffer of pixels

        Args:
            scandata: list of (value, (x, y)) samples, pre-thresholded to 1/0.
            filterSize: window size sampled ahead of the cursor.
            repval: value substituted when the averaged window rounds to 0.
            shift_data: when True, output coordinates are taken half a
                window ahead of the cursor.
            bookend: False, or a (head, tail) pair of counts trimmed from
                the result.
        """
        output = []
        # NOTE(review): lastpixel and future_pix are written but never read
        # downstream.
        lastpixel = 0
        total_cnt = 0
        size_data = len(scandata)-1
        future_pix = -1 #the sample ahead pixel value (data is pre thresholded to 1 or 0)
        if shift_data:
            shiftsize = round(filterSize/2)
        for b in scandata:
            filt = None
            #lets look into the future of our data
            sample_ahead = total_cnt+filterSize
            if sample_ahead<=size_data:
                future_pix = scandata[sample_ahead]
                fwindow =[]
                for w in range(total_cnt, sample_ahead):
                    fwindow.append(scandata[w])
                #sample behind now
                # NOTE(review): this walks indices 0, -1, -2, ... i.e. the
                # ENDS of scandata, not the samples behind the cursor --
                # presumably range(total_cnt, total_cnt-filterSize, -1) was
                # intended.  Verify.
                pwindow = []
                for w in range(0, -filterSize, -1):
                    pwindow.append( scandata[w])
                avgf = self.mean_pix(fwindow)
                avgp = self.mean_pix(pwindow)
                # NOTE(review): operator precedence makes this
                # avgf + (avgp / 2), not the mean of the two averages --
                # presumably (avgf + avgp) / 2 was intended.  Verify.
                if (round(avgf+avgp/2)):
                    if shift_data:
                        filt = (1, scandata[total_cnt+shiftsize][1] )
                    else:
                        filt = (1, b[1] )
                else:
                    filt = (repval, b[1] )
                output.append(filt)
            if sample_ahead>size_data:
                future_pix = -1 #null value thats not 0 or 1
            total_cnt+=1
        if bookend:
            return output[bookend[0]:-bookend[1]]
        else:
            return output
| [
"noreply@github.com"
] | noreply@github.com |
f3fcbba0237d608e49a75d1fa5647d4f603bfbd2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/KoubeiCateringPosPaymodeModifyRequest.py | 61eb98611f3cf9308bdfa7fdf4eec6bb8fc78aa6 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,979 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiCateringPosPaymodeModifyModel import KoubeiCateringPosPaymodeModifyModel
class KoubeiCateringPosPaymodeModifyRequest(object):
    """Request wrapper for the Alipay API method
    'koubei.catering.pos.paymode.modify'.

    Auto-generated SDK boilerplate: holds the business payload plus the
    common request parameters (version, terminal info, notify/return URLs,
    user-defined extras) and serializes them via get_params().
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, KoubeiCateringPosPaymodeModifyModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiCateringPosPaymodeModifyModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Add one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        biz_model takes the constant key P_BIZ_CONTENT; biz_content is
        serialized under 'biz_content' (via to_alipay_dict when available).
        Unset optional fields are omitted.
        """
        params = dict()
        params[P_METHOD] = 'koubei.catering.pos.paymode.modify'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads; always returns an empty dict."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
7ccfd14dead95056845a44a35d88d23563fec52b | 489bec15ce120ee8bb28a251cee4625da1e9a5f8 | /mimiron/vendor/dockerhub.py | bb792bc5f07e56878bd13dfb9e8bbadc19283aa9 | [
"MIT"
] | permissive | Nirovision/mimiron | 1fc40b3077a2852fec17ba282e1a359e109cd3c7 | adba1e762b1ae272c833f1843b179f3438f20774 | refs/heads/master | 2022-03-20T02:49:48.623595 | 2018-01-18T04:33:30 | 2018-01-18T04:33:30 | 80,687,010 | 0 | 0 | MIT | 2019-10-22T23:51:38 | 2017-02-02T02:42:18 | Python | UTF-8 | Python | false | false | 2,353 | py | # -*- coding: utf-8 -*-
import requests
import json
from mimiron.exceptions.vendor import InvalidDockerHubCredentials
from mimiron.exceptions.vendor import DockerConnectionError
class DockerHubAuthentication(object):
    """Holds Docker Hub credentials and lazily manages the JWT session token.

    Attributes:
        username, password: Docker Hub login credentials.
        org: organization/namespace used by the module-level API helpers.
    """

    def __init__(self, username, password, org, generate_token=True):
        self.username = username
        self.password = password
        self.org = org
        self._token = None
        # Eagerly log in (one HTTP round-trip) unless the caller defers.
        if generate_token:
            self._token = self.generate_token()

    def generate_token(self):
        """Log in against the Docker Hub v2 API and return a fresh JWT.

        Raises:
            InvalidDockerHubCredentials: on any non-200 login response.
            DockerConnectionError: when the HTTP connection fails.
        """
        payload = json.dumps({
            'username': self.username, 'password': self.password,
        })
        headers = {
            'Content-Type': 'application/json',
        }
        endpoint = 'https://hub.docker.com/v2/users/login/'
        try:
            response = requests.post(endpoint, data=payload, headers=headers)
            if response.status_code != 200:
                raise InvalidDockerHubCredentials
        except requests.exceptions.ConnectionError:
            raise DockerConnectionError
        return response.json()['token']

    @property
    def token(self):
        # Lazily (re)generate the token on first access.
        if not self._token:
            self._token = self.generate_token()
        return self._token

    @token.setter
    def token(self, new_token):
        self._token = new_token
def _api_request(endpoint, method, auth):
    """Perform one authenticated Docker Hub API call.

    Args:
        endpoint: full URL to request.
        method: a requests function such as requests.get.
        auth: DockerHubAuthentication supplying the JWT.

    Returns:
        The parsed JSON body on HTTP 200, otherwise None (also None when
        no token could be obtained).

    Raises:
        DockerConnectionError: when the HTTP connection fails.
    """
    jwt = auth.token
    if jwt is None:
        return None
    try:
        response = method(endpoint, headers={'Authorization': 'JWT %s' % (jwt,)})
        if response.status_code == 200:
            return response.json()
        return None
    except requests.exceptions.ConnectionError:
        raise DockerConnectionError
def list_repositories(auth, page_size=100):
    """Return the repository objects of auth.org, or None on API failure.

    NOTE(review): this returns None on failure while list_image_tags
    returns [] -- callers must handle the difference; consider unifying.
    """
    endpoint = 'https://hub.docker.com/v2/repositories/%s/?page_size=%s' % (
        auth.org, page_size,
    )
    response = _api_request(endpoint, requests.get, auth)
    return response['results'] if response is not None else response
def list_image_tags(auth, image_name, page_size=100):
    """Return the tag objects for `image_name` under auth.org.

    Returns an empty list when the API call fails.
    """
    endpoint = 'https://hub.docker.com/v2/repositories/%s/%s/tags/?page_size=%s' % (
        auth.org, image_name, page_size,
    )
    payload = _api_request(endpoint, requests.get, auth)
    if payload is None:
        return []
    return payload['results']
def build_image_abspath(auth, image_name, tag):
    """Return the fully-qualified image reference "<org>/<name>:<tag>"."""
    return '{0}/{1}:{2}'.format(auth.org, image_name, tag)
| [
"david.vuong256@gmail.com"
] | david.vuong256@gmail.com |
c6a75258bb5ae3ae1815a675b53e477103f5dc57 | 8f65eca40a8988bb2602ec0e13f2a94c4ef85459 | /models/google_model.py | 77c84e9a46f57c53b9cba3f62fbbbc7612da9e90 | [] | no_license | tonylearn09/emobot_server | 414a5de0d1cc2c0e29be312ba78a6d23cd546ae9 | a0dd6f37305f0b5fada61751df970519344f5f12 | refs/heads/master | 2021-08-23T05:05:44.277331 | 2017-12-03T14:24:13 | 2017-12-03T14:24:13 | 112,908,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | from __future__ import print_function
import os, sys
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
import json
def google_eval(dialog, sentence_level=False):
    """Evaluate sentiment of each document with the Google Cloud NL API.

    Args:
        dialog: a list of conversation documents (strings).
        sentence_level: accepted but currently unused -- document-level
            sentiment is always returned.  TODO: implement or remove.

    Returns:
        score: a list with one sentiment score per document.
            NOTE(review): the original docstring claimed [0, 1]; the Cloud
            Natural Language API documents scores in [-1.0, 1.0] -- verify.
    """
    # NOTE(review): hard-codes a service-account key path relative to the
    # current working directory; consider making this configurable.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = 'My First Project-f4f9e8a13281.json'
    client = language.LanguageServiceClient()
    document = [types.Document(content=doc,
                               type=enums.Document.Type.PLAIN_TEXT) for doc in dialog]
    # One API round-trip per document.
    annotations = [client.analyze_sentiment(document=doc) for doc in document]
    score = [anno.document_sentiment.score for anno in annotations]
    return score
| [
"tonyhung09@gmail.com"
] | tonyhung09@gmail.com |
574eb714f6cbcbcb773e632a2d27edc0432402d6 | 655f30959e533a831c2005572517f0deef57b0a9 | /train_test_split.py | 100735941add1f9524c2cb2c0459ee32d8322cdb | [
"MIT"
] | permissive | sdsmnc221/object_detection_demo | 83c14168810c665a36a7920594a8a0a540237fea | b8f3694becf0f43c06f0c1d3a4c4c0bcf0348f25 | refs/heads/master | 2020-08-09T18:42:17.341791 | 2019-10-10T16:08:29 | 2019-10-10T16:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py |
# Copyright 2014-2017 Bert Carremans
# Author: Bert Carremans <bertcarremans.be>
#
# License: BSD 3 clause
import os
import random
from shutil import copyfile
def img_train_test_split(img_source_dir, train_size):
    """
    Randomly splits images over a train and test folder, while preserving the folder structure

    Each .jpg/.png image is copied together with its same-named .xml
    annotation file into data/train/<subdir> or data/test/<subdir>.

    Parameters
    ----------
    img_source_dir : string
        Path to the folder with the images to be split. Can be absolute or relative path
    train_size : float
        Proportion of the original images that need to be copied in the subdirectory in the train folder
    """
    if not isinstance(img_source_dir, str):
        raise AttributeError('img_source_dir must be a string')

    if not os.path.exists(img_source_dir):
        raise OSError('img_source_dir does not exist')

    if not isinstance(train_size, float):
        raise AttributeError('train_size must be a float')

    # Bug fix: the old code created data/train and data/test only when
    # 'data' ALREADY existed (they were in the `else` branch), so a fresh
    # run crashed on the first copy.  Create all three unconditionally.
    for directory in ('data', 'data/train', 'data/test'):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Get the subdirectories in the main image folder
    subdirs = [subdir for subdir in os.listdir(img_source_dir)
               if os.path.isdir(os.path.join(img_source_dir, subdir))]

    for subdir in subdirs:
        subdir_fullpath = os.path.join(img_source_dir, subdir)
        if len(os.listdir(subdir_fullpath)) == 0:
            # Bug fix: this was `break`, which silently skipped every
            # remaining class folder after the first empty one.
            print(subdir_fullpath + ' is empty')
            continue

        train_subdir = os.path.join('data/train', subdir)
        test_subdir = os.path.join('data/test', subdir)

        # Create subdirectories in train and test folders
        for directory in (train_subdir, test_subdir):
            if not os.path.exists(directory):
                os.makedirs(directory)

        train_counter = 0
        test_counter = 0

        # Randomly assign each image (and its .xml annotation) to train or test
        for filename in os.listdir(subdir_fullpath):
            if filename.endswith(".jpg") or filename.endswith(".png"):
                # Bug fix: split('.')[0] truncated dotted filenames; rsplit
                # strips only the final extension.
                xml = filename.rsplit('.', 1)[0] + '.xml'
                if random.uniform(0, 1) <= train_size:
                    dest = train_subdir
                    train_counter += 1
                else:
                    dest = test_subdir
                    test_counter += 1
                copyfile(os.path.join(subdir_fullpath, filename), os.path.join(dest, filename))
                copyfile(os.path.join(subdir_fullpath, xml), os.path.join(dest, xml))

        print('Copied ' + str(train_counter) + ' images to data/train/' + subdir)
        print('Copied ' + str(test_counter) + ' images to data/test/' + subdir)
img_train_test_split('data/raw/', 0.8) | [
"antr.2201@gmail.com"
] | antr.2201@gmail.com |
779b6c677b3b9b513d6f6864adc0b7c4741437db | ea59827f3fcba3a030d2d665d7a6ddbf7c0e6903 | /dl/dltest/test_numpy.py | 48d37faa5ff03ae8cb69e35ae6c478f3b9e59793 | [] | no_license | PPPokerFace/FaceFinal | 23164522d9ac7fbc58757261dc2c049e3a7e8664 | 4d28a3bb093200669f2f7b337a907f035b650032 | refs/heads/master | 2020-04-15T01:43:16.048558 | 2019-06-17T16:07:16 | 2019-06-17T16:07:16 | 164,288,774 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | import numpy as np
import json
import sys
sys.path.append("..")
import base64
def base64_encode_image(a):
    """Serialize a bytes-like buffer (e.g. a NumPy array) to a base64
    UTF-8 string."""
    encoded = base64.b64encode(a)
    return encoded.decode("utf-8")
def base64_decode_image(a, dtype, shape):
    """Deserialize a base64 string back into a NumPy array.

    Args:
        a: base64-encoded string (or bytes on Python 2).
        dtype: target NumPy data type.
        shape: target array shape.  Bug fix: this parameter was previously
            accepted but ignored, so callers always got a flat 1-D array;
            the decoded buffer is now reshaped as the signature promises.

    Returns:
        NumPy array of the given dtype and shape.
    """
    # On Python 3 the serialized string must be re-encoded as bytes before
    # base64 decoding.
    if sys.version_info.major == 3:
        a = bytes(a, encoding="utf-8")

    a = np.frombuffer(base64.decodebytes(a), dtype=dtype)
    return a.reshape(shape)
a = base64_encode_image(a)
if sys.version_info.major == 3:
a = bytes(a, encoding="utf-8")
a = np.frombuffer(base64.decodebytes(a),dtype=int)
print(a)
| [
"653297351@qq.com"
] | 653297351@qq.com |
e29899992d7b9d372aed601eae6f1f6896db9247 | a83dc7ccce7962addbb7a7d3f45eea1dac000a21 | /10day/2.py | 8d49febd380f81aa32131265dce0dbbe43835e22 | [] | no_license | liruixiong/1808 | 879bb90587db0a7073e1a9b5b6c98e7d754feaf9 | 45f67f0ea8b25a7a68efd07272f6f361eae625c3 | refs/heads/master | 2020-03-25T19:34:37.676624 | 2018-08-22T01:49:04 | 2018-08-22T01:49:04 | 144,089,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py |
# Minimal four-function calculator: read two numbers and an operator,
# then print the result.
at = float(input(" "))
y = float(input(" "))
mo = input("+-*/")
if mo == "+":
    print(at + y)
elif mo == "-":
    print(at - y)
elif mo == "*":
    print(at * y)
elif mo == "/":
    # Bug fix: guard against division by zero instead of crashing.
    if y == 0:
        print("cannot divide by zero")
    else:
        print(at / y)
else:
    # Bug fix: unknown operators were previously ignored silently.
    print("unknown operator")
| [
"1356703763@qq.com"
] | 1356703763@qq.com |
7125e25c38dacab3919c4bba09801be80cb2f3c3 | bc20e62ae6d256e599d1f8d5c87508bab71f6b64 | /input.py | ed8d222f66f44b2a4e60b511cdc83945718a5865 | [
"MIT"
] | permissive | MichealGoldman/python_samples | efe5d67eb1f3160fd7499b86a40ebddcaa9086a2 | c4cd8af3cee99a5199dd2231f182240c35984b97 | refs/heads/master | 2021-09-07T03:23:41.933049 | 2018-02-16T14:43:56 | 2018-02-16T14:43:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py |
def firstArg():
    """Show the environment menu and return the user's choice
    (via getFirstResponse)."""
    print("\tPlease choose environment to push to:\n")
    print("\t1 - Production")
    print("\t2 - QA")
    return getFirstResponse()
def secondArg():
    """Prompt for a file path and return it (via getSecondResponse)."""
    print("\tEnter full path to file\n")
    return getSecondResponse()
def getFirstResponse():
    """Read an environment number from stdin (Python 2 raw_input).

    Returns the entered int, or implicitly None when the input is not an
    integer.  NOTE(review): an out-of-range number (e.g. 3) is still
    returned after the "invalid" warning -- verify that is intended.
    """
    try:
        env = int(raw_input(""))
        if env == 1:
            print("\tYou chose PROD")
        elif env == 2:
            print("\tYou chose QA")
        else:
            print("\tYou entry is invalid")
        return env
    except Exception as e:
        # Non-integer input: warn and implicitly return None.
        print("\tInvalid Response")
def getSecondResponse():
    """Read a file path from stdin (Python 2 raw_input).

    Returns the entered string, or implicitly None when the input is empty
    or a single space (after printing a warning).
    """
    try:
        path = str(raw_input(""))
        if path == "" or path == " ":
            print("\tYou entry is invalid")
        else:
            return path
    except Exception as e:
        print("\tInvalid Response")
# Script entry point: collect both answers, then echo them back.
if __name__ == "__main__":
    x = firstArg()
    y = secondArg()
    print("\n\tChoices are:")
    print("\t{}".format(x))
    print("\t{}".format(y))
"noreply@github.com"
] | noreply@github.com |
a3143b698f1c73cdafd04e395e97248c88d0e310 | 1ae19fcd224100d4e03a4243209e126f353c956c | /calendar/views.py | 013a2ca51795b3c38aed142d5e60611fdd8f9b68 | [] | no_license | pianojet/fyfitness | 9610813399023dcba340652672275ae003af56f0 | 93f3d826fd258e6f7155b25e88aa342e44a819e9 | refs/heads/master | 2021-01-21T07:54:10.048220 | 2014-04-17T20:48:48 | 2014-04-17T20:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from utils import profile_required, hp_profile_required, tnc_required, goal_required
from datetime import datetime, time, timedelta, date
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from blog.forms import EntryForm, CommentForm
from blog.models import Entry, Comment
from membership.models import Member, HealthProfessional, Follow
from message.models import Message
@login_required
@profile_required
@hp_profile_required
@tnc_required
@goal_required
def browse(request, member_id=None):
    """Render the calendar browse page.

    The stacked decorators gate access: the user must be logged in and
    have a profile, a health-professional profile, accepted T&Cs, and a
    goal.  `member_id` is accepted but not used yet; `message` is passed
    to the template but currently always empty.
    """
    message = ""
    return render_to_response('calendar/browse.html', {
        'message': message,
    }, context_instance=RequestContext(request))
| [
"pianojet@gmail.com"
] | pianojet@gmail.com |
441ae3e5ed84cdfb6b25ea33d6dd5ef965f496a4 | 64f4567440971011d62c01dd06c11d4231bf7075 | /exceptionHandling/exception.py | 03f83b0c23da789249ba7b721a9a457ef992e725 | [] | no_license | harisankarkr/luminarpython | 4301f9e4833d69c9edd8e9ca67ac82c6b38fa388 | 203f22e676448869be61382214f0e6bbe78992d2 | refs/heads/master | 2023-02-20T21:13:32.889044 | 2021-01-25T03:05:19 | 2021-01-25T03:05:19 | 315,543,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | no1=int(input("enter number one:"))
no2=int(input("enter number two:"))
try:
res=no1/no2
print(res)
except:
print("division is not possible for this number")
try:
res=no1+no2
print(res)
except:
print("addition is not possible")
finally:
print("thank you")
print("visit again") | [
"you@sankarhari165@gmail.com"
] | you@sankarhari165@gmail.com |
31fe631c72c7f275c3d385b76ac97806f3f3636c | ddcc03cb7e9e69fa297cd1287bb99c3973605160 | /Aula 9/ex2.py | 4069b62ee8f8f0933aa3c4d6cfb9ee871b72416b | [] | no_license | GabrielRomanoo/Python | eb9a3e9ba8228eab071ded942fac67e98c1418a8 | 495fc682de0f28cedb57c084ad1cb801bb316a3a | refs/heads/master | 2021-07-05T13:53:49.693326 | 2021-01-07T22:46:33 | 2021-01-07T22:46:33 | 219,529,712 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Faça uma função que retorne a quantidade de
# espaços presentes em uma string.
from functools import reduce
# Count the spaces in a string: collect them with a list comprehension,
# print the collected characters, then print how many there were.
x = 'Universidade Catolica de Santos'
espaco = [caractere for caractere in x if caractere == ' ']
print(espaco)   # e.g. [' ', ' ', ' ']
espaco = len(espaco)
print(espaco)   # e.g. 3
| [
"noreply@github.com"
] | noreply@github.com |
17dee18492f3cda5da9d9d51a334e3a9d30cb7b9 | 9f28c77deec48ca899f560d41029297ae8d5deb0 | /coreConcepts/coreconcepts.py | 06f0c3ab743fcdd18f63172fcb270e80e02a056c | [] | no_license | saralafia/cc-arcpy | 83374b11ce54b0df8234c622b7abe5b4f6d71e95 | f48586af573ad50955a3119b53831d1c151ca376 | refs/heads/master | 2021-05-31T01:15:36.340914 | 2016-01-30T00:59:23 | 2016-01-30T00:59:26 | 50,746,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | # -*- coding: utf-8 -*-
from fields import *
from utils import *
class CcField(object):
    """
    Class defining abstract field.
    Based on Field.hs

    All operations raise NotImplementedError; concrete subclasses must
    override them.
    """
    def __init__(self, fieldFunction, geoObject, geoEvent):
        """ Define appropriate parameters for construction of the concrete object """
        # TODO: restrict value pairs to geoObject
        pass
    def value_at( self, position ):
        """
        @return the value of field at position, or None if it is outside of the domain.
        """
        # TODO: check if position falls within value
        raise NotImplementedError("valueAt")
    def domain( self ):
        """
        @return current domain of the field
        """
        raise NotImplementedError("domain")
    def restrict_domain(self, geometry ):
        """
        @param geometry a geometry to be subtracted from the current domain
        """
        raise NotImplementedError("restrict_domain")
    def rect_neigh( self, position, width, height ):
        """
        Map algebra: rectangular neighborhood function
        @return Geometry (a field mask)
        """
        raise NotImplementedError("rectNeigh")
    def zone( self, position ):
        """
        Map algebra: zone function
        @return Geometry (a field mask)
        """
        raise NotImplementedError("zone")
    def local( self, fields, fun ):
        """
        Map algebra's local operations, with a function to compute the new values
        @param fields other fields
        @return new CcField field
        """
        raise NotImplementedError("local")
    def focal( self, fields, fun ):
        """
        Map algebra's focal operations, with a kernel function to compute the new values based on the neighborhood of the position
        @return new CcField field
        """
        raise NotImplementedError("focal")
    def zonal( self, fields, fun ):
        """
        Map algebra's zonal operations, with a function to compute the new values based on zones containing the positions.
        @return new CcField field
        """
        raise NotImplementedError("zonal")
class CcObject(object):
    """
    Abstract class for core concept 'object'
    Based on Object.hs

    All operations raise NotImplementedError; concrete subclasses must
    override them.
    """
    def bounds( self ):
        """
        @return the bounds (extent) of this object
        """
        raise NotImplementedError("bounds")
    def relation( self, obj, relType ):
        """ @return Boolean True if self and obj are in a relationship of type relType
                    False otherwise
        """
        raise NotImplementedError("relation")
    def property( self, prop ):
        """
        @param prop the property name
        @return value of property in obj
        """
        raise NotImplementedError("property")
    def identity( self, obj ):
        """
        @param obj an object
        @return Boolean True if self and obj are identical
        """
        raise NotImplementedError("identity")
class CcGranularity(object):
    """Placeholder for the core concept 'granularity'.

    Now inherits from `object` for consistency with the other Cc* classes
    (the original was an old-style class under Python 2).
    """
    def __init__(self):
        pass
    # TODO: cell_size_x, cell_size_y
"thomasahervey@gmail.com"
] | thomasahervey@gmail.com |
45ab33b9b716d44339d032313a4da2826ad54d87 | 3f140decb8daedf23dbb09aa7da2a8203f76ce70 | /json_utils.py | aba290327cb01f5fb9fb056f72f7f9f627416be2 | [] | no_license | amazingsmash/OSM-LiDAR-Segmenter | 9c3e61b33ce3506640f5226debe8d0a8217608df | 527679e4d1de803e3572259f5f1127865c24cf26 | refs/heads/master | 2022-07-17T07:30:59.112367 | 2020-05-21T11:21:20 | 2020-05-21T11:21:20 | 233,594,238 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import json
import os
def read_json(filename):
    """Load and return the JSON document stored at `filename`."""
    with open(filename) as handle:
        return json.load(handle)
def write_json(data, filename):
    """Serialize `data` as pretty-printed JSON at `filename`, creating any
    missing parent directories first."""
    parent = os.path.dirname(filename)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    with open(filename, 'w') as handle:
        json.dump(data, handle, indent=4)
"josemiguelsn@Joses-MacBook-Pro.local"
] | josemiguelsn@Joses-MacBook-Pro.local |
b6bde677aac4f26f15c0fe037c8ece62d778b970 | f4de413ad77ffaa9b2e7d65e1579a8d2696c0c42 | /classifier/rnn.py | 93be5b6baf981193a36d1dee3fc2ddf89ffa91f5 | [] | no_license | BinbinBian/Parable | b4d93d4fef2bb02f19cb3571501c8a8162045ff1 | f2ceb0b9a5749db7578c95edcbd2a26adb7249cf | refs/heads/master | 2021-01-17T18:44:06.129814 | 2016-05-07T06:13:35 | 2016-05-07T06:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from rnn_layers import *
import theano
import numpy as np
class RNNEncoderDecoder(object):
"""
A RNN Encoder-Decoder Framework
"""
class StochasticRNN(object):
"""
RNN that can encode arbitrarily long sequence
(thousands of time steps)
(best for QA, Paragraph chunking tasks)
""" | [
"leo.niecn@gmail.com"
] | leo.niecn@gmail.com |
5f95567bceaf7b570e56328ed86f10ff0b772f05 | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Demo/cgi/cgi2.py | d956f6538c63219fc0c7486a6b8aec4cd0f38de9 | [
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"Apache-2.0"
] | permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 472 | py | #!/usr/local/bin/python
"""CGI test 2 - basic use of cgi module."""
import cgitb; cgitb.enable()
import cgi
def main():
form = cgi.FieldStorage()
print "Content-type: text/html"
print
if not form:
print "<h1>No Form Keys</h1>"
else:
print "<h1>Form Keys</h1>"
for key in form.keys():
value = form[key].value
print "<p>", cgi.escape(key), ":", cgi.escape(value)
if __name__ == "__main__":
main()
| [
"damonkohler@gmail.com"
] | damonkohler@gmail.com |
0fd72ad87e2508744a10dd94eab65a8c9f0ac602 | bf76258e4c95a76c10942332d96eb22c1b149511 | /project/register/migrations/0001_initial.py | 8e9948ab078b010b6cfa86ceddb7fb5ab46f56d3 | [] | no_license | sahilchhillar/Shake-Bike-Sharing-website | bd59361fb467e6fa0cc872d81773af81e4507e8b | 0b55274294805a14ea15b486757fe779d4cd1cc4 | refs/heads/master | 2023-09-05T17:32:17.187503 | 2021-11-17T23:55:31 | 2021-11-17T23:55:31 | 420,390,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 3.1.7 on 2021-10-14 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Register',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=10)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=10)),
('confirm_password', models.CharField(max_length=10)),
],
),
]
| [
"panchalchh@outlook.com"
] | panchalchh@outlook.com |
c8b4d1a8b1b915b38b7ef0e64bf0488d364797c4 | 9b64a675635199c838bd7407b0ed17c1f08df2b8 | /Home/views.py | 8c8298443b3923f7f447b0d4b38e9465d53e02a2 | [] | no_license | prasetyaa/MVC_pythonanywhere | e15d4bbe86e92ab7433e7167a24c789d8ffbbe2a | a42c3f131910e446d1d14ea3ac812c25c82d0641 | refs/heads/master | 2020-04-22T14:11:02.883252 | 2019-02-15T04:19:07 | 2019-02-15T04:19:07 | 170,434,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def kegiatan_kegiatan_ATA(request):
return render(request, 'rumah/kegiatan_kegiatan_ATA.html', {}) | [
"intan@alphatech.id"
] | intan@alphatech.id |
cf1742ba9dff8be93530c38000c10e8be2674bf8 | 694832900728bc843113da521135567b12336029 | /LIST/反转链表2.py | ae3a345b09a7a7b7ea9bbcbf2471fc9ee0cc264e | [] | no_license | earsonlau/westworld | 42b47804d1af18325e7c7785663e57805787bce4 | 60b1618f999d713d97514b9e512f70a53b46a2c8 | refs/heads/master | 2023-08-05T05:53:00.476355 | 2023-08-01T11:27:15 | 2023-08-01T11:27:15 | 255,011,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | # 反转从位置 m 到 n 的链表。请使用一趟扫描完成反转。
#
# 说明:
# 1 ≤ m ≤ n ≤ 链表长度。
#
# 示例:
#
# 输入: 1->2->3->4->5->NULL, m = 2, n = 4
# 输出: 1->4->3->2->5->NULL
# 思路:
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# 反转以 head 为起点的 n 个节点,返回新的头节点
def reverseN(self,head,n):
successor = ListNode(None)
if n == 1 :
# 记录第n个节点
successor = head.next
return head
# 以 head.next 为起点,需要反转前 n - 1 个节点
last = self.reverseN(head.next, n - 1)
# 让 head.next 的next指针指回head(反转指针
head.next.next = head
# 让反转后的 head 节点和后面的节点连起来
head.next = successor#successor是不变的第n+1个节点
return last
def reverseBetween(self,head,m,n):
# base case
if m == 1 :
return self.reverseN(head,n)
# 前进到反转的起点触发 base case
head.next = self.reverseBetween(head.next, m - 1, n -1 )
return head
| [
"earsonlau@gmail.com"
] | earsonlau@gmail.com |
38c1b97f98e55e8fc3eab0cdf5da3c49433ad9ee | 37974a660f49b583c91e889a69bfae096e18a012 | /twoSumHash.py | e8f1041e8146ab5d2ba57d55a1753996cb95a488 | [] | no_license | ShamikBasu/Basic-Interview-Codes | 223aef9824513a5d3d8ace2c3c80777d2df6ee5e | 12d55c60b356321e8955c2a6ea2419fb6b04bbf8 | refs/heads/main | 2023-04-27T01:39:30.815182 | 2023-04-17T13:11:28 | 2023-04-17T13:11:28 | 302,637,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | n=int(input("ENTER THE LENGTH"))
a=[]
for i in range(0,n):
a.append(int(input()))
nums={}
t=int(input("ENTER THE Target"))
for num in a:
if t-num in nums:
print([t-num,num])
else:
nums[num]=True | [
"noreply@github.com"
] | noreply@github.com |
53e60a6387d3a899ed311a33fdaded25fdf5e460 | c725fc58d217f6730687a565fbf85fcf174e8009 | /code_SDSS/sql_bright_star.py | 549fb863ca8b726b5c8de71ac5c9955cb27620e0 | [] | no_license | Kein-Cary/Intracluster-Light | 6faca2bd0413244765474beeffd53cfaa401eef2 | ffcb2d6ea10be45422c7e73408fc6ff6cadf3a85 | refs/heads/master | 2023-03-18T04:51:06.539453 | 2023-03-12T02:48:01 | 2023-03-12T02:48:01 | 160,816,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | import h5py
import numpy as np
import astropy.io.fits as fits
import mechanize
import pandas as pd
from io import StringIO
import astropy.units as U
import astropy.constants as C
from astropy import cosmology as apcy
#url = 'http://skyserver.sdss.org/dr12/en/tools/search/sql.aspx'
url = 'http://cas.sdss.org/dr7/en/tools/search/sql.asp'
load = '/media/xkchen/My Passport/data/SDSS/'
with h5py.File(load + 'mpi_h5/sample_catalog.h5', 'r') as f:
catalogue = np.array(f['a'])
z = catalogue[0]
ra = catalogue[1]
dec = catalogue[2]
#r_select = 0.16676 # centered at BCG, radius = 10 arcmin (1515.15 pixel)
r_select = 0.42 ## 1.5 * diagonal line length
N_tot = len(z)
sub_N = N_tot * 1
no_match = []
for kk in range( N_tot ):
ra_g = ra[kk]
dec_g = dec[kk]
z_g = z[kk]
c_ra0 = str(ra_g - r_select)
c_dec0 = str(dec_g - r_select)
c_ra1 = str(ra_g + r_select)
c_dec1 = str(dec_g + r_select)
# query stars and saturated sources (may not be stars)
data_set = """
SELECT ALL
p.ra, p.dec, p.u, p.g, p.r, p.i, p.z, p.type,
p.isoA_u, p.isoA_g, p.isoA_r, p.isoA_i, p.isoA_z,
p.isoB_u, p.isoB_g, p.isoB_r, p.isoB_i, p.isoB_z,
p.isoPhi_u, p.isoPhi_g, p.isoPhi_r, p.isoPhi_i, p.isoPhi_z,
p.flags, dbo.fPhotoFlagsN(p.flags)
FROM PhotoObj AS p
WHERE
p.ra BETWEEN %s AND %s AND p.dec BETWEEN %s AND %s
AND (p.type = 6 OR (p.flags & dbo.fPhotoFlags('SATURATED')) > 0)
ORDER by p.r
""" % (c_ra0, c_ra1, c_dec0, c_dec1)
br = mechanize.Browser()
resp = br.open(url)
resp.info()
br.select_form(name = "sql")
br['cmd'] = data_set
br['format'] = ['csv']
response = br.submit()
s = str(response.get_data(), encoding = 'utf-8')
doc = open('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g), 'w')
print(s, file = doc)
doc.close()
try:
cat = pd.read_csv('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g),)
try_ra = np.array(cat.ra)
except:
no_match.append('%d, %.3f,%.3f,%.3f' % (kk, ra_g, dec_g, z_g) )
sub_N -= 1
doc = open('No_source_match_sample.txt', 'w')
for ll in range(len(no_match)):
subx = no_match[ll]
print(subx, file = doc)
doc.close()
print(sub_N)
| [
"cxkast@gmail.com"
] | cxkast@gmail.com |
42931988a9e68bd1b649befed66096c6e42a742e | 66d79bfbee00191ea54a3cf762f8de0d359758e9 | /src/models.py | 7948e0dfe7178b673734f0b0da0d04cce7c32ed7 | [
"MIT"
] | permissive | personads/smu | 19bb748d77da82042d4eae5316044f90760384c9 | e954b17a55c2ccbeaa7030b67d7ff5e3e6cda1fe | refs/heads/master | 2021-05-13T11:50:16.986621 | 2018-01-11T19:18:56 | 2018-01-11T19:18:56 | 117,137,779 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,499 | py | '''
SMT Tools
'''
from utils import *
from math import log, exp
from collections import defaultdict
from sys import stdout
#
# functions
#
def train_model1(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 1
returns the translation probability t = {(e,f) : prob}
'''
if verbose : print(" - training IBM Model 1 - ")
# initialize t uniformly
t = defaultdict(lambda: 1./corpus.count_unique_f())
# training loop
for i in range(iterations) :
count = defaultdict(lambda:0.)
total = defaultdict(lambda:0.)
stotal = {}
for index_pair, pair in enumerate(corpus) :
if (verbose) and ( ((index_pair+1)%100 == 0) or (i+1 == iterations) ):
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d token pairs'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(t.keys())))
stdout.flush()
# insert null token
sentence_f = [""] + pair[0]
sentence_e = [""] + pair[1]
# compute normalization
for token_e in sentence_e :
stotal[token_e] = 0
for token_f in sentence_f :
stotal[token_e] += t[(token_e,token_f)]
# collect counts
for token_e in sentence_e :
for token_f in sentence_f :
count[(token_e,token_f)] += t[(token_e,token_f)] / stotal[token_e]
total[token_f] += t[(token_e,token_f)] / stotal[token_e]
if total[token_f] == 0 :
print(token_f, total[token_f])
# probability estimation
for token_e, token_f in corpus.get_token_pairs() :
t[(token_e,token_f)] = count[(token_e,token_f)] / total[token_f]
corpus.reset_iter()
if verbose : print("\n - training of IBM Model 1 complete - ")
return dict(t)
def train_model2(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 2
returns (t, a)
the translation probability t = {(e,f) : prob}
the alignment probability a = {(i,j,l_e,l_f) : prob }
'''
if verbose : print(" - training IBM Model 2 - ")
t = {}
a = {}
# initialize t according to Model 1
if verbose : print("initialize t according to Model 1...")
t = train_model1(corpus, iterations, verbose=verbose)
# initialize a uniformly
for pair in corpus :
length_f = len(pair[0])+1
length_e = len(pair[1])+1
for index_f in range(length_f) :
for index_e in range(length_e) :
a[(index_f,index_e,length_e,length_f)] = 1./(length_f+1)
# training loop
for i in range(iterations) :
count_t = defaultdict(lambda:0)
total_t = defaultdict(lambda:0)
count_a = defaultdict(lambda:0)
total_a = defaultdict(lambda:0)
stotal = {}
corpus.reset_iter()
for index_pair, pair in enumerate(corpus) :
if (verbose) and ( ((index_pair+1)%100 == 0) or (i+1 == iterations) ):
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d alignments'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(a.keys())))
stdout.flush()
sentence_f = [""] + pair[0] # insert null token
sentence_e = [""] + pair[1]
length_f = len(sentence_f)
length_e = len(sentence_e)
# compute normalization
for index_e, token_e in enumerate(sentence_e) :
stotal[token_e] = 0
for index_f, token_f in enumerate(sentence_f) :
stotal[token_e] += t[(token_e,token_f)] * a[(index_f,index_e,length_e,length_f)]
# collect counts
for index_e, token_e in enumerate(sentence_e) :
for index_f, token_f in enumerate(sentence_f) :
update_c = t[(token_e,token_f)] * a[(index_f,index_e,length_e,length_f)]/stotal[token_e]
count_t[(token_e,token_f)] += update_c
total_t[token_f] += update_c
count_a[(index_f,index_e,length_e,length_f)] += update_c
total_a[(index_e,length_e,length_f)] += update_c
# probability estimation
for token_e, token_f in t.keys() :
t[(token_e, token_f)] = count_t[(token_e, token_f)] / total_t[token_f]
for alignment in a.keys() :
a[alignment] = count_a[alignment] / total_a[alignment[1:]]
if verbose : print("\n - training of IBM Model 2 complete - ")
return dict(t), dict(a)
def train_model3(corpus, iterations, verbose=False) :
'''
EM training function according to IBM Model 3
returns (t, d, f, n)
the translation probability t = {(e,f) : prob}
the distortion probability d = {(j,i,l_e,l_f) : prob }
the fertility probability f = {(n,f) : prob }
the null non-insertion probability p0 = prob
'''
if verbose : print(" - training IBM Model 3 - ")
t = {}
d = {}
f = {}
p0 = None
# initialize t,d according to Model 2
if verbose : print("initialize t, d according to Model 2...")
t, d = train_model2(corpus, iterations*2, verbose=verbose)
# remap distributions t, d
for pair in t :
# convert and filter 0 probabilites
if t[pair] > 0 : t[pair] = log(t[pair])
remap_d = {}
for align in d :
# convert and filter 0 probabilites
if d[align] > 0 : remap_d[(align[1], align[0], align[2], align[3])] = log(d[align])
d = remap_d
# training loop
for i in range(iterations) :
count_t = defaultdict(lambda:0)
total_t = defaultdict(lambda:0)
count_d = defaultdict(lambda:0)
total_d = defaultdict(lambda:0)
count_f = defaultdict(lambda:0)
total_f = defaultdict(lambda:0)
count_null = 0
count_p1 = 0
count_p0 = 0
stotal = {}
corpus.reset_iter()
for index_pair, pair in enumerate(corpus) :
if (verbose) :
stdout.write(('\rtraining iteration : %d of %d | %d of %d sentence pairs | %d alignments | %d fertiliy values |'+(' '*10)) % (i+1, iterations, index_pair+1, len(corpus), len(d.keys()), len(f.keys())))
stdout.flush()
# initialize local pair variables
sentence_f = [""] + pair[0] # insert null token
sentence_e = [""] + pair[1]
length_f = len(sentence_f)
length_e = len(sentence_e)
# get sample alignments
sample_alignments = sample_model3(sentence_e, sentence_f, t, d)
if sample_alignments is None :
# skip if no valid alignments are found
continue
sample_probs = []
count_total = 0
valid_alignments = []
for align in sample_alignments :
align_prob = align.get_probability(d)
for index_f, token_f in enumerate(sentence_f) :
token_e = sentence_e[align.get_index_e(index_f)]
if (token_e, token_f) in t :
cur_sample_prob = t[(token_e, token_f)]+align_prob # log probability
valid_alignments.append(align)
sample_probs.append(cur_sample_prob)
sample_alignments = valid_alignments
min_sample_prob = min(sample_probs)
for index_prob in range(len(sample_probs)) :
sample_probs[index_prob] = -1*min_sample_prob + sample_probs[index_prob]
count_norm = -1*min_sample_prob
for index_align, align in enumerate(sample_alignments) :
# normalize log probabilities as count
if sample_probs[index_align] == 0 :
count = 1
else :
count = sample_probs[index_align] / count_norm
for index_f, token_f in enumerate(sentence_f) :
index_e = align.get_index_e(index_f)
token_e = sentence_e[index_e]
count_t[(token_e, token_f)] += count
total_t[token_f] += count
count_d[(index_e, index_f, length_e, length_f)] += count
total_d[(index_f, length_e, length_f)] += count
if index_e == 0 :
count_null += 1
count_p1 += count_null * count
count_p0 += (length_e - 2 * count_null) * count
for index_f in range(length_f) :
fertility = 0
for index_e in range(length_e) :
if (index_e == align.get_index_e(index_f)) and (align.get_index_e(index_f) != 0) :
fertility += 1
count_f[(fertility, sentence_f[index_f])] += count
total_f[sentence_f[index_f]] += count
# probability estimation
t = {}
d = {}
f = {}
for token_e, token_f in count_t.keys() :
cur_prob_t = count_t[(token_e, token_f)] / total_t[token_f]
if cur_prob_t > 0 : t[(token_e, token_f)] = log(cur_prob_t) # log probability
for index_e, index_f, length_e, length_f in count_d.keys() :
cur_prob_d = count_d[(index_e, index_f, length_e, length_f)] / total_d[(index_f, length_e, length_f)]
if cur_prob_d > 0 : d[(index_e, index_f, length_e, length_f)] = log(cur_prob_d) # log probability
for fertility, token_f in count_f.keys() :
cur_prob_f = count_f[(fertility, token_f)] / total_f[token_f]
if cur_prob_f > 0 : f[(fertility, token_f)] = log(cur_prob_f) # log probability
p1 = count_p1 / (count_p0 + count_p1)
p0 = 1 - p1
if verbose : print("\n - training of IBM Model 3 complete - ")
return dict(t), dict(d), dict(f), p0
def sample_model3(sentence_e, sentence_f, prob_t, prob_d) :
res = []
length_e = len(sentence_e)
length_f = len(sentence_f)
# determine argmax over index_e
argmax_token_alignments = []
for index_f in range(length_f) :
max_alignment = (None, None)
for try_e in range(length_e) :
cur_prob_t = None
if (sentence_e[try_e], sentence_f[index_f]) in prob_t.keys() :
cur_prob_t = prob_t[(sentence_e[try_e], sentence_f[index_f])]
cur_prob_d = None
if (try_e, index_f, length_e, length_f) in prob_d.keys() :
cur_prob_d = prob_d[(try_e, index_f, length_e, length_f)]
if (cur_prob_t is not None) and (cur_prob_d is not None) :
cur_prob = cur_prob_t + cur_prob_d # log probability
if (max_alignment[1] is None) or (cur_prob > max_alignment[1]):
max_alignment = (try_e, cur_prob)
if max_alignment[0] is None:
argmax_token_alignments = None
break
argmax_token_alignments.append(max_alignment[0])
if argmax_token_alignments is not None :
cur_alignment = alignment(length_e, length_f, argmax_token_alignments)
res.append(cur_alignment)
else :
# cur_alignment = alignment(length_e, length_f)
return None
# perform sampling
# for index_pegged in range(length_f) :
# # cur_alignment = cur_alignment.hillclimb(prob_d, index_pegged)
# # if cur_alignment not in res :
# # res.append(cur_alignment)
# for neighbor in cur_alignment.get_neighbors(index_pegged) :
# if (neighbor not in res) and (neighbor.get_probability(prob_d) is not None) :
# res.append(neighbor)
return res
def train_lm(corpus, n_length, verbose=False) :
if verbose : print(" - training "+str(n_length)+"-gram language model - ")
res = {}
# collect counts
counts = {}
for n in range(1,n_length+1) :
res[n] = {}
counts[n] = {}
for index_sen, sentence in enumerate(corpus) :
if (verbose) and ((index_sen+1)%100 == 0):
stdout.write(('\rtraining : %d of %d sentences'+(' '*10)) % (index_sen+1, len(corpus)))
stdout.flush()
sentence = ["<s>"] + sentence + ["</s>"]
for index_token in range(len(sentence)) :
for n in range(1, n_length+1):
ngram = tuple(sentence[index_token:(index_token+n)])
if index_token+n <= len(sentence) :
if ngram in counts[n] :
counts[n][ngram] += 1
else :
counts[n][ngram] = 1
# probability estimation
if verbose : print("\nestimating probabilites...")
for n in range(1,n_length+1) :
for ngram in counts[n] :
if n > 1 :
res[n][(ngram[len(ngram)-1],)+ngram[:-1]] = log(counts[n][ngram] / counts[n-1][ngram[:n-1]])
else :
res[n][ngram] = log(counts[n][ngram] / len(counts[n].keys()))
if verbose : print(" - training complete - ")
return res
| [
"work@personads.me"
] | work@personads.me |
ee977d4256e3ec68006d3288301f797322b991c0 | 5308f19fa60215f2d44aa4530230075c245b3dad | /odoo/openerp/addons/base/res/res_config.py | 9f1963148f337367bb8ba5a626a6e991f8a33de9 | [] | no_license | ihyf/raspberry_pi | c5c5fe791f021de4356a442717450c815f858a81 | d8a531ae9ade5f3e1f49c7d1b21583fbe1b8c09e | refs/heads/master | 2020-06-11T07:57:19.140772 | 2017-01-04T12:00:59 | 2017-01-04T12:00:59 | 75,728,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,652 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
#FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
executed in stead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['sale_service'],
}
will install both ``sale_crm`` and ``sale_service`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install (if they return anything, otherwise they should not return
anything, though returning any "falsy" value such as None or an
empty collection will have the same effect).
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, setup your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Setup
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's record
:returns: a list of all installed modules in this installer
:rtype: recordset (collection of Record)
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns.get(module_name)) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access, attributes)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
    """ Base configuration wizard for application settings. It provides support for setting
        default values, assigning groups to employee users, and installing modules.
        To make such a 'settings' wizard, define a model like::
            class my_config_wizard(osv.osv_memory):
                _name = 'my.settings'
                _inherit = 'res.config.settings'
                _columns = {
                    'default_foo': fields.type(..., default_model='my.model'),
                    'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
                    'module_baz': fields.boolean(...),
                    'other_field': fields.type(...),
                }
        The method ``execute`` provides some support based on a naming convention:
        * For a field like 'default_XXX', ``execute`` sets the (global) default value of
          the field 'XXX' in the model named by ``default_model`` to the field's value.
        * For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
          to/from the implied groups of 'group', depending on the field's value.
          By default 'group' is the group Employee. Groups are given by their xml id.
          The attribute 'group' may contain several xml ids, separated by commas.
        * For a boolean field like 'module_XXX', ``execute`` triggers the immediate
          installation of the module named 'XXX' if the field has value ``True``.
        * For the other fields, the method ``execute`` invokes all methods with a name
          that starts with 'set_'; such methods can be defined to implement the effect
          of those fields.
        The method ``default_get`` retrieves values that reflect the current status of the
        fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
        with a name that starts with 'get_default_'; such methods can be defined to provide
        current values for other fields.
    """
    _name = 'res.config.settings'

    def copy(self, cr, uid, id, values, context=None):
        # Settings wizards are one-shot transient records: duplication makes
        # no sense, so it is forbidden outright.
        raise osv.except_osv(_("Cannot duplicate configuration!"), "")

    def fields_view_get(self, cr, user, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        # Post-process the generated view: attach an on_change handler to
        # every 'module_*' checkbox so the user is warned before an
        # uninstallation is scheduled (see onchange_module below).
        ret_val = super(res_config_settings, self).fields_view_get(
            cr, user, view_id=view_id, view_type=view_type, context=context,
            toolbar=toolbar, submenu=submenu)
        doc = etree.XML(ret_val['arch'])
        for field in ret_val['fields']:
            if not field.startswith("module_"):
                continue
            for node in doc.xpath("//field[@name='%s']" % field):
                if 'on_change' not in node.attrib:
                    node.set("on_change",
                             "onchange_module(%s, '%s')" % (field, field))
        ret_val['arch'] = etree.tostring(doc)
        return ret_val

    def onchange_module(self, cr, uid, ids, field_value, module_name, context=None):
        # FIX: the original signature used the mutable default ``context={}``,
        # which is shared across calls; use None + local initialization.
        if context is None:
            context = {}
        module_pool = self.pool.get('ir.module.module')
        module_ids = module_pool.search(
            cr, uid, [('name', '=', module_name.replace("module_", '')),
                      ('state', 'in', ['to install', 'installed', 'to upgrade'])],
            context=context)
        if module_ids and not field_value:
            # Unchecking an installed module: warn about every module that
            # would be uninstalled along with it (downstream dependencies).
            dep_ids = module_pool.downstream_dependencies(cr, uid, module_ids, context=context)
            dep_name = [x.shortdesc for x in module_pool.browse(
                cr, uid, dep_ids + module_ids, context=context)]
            message = '\n'.join(dep_name)
            return {
                'warning': {
                    'title': _('Warning!'),
                    'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
                }
            }
        return {}

    def _get_classified_fields(self, cr, uid, context=None):
        """ return a dictionary with the fields classified by category::
                {   'default': [('default_foo', 'model', 'foo'), ...],
                    'group':   [('group_bar', [browse_group], browse_implied_group), ...],
                    'module':  [('module_baz', browse_module), ...],
                    'other':   ['other_field', ...],
                }
        """
        ir_model_data = self.pool['ir.model.data']
        ir_module = self.pool['ir.module.module']

        def ref(xml_id):
            # Resolve 'module.xml_id' to its browse record.
            mod, xml = xml_id.split('.', 1)
            return ir_model_data.get_object(cr, uid, mod, xml, context=context)

        defaults, groups, modules, others = [], [], [], []
        for name, field in self._columns.items():
            if name.startswith('default_') and hasattr(field, 'default_model'):
                defaults.append((name, field.default_model, name[8:]))
            elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
                # 'group' may list several comma-separated xml ids.
                field_groups = getattr(field, 'group', 'base.group_user').split(',')
                groups.append((name, map(ref, field_groups), ref(field.implied_group)))
            elif name.startswith('module_') and isinstance(field, fields.boolean):
                mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
                record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
                modules.append((name, record))
            else:
                others.append(name)
        return {'default': defaults, 'group': groups, 'module': modules, 'other': others}

    def default_get(self, cr, uid, fields, context=None):
        ir_values = self.pool['ir.values']
        classified = self._get_classified_fields(cr, uid, context)
        res = super(res_config_settings, self).default_get(cr, uid, fields, context)
        # defaults: take the corresponding default value they set
        for name, model, field in classified['default']:
            value = ir_values.get_default(cr, uid, model, field)
            if value is not None:
                res[name] = value
        # groups: which groups are implied by the group Employee
        for name, groups, implied_group in classified['group']:
            res[name] = all(implied_group in group.implied_ids for group in groups)
        # modules: which modules are installed/to install
        for name, module in classified['module']:
            res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
        # other fields: call all methods that start with 'get_default_'
        for method in dir(self):
            if method.startswith('get_default_'):
                res.update(getattr(self, method)(cr, uid, fields, context))
        return res

    def execute(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        context = dict(context, active_test=False)
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
        ir_values = self.pool['ir.values']
        ir_module = self.pool['ir.module.module']
        res_groups = self.pool['res.groups']
        classified = self._get_classified_fields(cr, uid, context=context)
        config = self.browse(cr, uid, ids[0], context)
        # default values fields
        for name, model, field in classified['default']:
            ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
        # group fields: modify group / implied groups
        for name, groups, implied_group in classified['group']:
            gids = map(int, groups)
            if config[name]:
                res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
            else:
                res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
                # Also remove the implied group from every user of the
                # source groups, since the implication no longer holds.
                uids = set()
                for group in groups:
                    uids.update(map(int, group.users))
                implied_group.write({'users': [(3, u) for u in uids]})
        # other fields: execute all methods that start with 'set_'
        for method in dir(self):
            if method.startswith('set_'):
                getattr(self, method)(cr, uid, ids, context)
        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_ids = []
        lm = len('module_')
        for name, module in classified['module']:
            if config[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_ids.append(module.id)
        if to_uninstall_ids:
            ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
        action = self._install_modules(cr, uid, to_install, context=context)
        if action:
            return action
        # After the uninstall/install calls, the self.pool is no longer valid.
        # So we reach into the RegistryManager directly.
        res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
        config = res_config.next(cr, uid, [], context=context) or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    def cancel(self, cr, uid, ids, context=None):
        # ignore the current record, and send the action to reopen the view
        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
        if action_ids:
            return act_window.read(cr, uid, action_ids[0], [], context=context)
        return {}

    def name_get(self, cr, uid, ids, context=None):
        """ Override name_get method to return an appropriate configuration wizard
        name, and not the generated name."""
        if not ids:
            return []
        # name_get may receive int id instead of an id list
        if isinstance(ids, (int, long)):
            ids = [ids]
        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
        name = self._name
        if action_ids:
            name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
        return [(record.id, name) for record in self.browse(cr, uid, ids, context=context)]

    def get_option_path(self, cr, uid, menu_xml_id, context=None):
        """
        Fetch the path to a specified configuration view and the action id to access it.

        :param string menu_xml_id: the xml id of the menuitem where the view is located,
            structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
        :return tuple:
            - t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
            - t[1]: int or long: id of the menuitem's action
        """
        module_name, menu_xml_id = menu_xml_id.split('.')
        dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
        ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
        return (ir_ui_menu.complete_name, ir_ui_menu.action.id)

    def get_option_name(self, cr, uid, full_field_name, context=None):
        """
        Fetch the human readable name of a specified configuration option.

        :param string full_field_name: the full name of the field, structured as follows:
            model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
        :return string: human readable name of the field (e.g.: "Create leads from incoming mails")
        """
        model_name, field_name = full_field_name.rsplit('.', 1)
        return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']

    def get_config_warning(self, cr, msg, context=None):
        """
        Helper: return a Warning exception with the given message where the %(field:xxx)s
        and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
        full path.

        Usage:
        ------
        Just include in your error message %(field:model_name.field_name)s to obtain the human
        readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
        full path.

        Example of use:
        ---------------
        from openerp.addons.base.res.res_config import get_warning_config
        raise get_warning_config(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)

        This will return an exception containing the following message:
            Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.

        What if there is another substitution in the message already?
        -------------------------------------------------------------
        You could have a situation where the error message you want to upgrade already contains a substitution. Example:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %menu:account.menu_account_config)s, and leave the rest alone.
        In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
        """
        res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
        regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
        # Process the message
        # 1/ find the menu and/or field references, put them in a list
        references = re.findall(regex_path, msg, flags=re.I)
        # 2/ fetch the menu and/or field replacement values (full path and
        #    human readable field's name) and the action_id if any
        values = {}
        action_id = None
        for item in references:
            ref_type, ref = item.split(':')
            if ref_type == 'menu':
                values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
            elif ref_type == 'field':
                values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
        # 3/ substitute and return the result
        if (action_id):
            return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
        return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"huangyingfei@hollywant.com"
] | huangyingfei@hollywant.com |
b023906757f0266c579b3042d843bdd4da38d017 | 8126291334a4288f51b1116ea31e953debf07039 | /SRC/engine/IO/propertyoutput.spy | 11311550633bb57671c61075db7d567d2fda3223 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | jumpingyu/OOF2 | 846a7dd506f029535153834607b698ce32dc155d | 31a25398b046c1963859dd96785329d2a9af8681 | refs/heads/master | 2020-05-21T09:12:07.013560 | 2019-04-02T21:05:49 | 2019-04-02T21:05:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,216 | spy | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import symmmatrix
from ooflib.common import debug
from ooflib.common import utils
from ooflib.engine.IO import output
from ooflib.engine.IO import outputClones
import types, sys
# The PropertyOutputRegistration subclasses create an Output object
# for each registered PropertyOutput. This bridges the gap between
# the C++ PropertyOutputs and the more general Python Outputs.
class PORegBase(PropertyOutputRegistration):
    """Common base for PropertyOutput registrations.

    Bridges C++ PropertyOutputs and Python Outputs: ``opfunc`` evaluates the
    registered property on each element, and ``convert`` post-processes the
    collected values (identity by default; subclasses may override it).
    """
    def opfunc(self, mesh, elements, coords, **params):
        prop_output = self.instantiate(params)
        mesh.precompute_all_subproblems()
        init = self.initializer()
        values = []
        for elem, elem_coords in zip(elements, coords):
            elem.material()  # kept: may have side effects in the C++ layer -- TODO confirm
            mesh.begin_all_subproblems(elem)
            values += prop_output.evaluate(mesh, elem, init, elem_coords)
            mesh.end_all_subproblems(elem)
        return self.convert(values)

    def convert(self, results):
        # Identity hook; subclasses override to post-process the output list.
        return results
##### Scalar outputs
class ScalarPropertyOutputRegistration(PORegBase):
    """Register a scalar-valued PropertyOutput, exposing it both as a
    Scalar Output and as an Aggregate Output under the same name."""
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or ScalarPropertyOutputInit())
        scalar_output = output.Output(
            name=name,
            callback=self.opfunc,
            otype=outputval.ScalarOutputValPtr,
            instancefn=outputClones.scalar_instancefn,
            column_names=outputClones.single_column_name,
            params=parameters,
            srepr=srepr, tip=tip, discussion=discussion)
        output.defineScalarOutput(name, scalar_output, ordering=ordering)
        output.defineAggregateOutput(name, scalar_output, ordering=ordering)
##### SymmMatrix3 outputs.
def _symmmatrix3_instancefn(self):
    # Prototype output value: a 3x3 symmetric matrix with all six
    # independent components set to zero.
    return symmmatrix.SymmMatrix3(*([0.] * 6))
def _symmmatrix3_column_names(self):
    """Return one column label per independent matrix component,
    formatted as "<shortrepr>[<component>]"."""
    base = self.shortrepr()
    labels = []
    iterator = self.outputInstance().getIterator()
    # The C++ iterator uses end()/next(), not the Python protocol.
    while not iterator.end():
        labels.append("%s[%s]" % (base, iterator.shortstring()))
        iterator.next()
    return labels
class SymmMatrix3PropertyOutputRegistration(PORegBase):
    # Registers a symmetric-3x3-matrix-valued PropertyOutput and exposes it
    # three ways: the full value (aggregate), a single component (scalar),
    # and an invariant (scalar and aggregate).
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or SymmMatrix3PropertyOutputInit())
        op = output.Output(name=name,
                           callback=self.opfunc,
                           otype=outputval.OutputValPtr,
                           instancefn=_symmmatrix3_instancefn,
                           srepr=srepr,
                           column_names=_symmmatrix3_column_names,
                           params=parameters,
                           tip=tip, discussion=discussion)
        output.defineAggregateOutput(name+":Value", op, ordering=ordering)
        # shortrepr for the component output, e.g. "stress[xy]".
        def comprepr(s):
            comp = s.resolveAlias("component").value
            # We have to pass s to op.shortrepr so that the shortrepr
            # will be computed for the actual Output, not the Output
            # defined above.  The actual output will be a clone of the
            # one defined there.
            return "%s[%s]" % (op.shortrepr(s), comp)
        compout = outputClones.ComponentOutput.clone(
            name=name+" Component",
            tip='Compute components of %s' % name,
            srepr=comprepr,
            discussion=
            """
            <para>Compute the specified component of %s on a &mesh;.</para>
            """
            % name)
        compout.connect('field', op)
        # Forward this registration's parameters through the cloned output.
        for param in parameters:
            compout.aliasParam('field:' + param.name, param.name)
        output.defineScalarOutput(name+":Component", compout, ordering=ordering)
        # shortrepr for the invariant output, e.g. "Trace(stress)".
        def invariantrepr(s):
            invariant = s.resolveAlias("invariant").value.shortrepr()
            # See comment above about op.shortrepr(s)
            return "%s(%s)" % (invariant, op.shortrepr(s))
        invout = outputClones.InvariantOutput.clone(
            name=name+" Invariant",
            srepr=invariantrepr,
            tip='Compute invariants of %s' % name,
            discussion="""
            <para>Compute the specified invariant of %s on a &mesh;.</para>
            """
            % name)
        invout.connect('field', op)
        for param in parameters:
            invout.aliasParam('field:' + param.name, param.name)
        output.defineScalarOutput(name+":Invariant", invout, ordering=ordering)
        output.defineAggregateOutput(name+":Invariant", invout,
                                     ordering=ordering)
# ThreeVector outputs
## TODO 3D: These should add themselves as "Value" outputs, and there
## should be an "Invariant" output, also, since 3-vectors have a
## magnitude. srepr's and column_name's need to be adjusted/provided.
## None of this is implemented yet because there are no
## ThreeVectorPropertyOutputs to test it on.
class ThreeVectorPropertyOutputRegistration(PORegBase):
    # Registers a 3-vector-valued PropertyOutput: the full vector is an
    # aggregate output, and a single component is a scalar output.
    def __init__(self, name, initializer=None, parameters=[], ordering=0,
                 srepr=None, tip=None, discussion=None):
        PropertyOutputRegistration.__init__(
            self, name, initializer or ThreeVectorPropertyOutputInit())
        op = output.Output(name=name,
                           callback=self.opfunc,
                           otype=outputval.OutputValPtr,
                           instancefn=outputClones.vector_instancefn,
                           params=parameters,
                           srepr=srepr, tip=tip,
                           discussion=discussion)
        output.defineAggregateOutput(name, op, ordering=ordering)
        compout = outputClones.ComponentOutput.clone(
            name=name+" Component",
            tip='Compute components of %s' % name,
            discussion=
            """
            <para>Compute the specified component of <link
            linkend='Output-%s'>%s</link> on a &mesh;.</para>
            """
            % (name, name))
        compout.connect('field', op)
        # Forward this registration's parameters through the cloned output.
        for param in parameters:
            compout.aliasParam('field:'+param.name, param.name)
        output.defineScalarOutput(name+":Component", compout, ordering=ordering)
| [
"lnz5@rosie.nist.gov"
] | lnz5@rosie.nist.gov |
bb5b811d9537528cae86fecc4041513611da8c81 | 89f77da07bddeea68d7a4af3f0bf7ba6ae88de80 | /cgi_programming/launching_jupyter.cgi | 8f09ff8158be623fbe32211f3e13221e1767f8e9 | [] | no_license | Harshhg/Python | f865c9c727c288d85c961fc8faff4079bfb46e01 | 0322737d0d94bf2fe0886317f5087b405db4eddb | refs/heads/master | 2020-04-27T01:53:00.908413 | 2020-01-08T13:18:29 | 2020-01-08T13:18:29 | 173,977,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | cgi | import cgi,subprocess
import cgi
import cgitb
import os
import subprocess  # FIX: subprocess.getoutput() is used below but was not imported in this span
import time
import webbrowser

# CGI script: installs Jupyter, starts a notebook server, and emits an HTML
# page that opens the tokenized notebook URL in a new browser tab.
cgitb.enable()
print("Content-type:text/html")
print("")
web = cgi.FieldStorage()
data = web.getvalue('install')
print("Jupyter is installing and running")
subprocess.getoutput('sudo pip3 install jupyter')
os.system("jupyter-notebook --ip=0.0.0.0 --port=8888 &>/dev/null &")
time.sleep(5)  # give the server time to start before querying its token
x = subprocess.getoutput("jupyter-notebook list | sed 1d | awk '{print $1}' | awk -F[?:] '{print $4}'")
url = "http://13.233.108.214:8888/?" + x
# FIX: the URL must be quoted inside window.open(), otherwise the generated
# JavaScript is a syntax error (window.open(http://..., '_blank')).
print('''
<html>
<head>
<script type="text/javascript">
function load()
{
window.open('%s','_blank');
}
</script>
</head>
<body onload="load()">
</body>
</html>
''' % url)
| [
"noreply@github.com"
] | noreply@github.com |
0705a6b331883e33c9fb69a9749b737c41d38af1 | d51a656e482fa516113c2d9341de7dae317fbb81 | /Board.py | 0e29610e4231e428adaf6d9b8a190eea54ba4c30 | [] | no_license | brmaciel/Battleship | 5492341a55f967b244b72730674debe3915701ab | 71d0e5e836beb5fa85f358d5936b1285b6b53532 | refs/heads/master | 2020-04-13T02:57:51.994608 | 2019-01-05T16:11:55 | 2019-01-05T16:11:55 | 162,916,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | class Board(object):
__size = 10
def __init__(self, nome):
self.nome = nome
self.__tabuleiro = []
self.__defineBoard()
def __defineBoard(self):
for n in range(self.__size):
self.__tabuleiro.append(['_'] * self.__size)
def printBoard(self):
for rows in self.__tabuleiro:
print("\t", " ".join(rows))
print("\n")
def setMark_on_board(self, row, col, mark):
self.__tabuleiro[row][col] = mark
def eraseMark(self, mark):
for posX in range(1, self.__size):
for posY in range(1, self.__size):
if self.__tabuleiro[posX - 1][posY - 1] == mark:
self.setMark_on_board(posX - 1, posY - 1, "_")
### Metodos Get ###
@property
def size(self):
return self.__size
def getBoardPosition(self, row, col):
return self.__tabuleiro[row][col]
| [
"brunomaciel.xc4@gmail.com"
] | brunomaciel.xc4@gmail.com |
484968ae72d6124a5b058424f30e432f7844d3b0 | 77a5332852c30c6cc52378e39d1a87a93b0d614a | /scripts/get_windows_temperature.py | 2234ba4554f4fd9969f8588596e39ab0f0bb2b29 | [] | no_license | alexandre-mazel/electronoos | b6798bd81e5d81490fa6b7c906cdb753f0149682 | 12217d53c68a2fc3c1f9b74876fc2699e764fcba | refs/heads/master | 2023-08-17T11:04:51.920710 | 2023-08-14T11:54:31 | 2023-08-14T11:54:31 | 30,472,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,369 | py | #way to get temperature on windows but:
# nothing works on my ms4
import ctypes
import ctypes.wintypes as wintypes
from ctypes import windll
def othermethod():
    # Experimental: read temperatures through WMI (third-party 'wmi' package,
    # Windows only).  Prints raw probe data for inspection; returns None.
    import wmi
    w = wmi.WMI()
    print(dir(w))
    print(w.Win32_TemperatureProbe())
    #print(w.Win32_TemperatureProbe()[0].CurrentReading) # not filled on mstab4
    w = wmi.WMI(namespace="root\wmi")
    temperature_info = w.MSAcpi_ThermalZoneTemperature()[0]
    print("temp info: %s" % str(w.MSAcpi_ThermalZoneTemperature()))
    print( "root temp: %s" % temperature_info.CurrentTemperature ) # always: 3462
    # Probe the first few thermal zones; indexing past the end raises, hence
    # the bare try/except around each read.
    for i in range(10):
        try:
            print( "iter temp %d: %s" % (i,w.MSAcpi_ThermalZoneTemperature()[i].CurrentTemperature) ) # always: 3462,2732,2025,3192
        except: pass
    if 0:
        # Disabled: dumps the attribute list of each thermal zone object.
        for i in range(10):
            try:
                print( "iter temp %d: %s" % (i,dir(w.MSAcpi_ThermalZoneTemperature()[i])) )
            except: pass
# ctypes aliases for Win32 pointer types used in the wrappers below.
LPDWORD = ctypes.POINTER(wintypes.DWORD)
LPOVERLAPPED = wintypes.LPVOID
LPSECURITY_ATTRIBUTES = wintypes.LPVOID
# CreateFile desired-access flags.
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
FILE_SHARE_WRITE=0x00000004
ZERO=0x00000000
# CreateFile creation-disposition values.
CREATE_NEW = 1
CREATE_ALWAYS = 2
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5
FILE_ATTRIBUTE_NORMAL = 0x00000080
INVALID_HANDLE_VALUE = -1
# IOCTL code components (see CTL_CODE below).
FILE_DEVICE_UNKNOWN=0x00000022
METHOD_BUFFERED=0
FUNC=0x900
FILE_WRITE_ACCESS=0x002
NULL = 0
FALSE = wintypes.BOOL(0)
TRUE = wintypes.BOOL(1)
def CTL_CODE(DeviceType, Function, Method, Access): return (DeviceType << 16) | (Access << 14) | (Function <<2) | Method
def _CreateFile(filename, access, mode, creation, flags):
    """Thin ctypes wrapper over kernel32 CreateFileW.

    See: CreateFile function
    http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).asp
    """
    create_file = windll.kernel32.CreateFileW
    create_file.argtypes = [
        wintypes.LPWSTR,         # lpFileName
        wintypes.DWORD,          # dwDesiredAccess
        wintypes.DWORD,          # dwShareMode
        LPSECURITY_ATTRIBUTES,   # lpSecurityAttributes
        wintypes.DWORD,          # dwCreationDisposition
        wintypes.DWORD,          # dwFlagsAndAttributes
        wintypes.HANDLE,         # hTemplateFile
    ]
    create_file.restype = wintypes.HANDLE
    raw_handle = create_file(filename, access, mode, NULL, creation, flags, NULL)
    return wintypes.HANDLE(raw_handle)
handle=_CreateFile('\\\\.\\AdvLmDev',GENERIC_WRITE,FILE_SHARE_WRITE,OPEN_EXISTING,ZERO)
def _DeviceIoControl(devhandle, ioctl, inbuf, inbufsiz, outbuf, outbufsiz):
    """See: DeviceIoControl function
    http://msdn.microsoft.com/en-us/library/aa363216(v=vs.85).aspx

    Returns (status, dwBytesReturned): status is a BOOL-like success flag,
    dwBytesReturned a DWORD with the number of bytes written to outbuf.
    """
    DeviceIoControl_Fn = windll.kernel32.DeviceIoControl
    DeviceIoControl_Fn.argtypes = [
        wintypes.HANDLE,                    # _In_ HANDLE hDevice
        wintypes.DWORD,                     # _In_ DWORD dwIoControlCode
        wintypes.LPVOID,                    # _In_opt_ LPVOID lpInBuffer
        wintypes.DWORD,                     # _In_ DWORD nInBufferSize
        wintypes.LPVOID,                    # _Out_opt_ LPVOID lpOutBuffer
        wintypes.DWORD,                     # _In_ DWORD nOutBufferSize
        LPDWORD,                            # _Out_opt_ LPDWORD lpBytesReturned
        LPOVERLAPPED]                       # _Inout_opt_ LPOVERLAPPED lpOverlapped
    DeviceIoControl_Fn.restype = wintypes.BOOL
    # allocate a DWORD, and take its reference
    dwBytesReturned = wintypes.DWORD(0)
    lpBytesReturned = ctypes.byref(dwBytesReturned)
    status = DeviceIoControl_Fn(devhandle,
                  ioctl,
                  inbuf,
                  inbufsiz,
                  outbuf,
                  outbufsiz,
                  lpBytesReturned,
                  NULL)
    return status, dwBytesReturned
class OUTPUT_temp(ctypes.Structure):
    """Output buffer layout for the temperature IOCTL (FUNC=0x900).

    Field order must match the driver's binary layout exactly.
    Field names are assumptions about what each DWORD holds -- TODO confirm.
    See: http://msdn.microsoft.com/en-us/library/aa363972(v=vs.85).aspx"""
    _fields_ = [
        ('Board Temp', wintypes.DWORD),
        ('CPU Temp', wintypes.DWORD),
        ('Board Temp2', wintypes.DWORD),
        ('temp4', wintypes.DWORD),
        ('temp5', wintypes.DWORD)
        ]
class OUTPUT_volt(ctypes.Structure):
    """Output buffer layout for the voltage IOCTL (FUNC=0x901).

    Field order must match the driver's binary layout exactly.
    Field names are assumptions about what each DWORD holds -- TODO confirm.
    See: http://msdn.microsoft.com/en-us/library/aa363972(v=vs.85).aspx"""
    _fields_ = [
        ('VCore', wintypes.DWORD),
        ('V(in2)', wintypes.DWORD),
        ('3.3V', wintypes.DWORD),
        ('5.0V', wintypes.DWORD),
        ('temp5', wintypes.DWORD)
        ]
def get_temperature():
    # Query the AdvLmDev device for temperatures via IOCTL 0x900 and return
    # a dict mapping OUTPUT_temp field names to the raw DWORD values.
    FUNC=0x900
    outDict={}
    ioclt=CTL_CODE(FILE_DEVICE_UNKNOWN, FUNC, METHOD_BUFFERED, FILE_WRITE_ACCESS)
    handle=_CreateFile('\\\\.\\AdvLmDev',GENERIC_WRITE,FILE_SHARE_WRITE,OPEN_EXISTING,ZERO)
    win_list = OUTPUT_temp()
    p_win_list = ctypes.pointer(win_list)
    SIZE=ctypes.sizeof(OUTPUT_temp)
    status, output = _DeviceIoControl(handle, ioclt , NULL, ZERO, p_win_list, SIZE)
    # Copy the filled structure into a plain dict, one entry per field.
    for field, typ in win_list._fields_:
        outDict[field]=getattr(win_list,field)
    return outDict
def get_voltages():
    # Query the AdvLmDev device for voltages via IOCTL 0x901 and return
    # a dict mapping OUTPUT_volt field names to the raw DWORD values.
    FUNC=0x901
    outDict={}
    ioclt=CTL_CODE(FILE_DEVICE_UNKNOWN, FUNC, METHOD_BUFFERED, FILE_WRITE_ACCESS)
    handle=_CreateFile('\\\\.\\AdvLmDev',GENERIC_WRITE,FILE_SHARE_WRITE,OPEN_EXISTING,ZERO)
    win_list = OUTPUT_volt()
    p_win_list = ctypes.pointer(win_list)
    SIZE=ctypes.sizeof(OUTPUT_volt)
    status, output = _DeviceIoControl(handle, ioclt , NULL, ZERO, p_win_list, SIZE)
    # Copy the filled structure into a plain dict, one entry per field.
    for field, typ in win_list._fields_:
        outDict[field]=getattr(win_list,field)
    return outDict
print(get_temperature())
print(get_voltages())
print(othermethod()) | [
"amazel@aldebaran.com"
] | amazel@aldebaran.com |
7b3800822c7145c0127f54c9dceb8d079296bb3c | 7d36eeeff5861cb485f9e9af27764605d5088c80 | /src/sgi/shortcuts.py | c2ba7f353e29a35e5f83c033df732781ebfdfd72 | [] | no_license | gmorada/sgi | cedb18aed51028fb88c717c4a9ac2f423f93cae2 | 2df75199845dc59ec02a5532605fe31f33633d2f | refs/heads/master | 2020-05-17T15:40:36.256673 | 2012-03-02T03:02:38 | 2012-03-02T03:02:38 | 3,557,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | # -*-coding:utf-8 -*-
from django.http import Http404
from django.core.urlresolvers import RegexURLPattern, get_callable
# FIX: the original list was missing a comma after 'ViewByMethod', so Python
# concatenated it with the next string into the bogus name
# 'ViewByMethodRegexUrlPatternByMethod', breaking star-imports of both names.
__all__ = [
    'discover_view',
    'ViewByMethod',
    'RegexUrlPatternByMethod',
    'route',
]
def discover_view(view, prefix=''):
    """Resolve *view* to a callable.

    Strings are treated as dotted view paths (optionally namespaced by
    *prefix*) and resolved via Django's ``get_callable``; anything else is
    assumed to already be callable and is returned unchanged.
    """
    if not isinstance(view, basestring):
        return view
    if not view:
        raise ValueError('View name is required to discover the callable')
    if prefix:
        view = prefix + '.' + view
    return get_callable(view)
class ViewByMethod(object):
    """Dispatch a request to a per-HTTP-method view callable.

    Raises Http404 when no callable is configured for the request's method.
    """
    def __init__(self, GET=None, POST=None):
        self.GET = GET
        self.POST = POST

    def __call__(self, request, *args, **kwargs):
        if request.method in ('GET', 'POST'):
            handler = getattr(self, request.method)
            if handler:
                return handler(request, *args, **kwargs)
        raise Http404
class RegexUrlPatternByMethod(RegexURLPattern):
    # URL pattern whose view is resolved lazily, per HTTP method, from the
    # GET/POST names (strings or callables) supplied at construction time.
    def __init__(self, regex, GET=None, POST=None, default_args=None, name=None):
        # The parent gets an empty callback; the real one is built on demand
        # by the `callback` property below.
        super(RegexUrlPatternByMethod, self).__init__(regex, '', default_args, name)
        self.GET = GET
        self.POST = POST
    def add_prefix(self, prefix):
        # Overrides the parent: just remember the prefix so string view
        # names can be namespaced when the callback is resolved.
        self.prefix = prefix
    def _get_callback(self):
        # Resolve both handlers (strings -> callables) and wrap them in a
        # method-dispatching view.  Recomputed on every access.
        callable_get = discover_view(self.GET, self.prefix)
        callable_post = discover_view(self.POST, self.prefix)
        return ViewByMethod(callable_get, callable_post)
    callback = property(_get_callback)
def route(regex, GET=None, POST=None, kwargs=None, name=None, prefix=''):
return RegexUrlPatternByMethod(regex, GET, POST, kwargs, name) | [
"gusmorada@gmail.com"
] | gusmorada@gmail.com |
b3cab15cfa9ee6275432973979c95e7f4ec1aa44 | dd2366bf11437a2d22383f12d2a8b5d50abc22fd | /coremltools/converters/keras/_keras2_converter.py | 8fdb06b64cf4db80342eba939d66e156e683ab4c | [] | no_license | marshallwhiteorg/coremltools | 1283cc99b3329fa8aa2a826243a68e6e5ae151b8 | 00d50f15219bdfba82c067987f817b38672eb94e | refs/heads/master | 2021-09-12T10:00:38.838074 | 2018-04-16T05:12:21 | 2018-04-16T05:12:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,042 | py | from six import string_types as _string_types
from ...models.neural_network import NeuralNetworkBuilder as _NeuralNetworkBuilder
from ...proto import FeatureTypes_pb2 as _FeatureTypes_pb2
from collections import OrderedDict as _OrderedDict
from ...models import datatypes
from ...models import MLModel as _MLModel
from ...models.utils import save_spec as _save_spec
from ..._deps import HAS_KERAS2_TF as _HAS_KERAS2_TF
if _HAS_KERAS2_TF:
import keras as _keras
from . import _layers2
from . import _topology2
_KERAS_LAYER_REGISTRY = {
_keras.layers.core.Dense: _layers2.convert_dense,
_keras.layers.core.Activation: _layers2.convert_activation,
_keras.layers.advanced_activations.LeakyReLU: _layers2.convert_activation,
_keras.layers.advanced_activations.PReLU: _layers2.convert_activation,
_keras.layers.advanced_activations.ELU: _layers2.convert_activation,
_keras.layers.advanced_activations.ThresholdedReLU: _layers2.convert_activation,
_keras.layers.convolutional.Conv2D: _layers2.convert_convolution,
_keras.layers.convolutional.Conv2DTranspose: _layers2.convert_convolution,
_keras.layers.convolutional.SeparableConv2D: _layers2.convert_separable_convolution,
_keras.layers.pooling.AveragePooling2D: _layers2.convert_pooling,
_keras.layers.pooling.MaxPooling2D: _layers2.convert_pooling,
_keras.layers.pooling.GlobalAveragePooling2D: _layers2.convert_pooling,
_keras.layers.pooling.GlobalMaxPooling2D: _layers2.convert_pooling,
_keras.layers.convolutional.ZeroPadding2D: _layers2.convert_padding,
_keras.layers.convolutional.Cropping2D: _layers2.convert_cropping,
_keras.layers.convolutional.UpSampling2D: _layers2.convert_upsample,
_keras.layers.convolutional.Conv1D: _layers2.convert_convolution1d,
_keras.layers.pooling.AveragePooling1D: _layers2.convert_pooling,
_keras.layers.pooling.MaxPooling1D: _layers2.convert_pooling,
_keras.layers.pooling.GlobalAveragePooling1D: _layers2.convert_pooling,
_keras.layers.pooling.GlobalMaxPooling1D: _layers2.convert_pooling,
_keras.layers.convolutional.ZeroPadding1D: _layers2.convert_padding,
_keras.layers.convolutional.Cropping1D: _layers2.convert_cropping,
_keras.layers.convolutional.UpSampling1D: _layers2.convert_upsample,
_keras.layers.recurrent.LSTM: _layers2.convert_lstm,
_keras.layers.recurrent.SimpleRNN: _layers2.convert_simple_rnn,
_keras.layers.recurrent.GRU: _layers2.convert_gru,
_keras.layers.wrappers.Bidirectional: _layers2.convert_bidirectional,
_keras.layers.normalization.BatchNormalization: _layers2.convert_batchnorm,
_keras.layers.Add: _layers2.convert_merge,
_keras.layers.Multiply: _layers2.convert_merge,
_keras.layers.Average: _layers2.convert_merge,
_keras.layers.Maximum: _layers2.convert_merge,
_keras.layers.Concatenate: _layers2.convert_merge,
_keras.layers.Dot: _layers2.convert_merge,
_keras.layers.core.Flatten: _layers2.convert_flatten,
_keras.layers.core.Permute:_layers2.convert_permute,
_keras.layers.core.Reshape:_layers2.convert_reshape,
_keras.layers.embeddings.Embedding:_layers2.convert_embedding,
_keras.layers.core.RepeatVector:_layers2.convert_repeat_vector,
_keras.engine.topology.InputLayer:_layers2.default_skip,
_keras.layers.core.Dropout:_layers2.default_skip,
_keras.layers.wrappers.TimeDistributed:_layers2.default_skip,
_keras.applications.mobilenet.DepthwiseConv2D:_layers2.convert_convolution,
}
_KERAS_SKIP_LAYERS = [
_keras.layers.core.Dropout,
]
def _is_merge_layer(layer):
    """Return True when `layer` is an instance of one of the Keras merge
    layer classes listed in the module-level _KERAS_MERGE_LAYERS."""
    return any(isinstance(layer, merge_cls) for merge_cls in _KERAS_MERGE_LAYERS)
def _check_unsupported_layers(model):
    """
    Validate that every layer of a Keras model has a registered converter.

    Recurses into nested Sequential / functional sub-models.  TimeDistributed
    wrappers are additionally checked against their wrapped layer, and
    Bidirectional wrappers are only accepted around LSTM layers.

    :param model: Keras model to validate.
    :raises ValueError: On the first unsupported layer encountered.
    """
    # The original iterated with enumerate() but never used the index.
    for layer in model.layers:
        if isinstance(layer, (_keras.models.Sequential, _keras.models.Model)):
            # Nested sub-model: validate its layers recursively.
            _check_unsupported_layers(layer)
            continue
        if type(layer) not in _KERAS_LAYER_REGISTRY:
            raise ValueError(
                "Keras layer '%s' not supported. " % str(type(layer)))
        if isinstance(layer, _keras.layers.wrappers.TimeDistributed):
            # The wrapped layer is the one that will actually be converted.
            if type(layer.layer) not in _KERAS_LAYER_REGISTRY:
                raise ValueError(
                    "Keras layer '%s' not supported. " % str(type(layer.layer)))
        if isinstance(layer, _keras.layers.wrappers.Bidirectional):
            if not isinstance(layer.layer, _keras.layers.recurrent.LSTM):
                raise ValueError(
                    "Keras bi-directional wrapper conversion supports only LSTM layer at this time. ")
def _get_layer_converter_fn(layer):
    """Get the right converter function for Keras
    """
    converter = _KERAS_LAYER_REGISTRY.get(type(layer))
    if converter is None:
        raise TypeError("Keras layer of type %s is not supported." % type(layer))
    return converter
def _load_keras_model(model_network_path, model_weight_path):
    """Load a keras model from disk

    Parameters
    ----------
    model_network_path: str
        Path where the model network path is (json file)

    model_weight_path: str
        Path where the model network weights are (hd5 file)

    Returns
    -------
    model: A keras model
    """
    from keras.models import model_from_json

    # Load the model network.  Using a context manager guarantees the file
    # handle is closed even if read() raises (the original leaked it then).
    with open(model_network_path, 'r') as json_file:
        loaded_model_json = json_file.read()

    # Load the model weights.  (The original's unused `import json` removed.)
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_weight_path)

    return loaded_model
def _convert(model,
             input_names = None,
             output_names = None,
             image_input_names = None,
             is_bgr = False,
             red_bias = 0.0,
             green_bias = 0.0,
             blue_bias = 0.0,
             gray_bias = 0.0,
             image_scale = 1.0,
             class_labels = None,
             predicted_feature_name = None,
             predicted_probabilities_output = ''):
    """
    Convert a Keras model into a Core ML MLModel.

    :param model: A Keras model instance, a path to a saved model, or a
        (json_path, weights_path) tuple.
    :param input_names: Optional name(s) for the model inputs; defaults to
        'input1', 'input2', ...
    :param output_names: Optional name(s) for the model outputs; defaults to
        'output1', 'output2', ...
    :param image_input_names: Input(s) to be treated as images.
    :param is_bgr, red_bias, green_bias, blue_bias, gray_bias, image_scale:
        Image pre-processing parameters forwarded to the builder.
    :param class_labels: List of labels, or path to a newline-separated
        label file; presence makes the model a classifier.
    :param predicted_feature_name: Output feature name for predicted class.
    :param predicted_probabilities_output: Blob holding class probabilities.
    :return: The converted model wrapped in an MLModel.
    :raises ValueError: On unsupported layers, bad shapes or bad labels.
    """
    # Accept a saved-model path or a (network_json, weights_hd5) pair.
    if isinstance(model, _string_types):
        model = _keras.models.load_model(model)
    elif isinstance(model, tuple):
        model = _load_keras_model(model[0], model[1])

    # Check valid versions
    _check_unsupported_layers(model)

    # Build network graph to represent Keras model
    graph = _topology2.NetGraph(model)
    graph.build()
    graph.remove_skip_layers(_KERAS_SKIP_LAYERS)
    graph.insert_1d_permute_layers()
    graph.insert_permute_for_spatial_bn()
    graph.defuse_activation()
    graph.remove_internal_input_layers()
    graph.make_output_layers()

    # The graph should be finalized before executing this
    graph.generate_blob_names()
    graph.add_recurrent_optionals()

    inputs = graph.get_input_layers()
    outputs = graph.get_output_layers()

    # check input / output names validity
    if input_names is not None:
        if isinstance(input_names, _string_types):
            input_names = [input_names]
    else:
        input_names = ['input' + str(i+1) for i in range(len(inputs))]
    if output_names is not None:
        if isinstance(output_names, _string_types):
            output_names = [output_names]
    else:
        output_names = ['output' + str(i+1) for i in range(len(outputs))]

    if image_input_names is not None and isinstance(image_input_names, _string_types):
        image_input_names = [image_input_names]

    graph.reset_model_input_names(input_names)
    graph.reset_model_output_names(output_names)

    # Keras -> Core ML input dimension dictionary
    # (None, None) -> [1, 1, 1, 1, 1]
    # (None, D) -> [D] or [D, 1, 1, 1, 1]
    # (None, Seq, D) -> [Seq, 1, D, 1, 1]
    # (None, H, W, C) -> [C, H, W]
    # (D) -> [D]
    # (Seq, D) -> [Seq, 1, 1, D, 1]
    # (Batch, Sequence, D) -> [D]

    # Retrieve input shapes from model
    # NOTE(review): under Python 3, filter() returns lazy iterators; they are
    # materialised by the list(dim) calls in the loops below -- confirm this
    # was intended when porting from Python 2.
    if type(model.input_shape) is list:
        input_dims = [filter(None, x) for x in model.input_shape]
        unfiltered_shapes = model.input_shape
    else:
        input_dims = [filter(None, model.input_shape)]
        unfiltered_shapes = [model.input_shape]
    for idx, dim in enumerate(input_dims):
        unfiltered_shape = unfiltered_shapes[idx]
        dim = list(dim)
        if len(dim) == 0:
            # Used to be [None, None] before filtering; indicating unknown sequence length
            input_dims[idx] = tuple([1])
        elif len(dim) == 1:
            s = graph.get_successors(inputs[idx])[0]
            if isinstance(graph.get_keras_layer(s), _keras.layers.embeddings.Embedding):
                # Embedding layer's special input (None, D) where D is actually sequence length
                input_dims[idx] = (1,)
            else:
                input_dims[idx] = dim  # dim is just a number
        elif len(dim) == 2:  # [Seq, D]
            input_dims[idx] = (dim[1],)
        elif len(dim) == 3:  # H,W,C
            if (len(unfiltered_shape) > 3):
                # keras uses the reverse notation from us
                input_dims[idx] = (dim[2], dim[0], dim[1])
            else:  # keras provided fixed batch and sequence length, so the input was (batch, sequence, channel)
                input_dims[idx] = (dim[2],)
        else:
            raise ValueError('Input' + input_names[idx] + 'has input shape of length' + str(len(dim)))

    # Retrieve output shapes from model
    if type(model.output_shape) is list:
        output_dims = [filter(None, x) for x in model.output_shape]
    else:
        output_dims = [filter(None, model.output_shape[1:])]
    for idx, dim in enumerate(output_dims):
        dim = list(dim)
        if len(dim) == 1:
            output_dims[idx] = dim
        elif len(dim) == 2:  # [Seq, D]
            output_dims[idx] = (dim[1],)
        elif len(dim) == 3:
            output_dims[idx] = (dim[2], dim[1], dim[0])

    input_types = [datatypes.Array(*dim) for dim in input_dims]
    output_types = [datatypes.Array(*dim) for dim in output_dims]

    # Some of the feature handling is sensitive about string vs. unicode
    # NOTE(review): under Python 3 these map objects are one-shot iterators
    # consumed by the zip() calls below; the names are later rebound inside
    # the layer loop anyway -- confirm this shadowing is intentional.
    input_names = map(str, input_names)
    output_names = map(str, output_names)
    is_classifier = class_labels is not None
    if is_classifier:
        mode = 'classifier'
    else:
        mode = None

    # assuming these match
    input_features = list(zip(input_names, input_types))
    output_features = list(zip(output_names, output_types))

    builder = _NeuralNetworkBuilder(input_features, output_features, mode = mode)

    for iter, layer in enumerate(graph.layer_list):
        keras_layer = graph.keras_layer_map[layer]
        print("%d : %s, %s" % (iter, layer, keras_layer))
        if isinstance(keras_layer, _keras.layers.wrappers.TimeDistributed):
            keras_layer = keras_layer.layer
        converter_func = _get_layer_converter_fn(keras_layer)
        input_names, output_names = graph.get_layer_blobs(layer)
        converter_func(builder, layer, input_names, output_names, keras_layer)

    # Since we aren't mangling anything the user gave us, we only need to update
    # the model interface here
    builder.add_optionals(graph.optional_inputs, graph.optional_outputs)
    # Add classifier classes (if applicable)
    if is_classifier:
        classes_in = class_labels
        if isinstance(classes_in, _string_types):
            import os
            if not os.path.isfile(classes_in):
                raise ValueError("Path to class labels (%s) does not exist." % classes_in)
            with open(classes_in, 'r') as f:
                classes = f.read()
            classes = classes.splitlines()
        elif type(classes_in) is list:  # list[int or str]
            classes = classes_in
        else:
            raise ValueError('Class labels must be a list of integers / strings, or a file path')
        if predicted_feature_name is not None:
            builder.set_class_labels(classes, predicted_feature_name = predicted_feature_name,
                                     prediction_blob = predicted_probabilities_output)
        else:
            builder.set_class_labels(classes)

    # Set pre-processing parameters
    builder.set_pre_processing_parameters(image_input_names = image_input_names,
                                          is_bgr = is_bgr,
                                          red_bias = red_bias,
                                          green_bias = green_bias,
                                          blue_bias = blue_bias,
                                          gray_bias = gray_bias,
                                          image_scale = image_scale)

    # Return the protobuf spec
    spec = builder.spec
    return _MLModel(spec)
| [
"masterswanwhite@gmail.com"
] | masterswanwhite@gmail.com |
d00cc988246416b08ae75b55b63a8621cb26a24a | eded209ea550e624a5d55e0c510dbedad43ae2d0 | /ds_save_manager.py | 1aed05b7e41c4746e3fe0ce1e9d47694de0093e4 | [
"MIT"
] | permissive | kurtamohler/ds-save-manager | 9b941b06fd1182ba5369dc11daa1ffc16b1d447d | 99bcd15fc744357513590777a2d2ab8d16e5b540 | refs/heads/main | 2022-11-13T09:02:07.326142 | 2020-07-09T01:03:37 | 2020-07-09T01:03:37 | 278,220,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | #!/usr/bin/env python
import argparse
import os
import sys
import distutils.dir_util
import datetime
import warnings
def get_env_var_path(var_name, default_path_from_home):
    """Resolve a directory path from an environment variable.

    Returns the value of ``var_name`` when it is set; otherwise joins
    ``default_path_from_home`` onto $HOME.  Returns None when neither
    ``var_name`` nor HOME is set.
    """
    configured = os.environ.get(var_name)
    if configured is not None:
        return configured
    home = os.environ.get("HOME")
    if home is None:
        return None
    return os.path.join(home, default_path_from_home)
# Live game-save directory (default is the Steam/Proton prefix for Dark Souls
# Remastered, app id 570940); override with the DS_GAME_DIR environment variable.
game_dir = get_env_var_path("DS_GAME_DIR", ".steam/steam/steamapps/compatdata/570940/pfx/drive_c/users/steamuser/My Documents/NBGI/DARK SOULS REMASTERED")
# Directory where backups are stored; override with DS_SAVES_DIR.
saves_dir = get_env_var_path("DS_SAVES_DIR", "Documents/ds1_backups")
def get_saves():
    """Return the names of all existing backups in ``saves_dir``."""
    return os.listdir(saves_dir)
def list_saves():
    """Print the name of every existing backup, in sorted order."""
    print('Existing saves in dir "%s":' % saves_dir)
    for name in sorted(get_saves()):
        print(" %s" % name)
def parse_save_name(save_name):
    """Split a backup name 'DATE.IDX[-label]' into (DATE, IDX as int)."""
    parts = save_name.split('.')
    # The index part may carry a trailing '-label' suffix; strip it.
    index = int(parts[1].split('-')[0])
    return parts[0], index
def save(label):
    """Copy the game directory into a new date-stamped backup.

    The backup is named 'YYYY_MM_DD.NNN[-label]'; NNN is one past the
    highest index already used for today's date.
    """
    today = datetime.datetime.today().strftime('%Y_%m_%d')
    next_index = 0
    # Scan existing backups so today's new index is unique.
    for existing in get_saves():
        existing_date, existing_index = parse_save_name(existing)
        if existing_date == today and existing_index >= next_index:
            next_index = existing_index + 1
    new_save = today + '.%03d' % next_index
    if label:
        new_save += '-' + label
    new_save_path = os.path.join(saves_dir, new_save)
    print('Saving under name: %s' % new_save)
    print('Saving to directory: %s' % new_save_path)
    distutils.dir_util.copy_tree(game_dir, new_save_path)
def load(save_name):
    """Restore the named backup over the live game directory."""
    backup_path = os.path.join(saves_dir, save_name)
    print('Loading from directory: %s' % backup_path)
    distutils.dir_util.copy_tree(backup_path, game_dir)
if __name__ == "__main__":
    # Command-line entry point: exactly one of --list / --save / --load may
    # be chosen per invocation; --label only applies to --save.
    parser = argparse.ArgumentParser(
        description = 'Manage Dark Souls save files'
    )
    parser.add_argument('--list', action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument('--label', type=str)
    parser.add_argument('--load', type=str)
    parser.add_argument('--game-dir', type=str)
    parser.add_argument('--saves-dir', type=str)
    args = parser.parse_args()

    # The three actions are mutually exclusive; reject every pairing.
    if args.load and args.save:
        raise RuntimeError("Cannot load and save")
    if args.load and args.list:
        raise RuntimeError("Cannot load and list")
    if args.save and args.list:
        raise RuntimeError("Cannot save and list")

    # Optional overrides of the module-level default directories.
    if args.game_dir:
        game_dir = args.game_dir
    if args.saves_dir:
        saves_dir = args.saves_dir

    if args.list:
        if args.label:
            warnings.warn('"--label" arg is only used when saving')
        list_saves()
    elif args.save:
        save(args.label)
    elif args.load:
        if args.label:
            warnings.warn('"--label" arg is only used when saving')
        load(args.load)
    else:
        # No action requested: show usage on stderr.
        parser.print_help(sys.stderr)
| [
"kurtamohler@gmail.com"
] | kurtamohler@gmail.com |
24e0536adfa76c8b2e39c88fef49962d9a195ded | ea702cbc8bc9ed707982d6707d1e9bd9bfeabe23 | /maze.py | cfc304cc3b67af8928a670a74f6dc505100a6982 | [] | no_license | flyhawksz/study-algorithms | 8c0a84b65032843f551990fd610bfd169f5ac43e | b63c4ad1688a48e760b5ecea43015853f3739dc2 | refs/heads/master | 2021-08-26T08:37:22.377627 | 2017-11-22T13:59:21 | 2017-11-22T13:59:21 | 103,249,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,249 | py | # -*- coding: utf-8 -*-
# @Time : 2017-9-22 15:18
# @Author : zhangqi
# @File : maze.py
# @Software: PyCharm Community Edition
# Maze ADT
from myarray2d import Array2D
from Class_LStack import LStack
class Maze(object):
    """Maze ADT: a grid of cells solved with a stack-based depth-first search."""

    MAZE_WALL = "*"     # wall cell
    PATH_TOKEN = "x"    # cell on the path walked so far
    TRIED_TOKEN = "o"   # cell explored and found to be a dead end

    def __init__(self, numRows, numCols):
        """Create a numRows x numCols maze with no walls, start or exit set."""
        self._mazeCells = Array2D(numRows, numCols)
        self._startCell = None
        self._exitCell = None

    def numRows(self):
        """Number of rows in the maze."""
        return self._mazeCells.numRows()

    def numCols(self):
        """Number of columns in the maze."""
        return self._mazeCells.numCols()

    def setWall(self, row, col):
        """Mark cell (row, col) as a wall."""
        assert 0 <= row < self.numRows() and 0 <= col < self.numCols(), "Cell index out of range."
        self._mazeCells[row, col] = Maze.MAZE_WALL

    def setStart(self, row, col):
        """Set the search entry point to cell (row, col)."""
        assert 0 <= row < self.numRows() and 0 <= col < self.numCols(), "Cell index out of range."
        self._startCell = _CellPosition(row, col)

    def setExit(self, row, col):
        """Set the search exit point to cell (row, col)."""
        assert 0 <= row < self.numRows() and 0 <= col < self.numCols(), "Cell index out of range."
        self._exitCell = _CellPosition(row, col)

    def findPath(self):
        """Depth-first search from the start cell to the exit cell.

        Marks the found path with 'x' and dead ends with 'o'.
        Returns True when a path exists, False otherwise.
        """
        directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]  # N, E, S, W moves
        # BUG FIX: the original constructed `Stack()`, a name never defined
        # anywhere; the stack class imported at the top of the file is LStack.
        path = LStack()
        path.push(self._startCell)
        while not path.isEmpty():
            curPos = path.peek()  # top of stack is the current position
            # Check whether the current position is the exit.
            if self._exitFound(curPos.row, curPos.col):
                self._markPath(curPos.row, curPos.col)
                break
            # Count how many neighbouring cells are open.
            choices = 0
            for direction in directions:
                if self._validMove(curPos.row + direction[0], curPos.col + direction[1]):
                    choices += 1
            if choices == 0:
                # Dead end: mark the cell 'o' and backtrack.
                self._markTried(curPos.row, curPos.col)
                path.pop()
            else:
                # Mark the current cell 'x' and advance to the first open
                # neighbour; continue until a dead end or the exit is found.
                for direction in directions:
                    if self._validMove(curPos.row + direction[0], curPos.col + direction[1]):
                        self._markPath(curPos.row, curPos.col)
                        nextPos = _CellPosition(curPos.row + direction[0], curPos.col + direction[1])
                        path.push(nextPos)
                        break
        if len(path):
            return True
        else:
            return False

    def reset(self):
        """Remove all search markings, i.e. 'x' and 'o', from the maze."""
        for row in range(self.numRows()):
            for col in range(self.numCols()):
                # BUG FIX: the original tested `cell in 'ox'`, which raises
                # TypeError for unvisited cells (None is not a string); use
                # tuple membership instead.
                if self._mazeCells[row, col] in ('o', 'x'):
                    self._mazeCells[row, col] = None

    def draw(self):
        """Print the maze; unvisited cells are drawn as '.'."""
        for row in range(self.numRows()):
            line_text = ''  # renamed from `str`, which shadowed the builtin
            for col in range(self.numCols()):
                if self._mazeCells[row, col] is not None:
                    line_text += self._mazeCells[row, col]
                else:
                    line_text += '.'
            # Parenthesised so this prints identically under Python 2 and 3.
            print(line_text)

    def _validMove(self, row, col):
        """True when (row, col) is inside the maze and still open/unvisited."""
        return 0 <= row < self.numRows() and 0 <= col < self.numCols() and self._mazeCells[row, col] is None

    def _exitFound(self, row, col):
        """True when (row, col) is the exit cell."""
        return row == self._exitCell.row and col == self._exitCell.col

    def _markTried(self, row, col):
        """Mark (row, col) as a dead end."""
        self._mazeCells[row, col] = Maze.TRIED_TOKEN

    def _markPath(self, row, col):
        """Mark (row, col) as part of the current path."""
        self._mazeCells[row, col] = Maze.PATH_TOKEN
# Simple storage class for a cell coordinate.
class _CellPosition(object):
    """Lightweight record of a (row, col) cell coordinate."""

    def __init__(self, row, col):
        self.row, self.col = row, col
# Build a maze from a description file, then solve it.
# Construct the maze
def buildMaze(filename):
    """Read a maze description file and return the corresponding Maze.

    File format: first line 'nrows ncols', second line the start 'row col',
    third line the exit 'row col', followed by nrows lines of maze text in
    which '*' marks a wall.
    """
    with open(filename, 'r') as infile:
        nrows, ncols = readValuePair(infile)  # maze dimensions
        maze = Maze(nrows, ncols)             # create and initialise the maze
        row, col = readValuePair(infile)
        maze.setStart(row, col)               # start position from given coords
        row, col = readValuePair(infile)
        maze.setExit(row, col)                # exit position
        # Set the walls.
        for row in range(nrows):
            line = infile.readline()
            for col in range(len(line)):
                if line[col] == "*":
                    maze.setWall(row, col)
        # The original's explicit infile.close() was redundant inside the
        # `with` block and has been removed.
    return maze
# Helper: read a pair of integer values from the given file.
def readValuePair(infile):
    """Read one line from `infile` and return its two whitespace-separated
    fields as a pair of ints (raises ValueError unless exactly two)."""
    first, second = infile.readline().split()
    return int(first), int(second)
def main():
    # Build the maze from its on-disk description, then attempt to solve it.
    maze = buildMaze("mazefile.txt")
    if maze.findPath():
        # Python 2 print statements: this file predates Python 3.
        print "Path found ..."
        maze.draw()
    else:
        print "Path not found ..."
if __name__ == "__main__":
main() | [
"30296157+flyhawksz@users.noreply.github.com"
] | 30296157+flyhawksz@users.noreply.github.com |
fb4551a5cb98b91e4fc032fac6cb75e762397dab | e8a9acb44199832e1f88cd69bcbae6a70eaeb47f | /src/SquareLattice1.py | cdb466604dfc176e2d35d1ac2843b57a01ac7c80 | [
"MIT"
] | permissive | juampabonilla1/summer-project-2021 | df81b2448cf6acda4f09bf493f5c5970f6323998 | 4b1f8148f2094228438202e7e6268c4dde19c537 | refs/heads/master | 2023-03-10T09:34:16.957483 | 2021-02-18T23:16:38 | 2021-02-18T23:16:38 | 340,182,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,120 | py | import Qubit
import Plaquette
class SquareLattice1:
def __init__(self, size):
"""
Initialisation of LxL periodic toric code lattice.
Notes:
* The dimension L of the LxL lattice must be even.
* The lattice has alternating parity/colour plaquettes.
Chessboard-like with dark and light plaquettes.
* X errors light up the 2 nearby dark plaquettes.
* Y errors light up all 4 nearby plaquettes.
* Z errors light up the 2 nearby light plaquettes.
* Example layout of a 4x4 lattice. The boundaries are called 'edge'
(leftmost) and 'middle' (rightmost), which becomes useful terminology
when concatenating lattices.
(0,0)---(0,1)---(0,2)---(0,3)---(0,0)
| | | | |
| | . | | . |
| | | | |
(1,0)---(1,1)---(1,2)---(1,3)---(1,0)
| | | | |
| . | | . | |
| | | | |
(2,0)---(2,1)---(2,2)---(2,3)---(2,0)
| | | | |
| | . | | . |
| | | | |
(3,0)---(3,1)---(3,2)---(3,3)---(3,0)
| | | | |
| . | | . | |
| | | | |
(4,0)---(4,1)---(4,2)---(4,3)---(4,0)
^ ^
| |
Edge boundary Middle boundary
:param size: Dimension L of LxL lattice.
:type size: int
"""
assert size % 2 == 0
self.size = size
self.qubits = [[[] for _ in range(size)] for _ in range(size)]
self.plaquettes = [[[] for _ in range(size)] for _ in range(size)]
# Initialise qubits and parity check bits to trivial state.
for i in range(size):
for j in range(size):
self.qubits[i][j] = Qubit.Qubit('I')
self.plaquettes[i][j] = Plaquette.Plaquette(0)
def apply_Y(self, i, j, boundary='none'):
"""
Apply Y operator to qubit at position (i, j) in the lattice.
Note:
* Y error lights up NW, NE, SW and SE plaquettes.
:param i: Row position of the qubit
:type i: int
:param j: Column position of the qubit
:type j: int
"""
L = self.size
if boundary == 'middle':
self.qubits[i][j].apply_Y()
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
self.plaquettes[i][(j - 1) % L].flip() # SW
elif boundary == 'edge':
self.plaquettes[(i - 1) % L][j].flip() # NE
self.plaquettes[i][j].flip() # SE
elif boundary == 'none':
L = self.size
self.qubits[i][j].apply_Y()
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
self.plaquettes[(i - 1) % L][j].flip() # NE
self.plaquettes[i][(j - 1) % L].flip() # SW
self.plaquettes[i][j].flip() # SE
else:
assert True is False
def apply_Z(self, i, j, boundary='none'):
"""
Apply Z operator to qubit at position (i,j) in the lattice.
Note:
* Z error lights up even parity neighbour plaquettes.
:param i: Row position of the qubit
:type i: int
:param j: Column position of the qubit
:type j: int
"""
L = self.size
if (i + j) % 2 == 1:
if boundary == 'middle':
self.qubits[i][j].apply_Z()
self.plaquettes[i][(j - 1) % L].flip() # SW
elif boundary == 'edge':
self.plaquettes[(i - 1) % L][j].flip() # NE
elif boundary == 'none':
# NE and SW plaquettes are even parity.
self.qubits[i][j].apply_Z()
self.plaquettes[(i - 1) % L][j].flip() # NE
self.plaquettes[i][(j - 1) % L].flip() # SW
else:
assert True is False
else:
if boundary == 'middle':
self.qubits[i][j].apply_Z()
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
elif boundary == 'edge':
self.plaquettes[i][j].flip() # SE
elif boundary == 'none':
self.qubits[i][j].apply_Z()
# NW and SE plaquettes are even parity.
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
self.plaquettes[i][j].flip() # SE
else:
assert True is False
def apply_X(self, i, j, boundary='none'):
"""
Apply X operator to qubit at position (i,j) in the lattice.
Note:
* X error lights up odd parity neighbour plaquettes.
:param i: Row position of the qubit
:type i: int
:param j: Column position of the qubit
:type j: int
"""
L = self.size
if (i + j) % 2 == 1:
if boundary == 'middle':
self.qubits[i][j].apply_X()
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
elif boundary == 'edge':
self.plaquettes[i][j].flip() # SE
elif boundary == 'none':
self.qubits[i][j].apply_X()
# NW and SE plaquettes are odd parity.
self.plaquettes[(i - 1) % L][(j - 1) % L].flip() # NW
self.plaquettes[i][j].flip() # SE
else:
assert True is False
else:
if boundary == 'middle':
self.qubits[i][j].apply_X()
self.plaquettes[i][(j - 1) % L].flip() # SW
elif boundary == 'edge':
self.plaquettes[(i - 1) % L][j].flip() # NE
elif boundary == 'none':
self.qubits[i][j].apply_X()
# NE and SW plaquettes are odd parity.
self.plaquettes[(i - 1) % L][j].flip() # NE
self.plaquettes[i][(j - 1) % L].flip() # SW
else:
assert True is False
def apply_stabiliser(self, stab):
"""
Apply a stabiliser to the lattice provided a plquette coordinate.
:param stab: The coordinates of the plquette on which the stabiliser
should be applied. In the form stab=(x,y).
:type stab: tuple of int
"""
L = self.size
x, y = stab
corners = [(x, y), ((x + 1) % L, y), (x, (y + 1) % L),
((x + 1) % L, (y + 1) % L)]
if (x + y) % 2 == 0:
for i, j in corners:
self.apply_X(i, j)
else:
for i, j in corners:
self.apply_Z(i, j)
def cast_qubit_state_to_list(self):
"""
Packages the qubit state of the lattice into a list of coordinates
where the Pauli errors are located.
:return: List of coordinates where Pauli X, Y and Z errors are located
in the lattice.
:rtype: List of list of tuples.
"""
L = self.size
X_coords, Y_coords, Z_coords = [], [], []
for i in range(L):
for j in range(L):
q_state = self.qubits[i][j].state
if q_state == 'X':
X_coords.append((i, j))
elif q_state == 'Y':
Y_coords.append((i, j))
elif q_state == 'Z':
Z_coords.append((i, j))
else:
assert q_state == 'I'
return [X_coords, Y_coords, Z_coords]
def weight(self):
"""
The weight of the Pauli operator present in the lattice.
:return: The number of qubits where a non-trivial Pauli operator
acts on.
:rtype: int
"""
L = self.size
wt = 0
for i in range(L):
for j in range(L):
if self.qubits[i][j].state != 'I':
wt += 1
return wt
def apply_correction_from_lst(self, lst):
"""
Applies a correction operator to the lattice given a list of
coordinates where Pauli X, Y and Z operators act on.
:param lst: List of coordinates where X, Y and Z errors act on.
:type lst: List of list of tuple.
"""
X_corr, Y_corr, Z_corr = lst
for i, j in X_corr:
self.apply_X(i, j)
for i, j in Y_corr:
self.apply_Y(i, j)
for i, j in Z_corr:
self.apply_Z(i, j)
    def correct_in_lattice1(self, v, u, dec_meth='part_of_glued_lattice'):
        """
        Applies a correction to the lattice given the coordinates of the
        matched defects and whether the lattice is part of a concatenated
        glued lattice object or a standalone standard toric code lattice.

        :param v: Coordinate of first defect in the form 'x1,y1,lat1'. The
        final lat information dictates which lattice the defect is on. Either
        1 for main lattice, 2 for conditional lattice or 3 to indicate it's
        a dummy defect.
        :type v: str
        :param u: Coordinate of second defect in the form 'x2,y2,lat2'. The
        final lat information dictates which lattice the defect is on. Either
        1 for main lattice, 2 for conditional lattice or 3 to indicate it's
        a dummy defect.
        :type u: str
        :param dec_meth: Either 'part_of_glued_lattice', or 'standard'.
        :type dec_meth: str
        """
        L = self.size
        x1, y1, lat1 = v.split(",")
        x2, y2, lat2 = u.split(",")
        x1, y1, lat1 = int(x1), int(y1), int(lat1)
        x2, y2, lat2 = int(x2), int(y2), int(lat2)
        # Account for dummy defects whose lattice is flagged as 3 but they are
        # really in lattice 2 (the conditional lattice).
        if lat1 == 3:
            lat1 = 2
        if lat2 == 3:
            lat2 = 2
        if lat1 == lat2:
            # NOTE(review): indentation reconstructed -- the same-lattice
            # correction below is applied only when both defects sit on this
            # (main) lattice; a pair both on lattice 2 needs no correction
            # here.  Confirm against the original file's layout.
            if lat1 == 1:
                assert lat2 == 1
                # Matched defects always share plaquette parity (colour).
                assert (x1 + y1) % 2 == (x2 + y2) % 2
                if (x1 + y1) % 2 == 0:
                    # Correct along the white plaquettes.
                    if y1 <= y2:
                        self.correct_along_X_or_Z_symmetry(x1, y1, x2,
                                                           y2, dec_meth,
                                                           parity=0)
                    else:
                        self.correct_along_X_or_Z_symmetry(x2, y2, x1,
                                                           y1, dec_meth,
                                                           parity=0)
                else:
                    # Correct along the dark plaquettes.
                    if y1 <= y2:
                        self.correct_along_X_or_Z_symmetry(x1, y1, x2,
                                                           y2, dec_meth,
                                                           parity=1)
                    else:
                        self.correct_along_X_or_Z_symmetry(x2, y2, x1,
                                                           y1, dec_meth,
                                                           parity=1)
        else:
            # Make x1, y1 the coordinates of defect in lattice 1.
            if lat1 == 2:
                x1, x2 = x2, x1
                y1, y2 = y2, y1
            # Record parity of first defect, so we know along which plaquettes
            # to move in lattice 1.
            parity = (x1 + y1) % 2
            # Check whether you are matching across the middle boundary or the
            # edge boundaries
            # TODO: update these weights which clearly imply that A=B1=B2.
            bd_middle_correction_ly_wt = (L + y2) - y1
            bd_edge_correction_ly_wt = 2 * L - ((L + y2) - y1)
            if bd_middle_correction_ly_wt <= bd_edge_correction_ly_wt:
                # Cross the 'middle' (rightmost) boundary; move east.
                ly_in_lat1 = L - y1
                # If defect in lattice 2 is above or at same level as defect
                # in lattice 1.
                if x2 <= x1:
                    if x1 - x2 <= L - (x1 - x2):
                        # Go up.
                        steps_up = min([ly_in_lat1, x1 - x2])
                        horizontal_steps = ly_in_lat1 - steps_up
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_up):
                            x, y = (x1 - j) % L, (y1 + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Account for potentially not moving vertically.
                        if steps_up == 0:
                            x, y = x % L, (y) % L
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                    else:
                        # Go down.
                        steps_down = min([ly_in_lat1, L - (x1 - x2)])
                        horizontal_steps = ly_in_lat1 - steps_down
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_down):
                            x, y = (x1 + j + 1) % L, (y1 + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Check that you moved vertically.
                        assert steps_down > 0
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                else:  # If defect in lattice 2 is below defect in lattice 1.
                    if x2 - x1 <= L - (x2 - x1):
                        # Go down.
                        steps_down = min([ly_in_lat1, x2 - x1])
                        horizontal_steps = ly_in_lat1 - steps_down
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_down):
                            x, y = (x1 + j + 1) % L, (y1 + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Account for potentially not moving vertically.
                        if steps_down == 0:
                            x, y = x % L, (y) % L
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                    else:
                        # Go up.
                        steps_up = min([ly_in_lat1, L - (x2 - x1)])
                        horizontal_steps = ly_in_lat1 - steps_up
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_up):
                            x, y = (x1 - j) % L, (y1 + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Check that you moved vertically.
                        assert steps_up > 0
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new + j + 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
            elif bd_middle_correction_ly_wt > bd_edge_correction_ly_wt:
                # Cross the 'edge' (leftmost) boundary instead; move west.
                ly_in_lat1 = y1 + 1
                # If defect in lattice 2 is above or at same level as defect
                # in lattice 1.
                if x2 <= x1:
                    if x1 - x2 <= L - (x1 - x2):
                        # Go up.
                        steps_up = min([ly_in_lat1, x1 - x2])
                        horizontal_steps = ly_in_lat1 - steps_up
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_up):
                            x, y = (x1 - j) % L, (y1 - j) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Account for potentially not moving vertically.
                        if steps_up == 0:
                            x, y = x % L, (y + 1) % L
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new - j - 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                    else:
                        # Go down.
                        steps_down = min([ly_in_lat1, L - (x1 - x2)])
                        horizontal_steps = ly_in_lat1 - steps_down
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_down):
                            x, y = (x1 + j + 1) % L, (y1 - j) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Check that you moved vertically.
                        assert steps_down > 0
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new - j - 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                else:  # If defect in lattice 2 is below defect in lattice 1.
                    if x2 - x1 <= L - (x2 - x1):
                        # Go down.
                        steps_down = min([ly_in_lat1, x2 - x1])
                        horizontal_steps = ly_in_lat1 - steps_down
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_down):
                            x, y = (x1 + j + 1) % L, (y1 - j) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Account for potentially not moving vertically.
                        if steps_down == 0:
                            x, y = x % L, (y + 1) % L
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new - j - 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                    else:
                        # Go up.
                        steps_up = min([ly_in_lat1, L - (x2 - x1)])
                        horizontal_steps = ly_in_lat1 - steps_up
                        # Apply correction, starting at x1, y1 and moving
                        # until boundary is reached.
                        x, y = x1, y1
                        for j in range(steps_up):
                            x, y = (x1 - j) % L, (y1 - j) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
                        # Make sure that you moved vertically.
                        assert steps_up > 0
                        x_new, y_new = x, y
                        for j in range(horizontal_steps):
                            x, y = x_new % L, (y_new - j - 1) % L
                            self.apply_X(x, y) if parity == 1 else \
                                self.apply_Z(x, y)
def correct_along_X_or_Z_symmetry(self, x1, y1, x2, y2, dec_meth, parity):
    """
    Finds and applies a correction given two defects in a toric code
    lattice which can either be standalone or as part of a concatenated
    glued lattice object.

    The walk starts at (x1, y1), first moves diagonally for
    min(horizontal, vertical) steps, then finishes with a purely
    horizontal or purely vertical segment towards (x2, y2). On a glued
    lattice the first or last applied gate may be undone and replaced by
    a boundary operator.
    :param x1: Row coordinate of first defect.
    :type x1: int
    :param y1: Column coordinate of first defect.
    :type y1: int
    :param x2: Row coordinate of second defect.
    :type x2: int
    :param y2: Column coordinate of second defect.
    :type y2: int
    :param dec_meth: Either 'part_of_glued_lattice', or 'standard'.
    :type dec_meth: str
    :param parity: Determines which symmetry we move along. 0 for Z
    symmetry. 1 for X symmetry.
    :type parity: int
    """
    # Order is important! Start at (x1, y1) and match inside the lattice
    # (horizontally) to get to (x2, y2).
    L = self.size
    # Check if we are crossing boundaries.
    bd = None
    if y1 == -1:
        bd = 'edge'
    elif y2 == L:
        bd = 'middle'
    # Check that second defect is to the right of first defect.
    assert y2 >= y1
    if parity == 1:
        # Check that points lie on X symmetry.
        assert (x1 + y1) % 2 == 1
        assert (x2 + y2) % 2 == 1
    else:
        # Check that points lie on Z symmetry.
        assert (x1 + y1) % 2 == 0
        assert (x2 + y2) % 2 == 0
    # Case 1: the short horizontal way is inside the lattice, or we must
    # stay inside because the lattice is part of a glued pair.
    if (y2 - y1 <= L - (y2 - y1)) or dec_meth == 'part_of_glued_lattice':
        # Correct inside the lattice.
        ly = y2 - y1 # Horizontal distance between defects.
        # If 2nd defect is above or at same level as 1st defect.
        if x2 <= x1:
            if x1 - x2 <= L - (x1 - x2):
                lx = x1 - x2
                # Go up.
                steps_up = min([ly, lx])
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                # Keep track of where you first applied a gate.
                x_ini, y_ini = None, None
                for j in range(steps_up):
                    x, y = (x1 - j) % L, (y1 + j + 1) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                    if x_ini is None:
                        x_ini, y_ini = x, y
                # Finish horizontally/vertically; the bd == 'middle' term
                # skips the horizontal finish when already at the boundary.
                if lx < ly and ((bd is None) or (bd == 'edge') or ((bd == 'middle') and ((y2 - y) % L > 0))):
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_up > 0:
                        x, y = (x - 1) % L, y % L
                    x, y, x0, y0 = self.correct_horizontally(x, y, x2, y2, parity)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                elif lx >= ly and min([abs(x2 - x1), L - abs(x2 - x1)]) > 0:
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_up > 0:
                        x, y = (x - 1) % L, y % L
                    x, y, x0, y0 = self.correct_vertically(x, y, x2, y2, parity, up=True)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                x_md, y_md = x, y
                x_ed, y_ed = x_ini, y_ini
            else:
                lx = L - (x1 - x2)
                # Go down.
                steps_down = min([ly, lx])
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                x_ini, y_ini = None, None # Keep track of where you first applied a gate.
                for j in range(steps_down):
                    x, y = (x1 + j + 1) % L, (y1 + j + 1) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                    if x_ini is None:
                        x_ini, y_ini = x, y
                if lx < ly and ((bd is None) or (bd == 'edge') or ((bd == 'middle') and ((y2 - y) % L > 0))):
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_down > 0:
                        x, y = x % L, y % L
                    x, y, x0, y0 = self.correct_horizontally(x, y, x2, y2, parity)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                elif lx >= ly and min([abs(x2 - x1), L - abs(x2 - x1)]) > 0:
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_down > 0:
                        x, y = x % L, y % L
                    x, y, x0, y0 = self.correct_vertically(x, y, x2, y2, parity, up=False)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                x_md, y_md = x, y
                x_ed, y_ed = x_ini, y_ini
        else: # If 2nd defect is below 1st defect.
            if x2 - x1 <= L - (x2 - x1):
                lx = x2 - x1
                # Go down.
                steps_down = min([ly, lx])
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                x_ini, y_ini = None, None # Keep track of where you first applied a gate.
                for j in range(steps_down):
                    x, y = (x1 + j + 1) % L, (y1 + j + 1) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                    if x_ini is None:
                        x_ini, y_ini = x, y
                if lx < ly and ((bd is None) or (bd == 'edge') or ((bd == 'middle') and ((y2 - y) % L > 0))):
                    # Transform back from qubit coord to plaquette coord if moved
                    if steps_down > 0:
                        x, y = x % L, y % L
                    x, y, x0, y0 = self.correct_horizontally(x, y, x2, y2, parity)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                elif lx >= ly and min([abs(x2 - x1), L - abs(x2 - x1)]) > 0:
                    # Transform back from qubit coord to plaquette coord if moved
                    if steps_down > 0:
                        x, y = x % L, y % L
                    x, y, x0, y0 = self.correct_vertically(x, y, x2, y2, parity, up=False)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                x_md, y_md = x, y
                x_ed, y_ed = x_ini, y_ini
            else:
                lx = L - (x2 - x1)
                # Go up.
                steps_up = min([ly, lx])
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                x_ini, y_ini = None, None # Keep track of where you first applied a gate.
                for j in range(steps_up):
                    x, y = (x1 - j) % L, (y1 + j + 1) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                    if x_ini is None:
                        x_ini, y_ini = x, y
                if lx < ly and ((bd is None) or (bd == 'edge') or ((bd == 'middle') and ((y2 - y) % L > 0))):
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_up > 0:
                        x, y = (x - 1) % L, y % L
                    x, y, x0, y0 = self.correct_horizontally(x, y, x2, y2, parity)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                elif lx >= ly and min([abs(x2 - x1), L - abs(x2 - x1)]) > 0:
                    # Transform back from qubit coord to plaquette coord if moved.
                    if steps_up > 0:
                        x, y = (x - 1) % L, y % L
                    x, y, x0, y0 = self.correct_vertically(x, y, x2, y2, parity, up=True)
                    if x_ini is None:
                        x_ini, y_ini = x0, y0
                x_md, y_md = x, y
                x_ed, y_ed = x_ini, y_ini
        # On a glued lattice, fix up the correction chain at the boundary:
        # (x_md, y_md) is the last gate applied, (x_ed, y_ed) the first.
        if bd == 'middle':
            # Undo last correction and apply appropriate boundary
            # operator.
            self.apply_X(x_md, y_md) if parity == 1 else self.apply_Z(x_md, y_md)
            self.apply_X(x_md, y_md, bd) if parity == 1 else \
                self.apply_Z(x_md, y_md, bd)
        elif bd == 'edge':
            # Undo first correction and apply appropriate boundary
            # operator.
            assert x_ed is not None and y_ed is not None
            self.apply_X(x_ed, y_ed) if parity == 1 else self.apply_Z(x_ed, y_ed)
            self.apply_X(x_ed, y_ed, bd) if parity == 1 else \
                self.apply_Z(x_ed, y_ed, bd)
        else:
            assert bd is None
    elif (y2 - y1 > L - (y2 - y1)):
        assert dec_meth == 'standard'
        # Correct around the lattice.
        # TODO: Fill this method.
        ly = L - (y2 - y1) # Horizontal distance between defects.
        # If 2nd defect is above or at same level as 1st defect.
        if x2 <= x1:
            if x1 - x2 <= L - (x1 - x2):
                lx = x1 - x2
                # Go up.
                steps_up = min([ly, lx])
                horizontal_steps = ly - steps_up
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                for j in range(steps_up):
                    x, y = (x1 - j) % L, (y1 - j) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                # Transform back from qubit coord to plaquette coord if moved.
                if steps_up > 0:
                    x, y = (x - 1) % L, (y - 1) % L
                if lx < ly:
                    self.correct_horizontally_to_left(x, y, x2, y2, parity)
                else:
                    self.correct_vertically(x, y, x2, y2, parity, up=True)
            else:
                lx = L - (x1 - x2)
                # Go down.
                steps_down = min([ly, lx])
                horizontal_steps = ly - steps_down
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                for j in range(steps_down):
                    x, y = (x1 + j + 1) % L, (y1 - j) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                # Transform back from qubit coord to plaquette coord if moved.
                if steps_down > 0:
                    x, y = x % L, (y - 1) % L
                if lx < ly:
                    self.correct_horizontally_to_left(x, y, x2, y2, parity)
                else:
                    self.correct_vertically(x, y, x2, y2, parity, up=False)
        else: # If 2nd defect is below 1st defect.
            if x2 - x1 <= L - (x2 - x1):
                lx = x2 - x1
                # Go down.
                steps_down = min([ly, lx])
                horizontal_steps = ly - steps_down
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                for j in range(steps_down):
                    x, y = (x1 + j + 1) % L, (y1 - j) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                # Transform back from qubit coord to plaquette coord if moved
                if steps_down > 0:
                    x, y = x % L, (y - 1) % L
                if lx < ly:
                    self.correct_horizontally_to_left(x, y, x2, y2, parity)
                else:
                    self.correct_vertically(x, y, x2, y2, parity, up=False)
            else:
                lx = L - (x2 - x1)
                # Go up.
                steps_up = min([ly, lx])
                horizontal_steps = ly - steps_up
                # Apply correction, starting at x1, y1 and ending at x2, y2.
                x, y = x1, y1
                for j in range(steps_up):
                    x, y = (x1 - j) % L, (y1 - j) % L
                    self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
                # Transform back from qubit coord to plaquette coord if moved.
                if steps_up > 0:
                    x, y = (x - 1) % L, (y - 1) % L
                if lx < ly:
                    self.correct_horizontally_to_left(x, y, x2, y2, parity)
                else:
                    self.correct_vertically(x, y, x2, y2, parity, up=True)
def correct_horizontally(self, x1, y1, x2, y2, parity):
    """
    Finds and applies a correction between two defects which are on the
    same row, moving right from the first defect to the second.
    :param x1: Row coordinate of first defect.
    :type x1: int
    :param y1: Column coordinate of first defect.
    :type y1: int
    :param x2: Row coordinate of second defect.
    :type x2: int
    :param y2: Column coordinate of second defect.
    :type y2: int
    :param parity: Determines which symmetry we move along. 0 for Z
    symmetry. 1 for X symmetry.
    :type parity: int
    :return: The coordinates where the first gate is applied, (x0,y0), and
    the coordinates where the last gate is applied, (x,y), as the tuple
    (x, y, x0, y0). (x0, y0) is (None, None) when no gate was applied.
    :rtype: tuple
    """
    L = self.size
    # Initialise (x, y) up front like correct_vertically() does; without
    # this, a call with y2 <= y1 (zero loop iterations) raised
    # UnboundLocalError on the return statement below.
    x, y = x1, y1
    x0, y0 = None, None
    for j in range(y2 - y1):
        x, y = x1 % L, (y1 + 1 + j) % L
        self.apply_X(x, y) if parity == 1 else self.apply_Z(x, y)
        if x0 is None:
            x0, y0 = x, y
    return (x, y, x0, y0)
def correct_horizontally_to_left(self, x1, y1, x2, y2, parity):
    """
    Applies a chain of single-qubit corrections between two defects lying
    on the same row, walking leftwards from the first defect towards the
    second (possibly wrapping around the periodic boundary).
    :param x1: Row coordinate of first defect.
    :type x1: int
    :param y1: Column coordinate of first defect.
    :type y1: int
    :param x2: Row coordinate of second defect.
    :type x2: int
    :param y2: Column coordinate of second defect.
    :type y2: int
    :param parity: Determines which symmetry we move along. 0 for Z
    symmetry. 1 for X symmetry.
    :type parity: int
    """
    L = self.size
    # Number of gates needed to reach the second defect going left.
    if y1 >= y2:
        n_steps = (y1 - y2) % L
    else:
        n_steps = L - (y2 - y1)
    row = x1 % L
    for step in range(n_steps):
        col = (y1 - step) % L
        if parity == 1:
            self.apply_X(row, col)
        else:
            self.apply_Z(row, col)
def correct_vertically(self, x1, y1, x2, y2, parity, up):
    """
    Applies a chain of single-qubit corrections between two defects lying
    on the same column, moving either up or down from the first defect.
    :param x1: Row coordinate of first defect.
    :type x1: int
    :param y1: Column coordinate of first defect.
    :type y1: int
    :param x2: Row coordinate of second defect.
    :type x2: int
    :param y2: Column coordinate of second defect.
    :type y2: int
    :param parity: Determines which symmetry we move along. 0 for Z
    symmetry. 1 for X symmetry.
    :type parity: int
    :param up: Whether to move up starting at the first defect or not
    (i.e. move down).
    :type up: bool
    :return: The coordinates where the first gate is applied, (x0,y0), and
    the coordinates where the last gate is applied, (x,y), as the tuple
    (x, y, x0, y0). (x0, y0) is (None, None) when no gate was applied.
    :rtype: tuple
    """
    L = self.size
    # Shortest vertical distance on the torus.
    n_steps = min(abs(x2 - x1), L - abs(x2 - x1))
    x, y = x1, y1
    x0, y0 = None, None
    col = y1 % L
    for j in range(n_steps):
        x = (x1 - j) % L if up else (x1 + j + 1) % L
        y = col
        if parity == 1:
            self.apply_X(x, y)
        else:
            self.apply_Z(x, y)
        if x0 is None:
            x0, y0 = x, y
    return (x, y, x0, y0)
def is_in_trivial_state_X1(self):
    """
    Checks for type 1 logical operators. These are rows of X operators
    (XXXX) or any multiplication of these by stabilisers. Z errors are
    ignored.
    :return: Whether it is in trivial state with respect to the logical
    operators or not.
    :rtype: Bool
    """
    L = self.size
    # Check the parity of X/Y flips along every interior column; boundary
    # columns are excluded because the applied correction is only true up
    # to the boundaries, where remnant defects may remain.
    for col in range(1, L - 1):
        flips = sum(
            1 for row in range(L)
            if self.qubits[row][col].state in ('X', 'Y')
        )
        if flips % 2 == 1:
            return False
    return True
def is_in_code_space(self):
    """
    Checks whether the state of the lattice is in the code space (no
    plaquette defects) and therefore suitable for making an inference
    about its logical error state.
    :return: Whether it is in the code space or not.
    :rtype: Bool
    """
    L = self.size
    # In the code space exactly when no plaquette carries a defect.
    return not any(
        self.plaquettes[i][j].state == 1
        for i in range(L) for j in range(L)
    )
def are_defects_at_boundary(self):
    """
    Checks whether the defects on the lattice all sit on the boundary
    plaquettes (first/last column).
    Notes:
    * This method should be used to check that the remnant defects all lie
    at the boundary after a correction is applied when the lattice is
    part of a concatenated glued lattice object.
    :return: Whether the defects in the lattice lie at the boundary
    plaquettes or not.
    :rtype: Bool
    """
    L = self.size
    # Scan only the interior columns; a defect found there means failure.
    for row in range(L):
        for col in range(1, L - 1):
            if self.plaquettes[row][col].state == 1:
                return False
    return True
def print_plaquettes(self):
    """
    Prints the plaquette state of the lattice (i.e. where the defects
    lie), one row per line, followed by a blank separator line.
    """
    for row in range(self.size):
        line = "".join(
            str(self.plaquettes[row][col].state)
            for col in range(self.size)
        )
        print(line)
    print()
def __repr__(self):
    """Return the qubit states as an L x L character grid, one row per line."""
    rows = []
    for i in range(self.size):
        row = "".join(self.qubits[i][j].state for j in range(self.size))
        rows.append(row + '\n')
    return "".join(rows)
| [
"juampabonilla1@hotmail.com"
] | juampabonilla1@hotmail.com |
49181e7f9bd1b2e8a87fc583abc666ec7df2d547 | 931e7e4752d9d57d3193269ad55ff0ef917c75f5 | /Problems/adjacent_dups.py | faadbf1f1e894f5c86c2fb90063dbbc514d9398d | [
"MIT"
] | permissive | rohanaurora/daily-coding-challenges | 40f1d389b5cedb49fc7811a4646035b71a8507fd | a40340e5a4f15a5f7b66a64f98dc149a66ebd945 | refs/heads/master | 2023-04-13T01:48:27.195719 | 2021-04-16T19:55:45 | 2021-04-16T19:55:45 | 267,232,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | # Remove All Adjacent Duplicates In String
# Given a string S of lowercase letters, a duplicate removal consists of choosing two adjacent and equal letters, and
# removing them.
# We repeatedly make duplicate removals on S until we no longer can. Return the final string after all such duplicate
# removals have been made. It is guaranteed the answer is unique.
#
# Input: "abbaca"
# Output: "ca"
#
# Source - https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string/
class Solution:
    """Stack-based removal of adjacent equal letter pairs."""

    def removeDuplicates(self, S):
        """Repeatedly delete adjacent duplicate pairs and return the rest.

        A character cancels the top of the stack when they match;
        otherwise it is pushed. What remains is the final string.
        """
        stack = []
        for ch in S:
            if stack and stack[-1] == ch:
                stack.pop()
            else:
                stack.append(ch)
        return "".join(stack)
# Demo: run the stack-based duplicate removal on a sample input.
# (Renamed the locals: `input` shadowed the builtin of the same name.)
text = "abbaca"
result = Solution().removeDuplicates(text)
print(text)
print(result)
| [
"rohanaurora@gmail.com"
] | rohanaurora@gmail.com |
35727edd3e3b18e4298f87b750ad406076489f4c | a3e1283726b1b9e0c6280d193f9d29d129060981 | /server/speedy-notifier/dev.conf | 2bb01b320d320e5b25b62b5053144d68da36f2c8 | [
"Apache-2.0"
] | permissive | michaelmaguire/twosidedsearch | f3d89811e5ce5ba99f717fd2d3e1058fc35fb2ea | 67ec32c92ffdeb222b7fdf69cb0d9c8270dbcd0e | refs/heads/master | 2021-01-10T06:31:18.018244 | 2015-11-09T02:33:54 | 2015-11-09T02:33:54 | 45,808,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | conf | # -*- Mode: Python -*-
#
# This is the configuration file for 'speedy_notifier'.
GOOGLE_BATCH_SIZE=50
GOOGLE_MAX_REQUESTS_PER_SECOND=1
GOOGLE_INITIAL_BACKOFF=1
GOOGLE_MAX_BACKOFF=60
GOOGLE_API_KEY="AIzaSyDlHRufS28XTavwzHFjj6eSlPeiT3dgN9k"
APPLE_BATCH_SIZE=100
APPLE_MAX_REQUESTS_PER_SECOND=1
APPLE_INITIAL_BACKOFF=1
APPLE_MAX_BACKOFF=60
APPLE_CERT_PATH=""
LOG_NAME="test.log"
LOG_KEEP=5
LOG_LEVEL=10 # 10=DEBUG, 20=INFO
POSTGRES_DSN="dbname=speedy_hack user=speedycrew"
| [
"munro@ip9.org"
] | munro@ip9.org |
89a1524551dcef2f11f6d445427877eb4b1450a4 | 21609f44b26d68aeffe9547b38735043c1940159 | /list03.py | 90b63ee1bb770db4565bbf961512ea4bce84162e | [] | no_license | rlatmdwn01/Python | 85a8a6a7612cc2fbdb166518f285223970ce21f3 | 21b405aab9f4f532d780ebc66fc05b8267eae3c6 | refs/heads/main | 2023-01-20T03:33:19.670856 | 2020-11-18T08:58:17 | 2020-11-18T08:58:17 | 313,662,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | list_a=[0,1,2,3,4,5]
# Demonstrates two ways of removing a single element from a list.
print("#리스트의 요소 하나 제거하기")
# Removal method [1] - the del statement (removes by index)
del list_a[1]
print("del list_a[1]:", list_a)
# Removal method [2] - pop() (removes by index and returns the element)
list_a.pop(2)
print("pop(2):",list_a)
"noreply@github.com"
] | noreply@github.com |
529921d89111639317d1c7c9899a0115acb7e879 | b40aecca82f9aff7c9a4c48692643350f5c11d32 | /03.py | 3fe6ecffd7c68d138cd44ff36839e07e0e925183 | [] | no_license | rovesoul/All-about-Python | cfbdf0e85c57f57842fc05d65b4fea1ab14291ba | 19c95ffafbb62cc7bf9eb905fe0c9a72ba3197b1 | refs/heads/master | 2020-09-09T17:44:34.141552 | 2020-03-09T10:28:37 | 2020-03-09T10:28:37 | 221,515,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # -*- encoding: utf8 -*-
'''When there are several instances of a shared resource, competing threads
are usually synchronised with a semaphore (Semaphore). A semaphore holds an
initial value representing the number of currently available resources, and
the threads dynamically increment/decrement it through the acquire() and
release() operations. Example: 30 workers all need a power hammer, but only
5 hammers exist in total. The code below resolves that contention with a
semaphore.
(Translated from the Chinese original by CSDN author "天元浪子":
https://blog.csdn.net/xufive/article/details/102993666)'''
import time
import threading
S = threading.Semaphore(5) # 5 power hammers are available
def us_hammer(id):
    """Thread function: acquire a hammer, use it briefly, then return it.

    :param id: Worker number, only used in the progress message.
    """
    # The context manager performs the P operation (blocking acquire) on
    # entry and the V operation (release) on exit. Unlike the original
    # explicit acquire()/release() pair, this cannot leak a permit if the
    # body raises between the two calls.
    with S:
        time.sleep(0.3)
        print('%d号刚刚用完电锤' % id)
def demo():
    """Spawn 30 worker threads competing for the hammers and wait for all."""
    workers = []
    for worker_id in range(30):  # 30 workers all want to use a hammer
        thread = threading.Thread(target=us_hammer, args=(worker_id,))
        workers.append(thread)
        thread.start()
    for thread in workers:
        thread.join()
    print('所有线程工作结束')
if __name__ == '__main__':
    # Run the demonstration only when executed as a script.
    demo()
| [
"noreply@github.com"
] | noreply@github.com |
2793cff17dd0cba8b60d3c3c631244f411ae3c47 | 6884d7784887f87cda93073fe7a0b8c702b9e9f8 | /test.py | 66656a87c223cbd1eeefb410cbe5a8240e6f31cc | [] | no_license | RazorBest/Tema2-PC---client-server-TCP | 29bff8d1091aafec9f825672a30fb4780570cad8 | f67b000ea48e126585cf66b24d7601859c64a52d | refs/heads/master | 2023-05-05T10:17:20.235348 | 2021-05-30T12:56:11 | 2021-05-30T12:56:11 | 373,753,914 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,488 | py | import subprocess
import sys
import signal
import time
import os
import pprint
import json
from contextlib import contextmanager
from subprocess import Popen, PIPE, STDOUT
from os import path
from time import sleep
# default port for the server
port = "12345"
# default IP for the server
ip = "127.0.0.1"
# default UDP client path (used as the working directory when launching
# udp_client.py)
udp_client_path = "pcom_hw2_udp_client"
# default size of test output line (test name + dot padding + status)
test_output_line_size = 40
####### Test utils #######
# dictionary containing test IDs and their statuses; every entry starts as
# "not executed" and is flipped to "passed"/"failed" as the suite runs
tests = {
    "compile": "not executed",
    "server_start": "not executed",
    "c1_start": "not executed",
    "data_unsubscribed": "not executed",
    "c1_subscribe_all": "not executed",
    "data_subscribed": "not executed",
    "c1_stop": "not executed",
    "c1_restart": "not executed",
    "data_no_clients": "not executed",
    "same_id": "not executed",
    "c2_start": "not executed",
    "c2_subscribe": "not executed",
    "c2_subscribe_sf": "not executed",
    "data_no_sf": "not executed",
    "data_sf": "not executed",
    "c2_stop": "not executed",
    "data_no_sf_2": "not executed",
    "data_sf_2": "not executed",
    "c2_restart_sf": "not executed",
    "quick_flow": "not executed",
    "server_stop": "not executed",
}
def pass_test(test):
    """Record the given test ID as passed."""
    tests.update({test: "passed"})
def fail_test(test):
    """Record the given test ID as failed."""
    tests.update({test: "failed"})
def print_test_results():
    """Print one summary line per test: name, dot padding, then status."""
    print("\nRESULTS\n-------")
    for name in tests:
        status = tests.get(name)
        padding = '.' * (test_output_line_size - len(name) - len(status))
        print(name + padding + status)
####### Topic utils #######
class Topic:
    """A subscription topic together with one payload value."""

    def __init__(self, name, category, value):
        self.name = name
        self.category = category
        self.value = value

    def print(self):
        """Return the topic and payload in the expected output format."""
        return " - ".join([self.name, self.category, self.value])

    @staticmethod
    def generate_topics():
        """Build the fixed list of sample topics covering every data kind."""
        samples = [
            ("a_non_negative_int", "INT", "10"),
            ("a_negative_int", "INT", "-10"),
            ("a_larger_value", "INT", "1234567890"),
            ("a_large_negative_value", "INT", "-1234567890"),
            ("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx", "INT", "10"),
            ("that_is_small_short_real", "SHORT_REAL", "2.30"),
            ("that_is_big_short_real", "SHORT_REAL", "655.05"),
            ("that_is_integer_short_real", "SHORT_REAL", "17"),
            ("float_seventeen", "FLOAT", "17"),
            ("float_minus_seventeen", "FLOAT", "-17"),
            ("a_strange_float", "FLOAT", "1234.4321"),
            ("a_negative_strange_float", "FLOAT", "-1234.4321"),
            ("a_subunitary_float", "FLOAT", "0.042"),
            ("a_negative_subunitary_float", "FLOAT", "-0.042"),
            ("ana_string_announce", "STRING", "Ana are mere"),
            ("huge_string", "STRING", "abcdefghijklmnopqrstuvwxyz"),
        ]
        return [Topic(n, c, v) for (n, c, v) in samples]
####### Process utils#######
class Process:
    """A child-process wrapper with line-based stdin/stdout/stderr control.

    All pipes are opened in text mode; timed reads rely on the module-level
    SIGALRM-based timeout() context manager.
    """

    def __init__(self, command, cwd=""):
        self.command = command
        self.started = False
        self.cwd = cwd

    def start(self):
        """Starts the process; exits the whole runner if the binary is missing."""
        try:
            if self.cwd == "":
                self.proc = Popen(self.command, universal_newlines=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            else:
                self.proc = Popen(self.command, universal_newlines=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=self.cwd)
            self.started = True
        except FileNotFoundError as e:
            print(e)
            quit()

    def finish(self):
        """Terminates the process and waits for it to finish.

        If the child ignores SIGTERM for more than a second, it is killed.
        (The original wait(timeout=1) could raise subprocess.TimeoutExpired,
        leaving self.started inconsistent and a zombie child behind.)
        """
        if self.started:
            self.proc.terminate()
            try:
                self.proc.wait(timeout=1)
            except subprocess.TimeoutExpired:
                self.proc.kill()
                self.proc.wait()
            self.started = False

    def send_input(self, proc_in):
        """Sends input and a newline to the process."""
        if self.started:
            self.proc.stdin.write(proc_in + "\n")
            self.proc.stdin.flush()

    def get_output(self):
        """Gets one line of stdout from the process ("" when not started)."""
        if self.started:
            return self.proc.stdout.readline()
        else:
            return ""

    def get_output_timeout(self, tout):
        """Tries to get one stdout line; returns "timeout" if `tout` seconds pass."""
        if self.started:
            with timeout(tout):
                try:
                    return self.proc.stdout.readline()
                except TimeoutError as e:
                    return "timeout"
        else:
            return ""

    def get_error(self):
        """Gets one line of stderr from the process ("" when not started)."""
        if self.started:
            return self.proc.stderr.readline()
        else:
            return ""

    def get_error_timeout(self, tout):
        """Tries to get one stderr line; returns "timeout" if `tout` seconds pass."""
        if self.started:
            with timeout(tout):
                try:
                    return self.proc.stderr.readline()
                except TimeoutError as e:
                    return "timeout"
        else:
            return ""

    def is_alive(self):
        """Checks if the process is alive (started and not yet exited)."""
        if self.started:
            return self.proc.poll() is None
        else:
            return False
####### Helper functions #######
@contextmanager
def timeout(time):
    """Raises a TimeoutError inside the managed block after `time` seconds.

    Implemented with SIGALRM, so it only works on the main thread of a
    POSIX system and with whole-second granularity. A TimeoutError that
    escapes the block is swallowed here.
    :param time: Number of seconds before the alarm fires.
    :type time: int
    """
    signal.signal(signal.SIGALRM, raise_timeout)
    signal.alarm(time)
    try:
        yield
    except TimeoutError:
        pass
    finally:
        # Cancel any alarm still pending (the original skipped this, leaving
        # a stale alarm armed after a block that finished early), then drop
        # the handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, signal.SIG_IGN)
def raise_timeout(signum, frame):
    """SIGALRM handler that converts the signal into a TimeoutError."""
    raise TimeoutError
def make_target(target):
    """Invoke `make <target>` and report whether the target file now exists."""
    command = "make " + target
    subprocess.run(command, shell=True)
    return path.exists(target)
def make_clean():
    """Invoke the `clean` target of the makefile in the current directory."""
    subprocess.run("make clean", shell=True)
def exit_if_condition(condition, message):
    """On a fatal condition: print the message, clean the build, show the
    test summary and exit the runner. Otherwise do nothing."""
    if not condition:
        return
    print(message)
    make_clean()
    print_test_results()
    quit()
def get_procfs_values(rmem):
    """Reads TCP buffer sizes (min/default/max) from procfs.

    :param rmem: True -> read tcp_rmem (receive buffers), False -> tcp_wmem.
    :return: list of the three size strings, or ["error"] on a short read.
    """
    path = "/proc/sys/net/ipv4/tcp_" + ("rmem" if rmem else "wmem")
    # Use a context manager: the original left the file handle open.
    with open(path, "r") as file:
        values = file.readline().split()
    if len(values) < 3:
        print("Error: could not read correctly from procfs")
        return ["error"]
    return values
def set_procfs_values(rmem, values):
    """Writes TCP buffer sizes (min/default/max) to procfs.

    :param rmem: True -> write tcp_rmem (receive buffers), False -> tcp_wmem.
    :param values: sequence holding at least three size strings.
    :return: True on success, False when procfs is not writable (non-root).
    """
    path = "/proc/sys/net/ipv4/tcp_" + ("rmem" if rmem else "wmem")
    if not os.access(path, os.W_OK):
        print("Error: not enough permissions to write to procfs")
        return False
    # Use a context manager: the original left the file handle open, so the
    # write was not guaranteed to be flushed promptly.
    with open(path, "w") as file:
        file.write(values[0] + " " + values[1] + " " + values[2])
    return True
####### Test helper functions #######
def run_udp_client(mode=True, type="0"):
    """Runs a UDP client which generates messages on one or multiple topics.

    :param mode: True -> automatic mode (call sites use it to generate one
    message per topic); False -> manual mode, where `type` selects a single
    topic to send.
    :param type: Topic index (as a string) sent in manual mode. NOTE: the
    name shadows the `type` builtin; kept for interface compatibility.
    """
    if mode:
        udpcl = Process(["python3", "udp_client.py", ip, port], udp_client_path)
        udpcl.start()
        # drain the client's output line by line (19 reads, presumably one
        # per generated message -- TODO confirm against udp_client.py)
        for i in range(19):
            outudp = udpcl.get_output_timeout(1)
        udpcl.finish()
    else:
        udpcl = Process(["python3", "udp_client.py", "--mode", "manual", ip, port], udp_client_path)
        udpcl.start()
        sleep(1)
        udpcl.send_input(type)
        sleep(1)
        udpcl.send_input("exit")
        udpcl.finish()
def start_and_check_client(server, id, restart=False, test=True):
    """Starts a TCP client and checks that it starts.

    :param server: Process handle of the server (its next log line is read).
    :param id: Client id suffix; the subscriber is launched as "C<id>".
    :param restart: Selects which test id is recorded ("c<id>_restart"
    instead of "c<id>_start") when `test` is True.
    :param test: When False no test status is recorded (the caller checks
    further conditions itself, e.g. run_test_c2_restart_sf).
    :return: (client process, success flag).
    """
    if test:
        fail_test("c" + id + ("_restart" if restart else "_start"))
    print("Starting subscriber C" + id)
    client = Process(["./subscriber", "C" + id, ip, port])
    client.start()
    # give the subscriber a moment to connect before polling the server log
    sleep(1)
    outs = server.get_output_timeout(2)
    success = True
    # check if the client successfully connected to the server
    if not client.is_alive():
        print("Error: subscriber C" + id + " is not up")
        success = False
    if not outs.startswith("New client C" + id + " connected from"):
        print("Error: server did not print that C" + id + " is connected")
        success = False
    if success and test:
        pass_test("c" + id + ("_restart" if restart else "_start"))
    return client, success
def check_subscriber_output(c, client_id, target):
    """Compares the next output line of a TCP client with an expected string.

    :param c: Client process handle.
    :param client_id: Client id suffix, used only in the error message.
    :param target: Substring expected in the next output line.
    :return: True when `target` is contained in the line, False otherwise.
    """
    outc = c.get_output_timeout(1)
    if target not in outc:
        #print(bytearray(outc, "utf8"))
        #print(bytearray(target, "utf8"))
        print("Error: C" + client_id + " output should contain [" + target + "], is actually [" + outc.rstrip() + "]")
        return False
    return True
def check_subscriber_stop(server, c, id):
    """Stops a TCP client via its "exit" command and checks that it stops.

    :return: True when the server logged the disconnect and the client
    process actually terminated, False otherwise.
    """
    print("Disconnecting subscriber C" + id)
    c.send_input("exit")
    # give the client time to close its connection
    sleep(1)
    # check that the process is no longer alive
    outs = server.get_output_timeout(1)
    message = "Client C" + id + " disconnected."
    if outs.rstrip() != message or c.is_alive():
        print("Error: client C" + id + " not disconnected")
        return False
    return True
def check_two_subscribers(c1, c2, topics, topic_id):
    """Generates one message on a topic and checks both clients receive it.

    Both outputs are always checked so that failures on either client are
    reported, even when the first one already failed.
    :return: True only when both clients printed the expected line.
    """
    topic = topics[topic_id]
    # generate one message for the topic
    print("Generating one message for topic " + topic.name)
    run_udp_client(False, str(topic_id))
    # check that both subscribers receive the message correctly
    target = topic.print()
    success = check_subscriber_output(c1, "1", target)
    return check_subscriber_output(c2, "2", target) and success
####### Test functions #######
def run_test_compile():
    """Tests that the server and subscriber compile (aborts the run if not)."""
    fail_test("compile")
    print("Compiling")
    exit_if_condition(not make_target("server"), "Error: server could not be built")
    exit_if_condition(not make_target("subscriber"), "Error: subscriber could not be built")
    pass_test("compile")
def run_test_server_start():
    """Tests that the server starts correctly (aborts the run if not).

    :return: The server's Process handle, reused by all later tests.
    """
    fail_test("server_start")
    print("Starting the server")
    server = Process(["./server", port])
    server.start()
    # give the server a moment to bind its sockets
    sleep(1)
    exit_if_condition(not server.is_alive(), "Error: server is not up")
    pass_test("server_start")
    return server
def run_test_c1_start(server):
    """Tests that a subscriber C1 starts correctly.

    :return: (client process, success flag), from start_and_check_client.
    """
    return start_and_check_client(server, "1")
def run_test_data_unsubscribed(server, c1):
    """Tests that messages from topics not subscribed to are not received.

    C1 has no subscriptions yet, so both the server and C1 are expected to
    stay silent (their timed reads must expire).
    """
    fail_test("data_unsubscribed")
    # generate one message for each topic
    print("Generating one message for each topic")
    run_udp_client()
    # check that the server and C1 print nothing
    outs = server.get_output_timeout(1)
    outc1 = c1.get_output_timeout(1)
    failed = False
    if outs != "timeout":
        print("Error: server printing [" + outs.rstrip() + "]")
        failed = True
    if outc1 != "timeout":
        print("Error: C1 printing [" + outc1.rstrip() + "]")
        failed = True
    if not failed:
        pass_test("data_unsubscribed")
def run_test_c1_subscribe_all(server, c1, topics):
    """Tests that subscriber C1 can subscribe to all topics (SF disabled)."""
    fail_test("c1_subscribe_all")
    print("Subscribing C1 to all topics without SF")
    failed = False
    for topic in topics:
        c1.send_input("subscribe " + topic.name + " 0")
        # each subscribe must be confirmed before sending the next one
        outc1 = c1.get_output_timeout(1)
        if not outc1.startswith("Subscribed to topic."):
            print("Error: C1 not subscribed to all topics")
            failed = True
            break
    if not failed:
        pass_test("c1_subscribe_all")
def run_test_data_subscribed(server, c1, topics):
    """Tests that subscriber C1 receives messages on all subscribed topics."""
    fail_test("data_subscribed")
    # generate one message for each topic
    print("Generating one message for each topic")
    run_udp_client()
    # check that C1 receives all the messages correctly (in topic order)
    success = True
    for topic in topics:
        success = check_subscriber_output(c1, "1", topic.print()) and success
    if success:
        pass_test("data_subscribed")
def run_test_c1_stop(server, c1):
    """Tests that subscriber C1 stops correctly.

    :return: True on clean disconnect, False otherwise.
    """
    fail_test("c1_stop")
    if check_subscriber_stop(server, c1, "1"):
        pass_test("c1_stop")
        return True
    return False
def run_test_c1_restart(server):
    """Tests that subscriber C1 restarts correctly.

    Messages are generated while C1 is offline; since its subscriptions
    were made without SF, they must not be delivered after the restart
    (checked separately by run_test_data_no_clients).
    :return: (client process, success flag).
    """
    # generate one message for each topic
    print("Generating one message for each topic")
    run_udp_client()
    # restart and check subscriber C1
    return start_and_check_client(server, "1", True)
def run_test_data_no_clients(c1):
    """Tests that subscriber C1 doesn't receive anything from the server upon
    restart (its earlier non-SF subscriptions must not be replayed)."""
    fail_test("data_no_clients")
    if c1.get_output_timeout(1) == "timeout":
        pass_test("data_no_clients")
def run_test_same_id(server):
    """Tests that the server doesn't accept two subscribers with the same ID.

    A second "C1" is started while the first is connected: the new process
    must exit and the server must log the rejection.
    """
    fail_test("same_id")
    print("Starting another subscriber with ID C1")
    c1bis = Process(["./subscriber", "C1", ip, port])
    c1bis.start()
    # give the server time to reject the duplicate connection
    sleep(1)
    outs = server.get_output_timeout(2)
    success = True
    if c1bis.is_alive():
        print("Error: second subscriber C1 is up")
        success = False
    if not outs.startswith("Client C1 already connected."):
        print("Error: server did not print that C1 is already connected")
        success = False
    if success:
        pass_test("same_id")
def run_test_c2_start(server):
    """Tests that a subscriber C2 starts correctly.

    :return: (client process, success flag), from start_and_check_client.
    """
    return start_and_check_client(server, "2")
def run_test_c2_subscribe(c2, topics):
    """Tests that subscriber C2 can subscribe to a topic (topics[0], no SF)."""
    fail_test("c2_subscribe")
    topic = topics[0]
    print("Subscribing C2 to topic " + topic.name + " without SF")
    c2.send_input("subscribe " + topic.name + " 0")
    outc2 = c2.get_output_timeout(1)
    if not outc2.startswith("Subscribed to topic."):
        print("Error: C2 not subscribed to topic " + topic.name)
        return
    pass_test("c2_subscribe")
def run_test_c2_subscribe_sf(c2, topics):
    """Tests that subscriber C2 can subscribe to a topic with SF (topics[1])."""
    fail_test("c2_subscribe_sf")
    topic = topics[1]
    print("Subscribing C2 to topic " + topic.name + " with SF")
    c2.send_input("subscribe " + topic.name + " 1")
    outc2 = c2.get_output_timeout(1)
    if not outc2.startswith("Subscribed to topic."):
        print("Error: C2 not subscribed to topic " + topic.name)
        return
    pass_test("c2_subscribe_sf")
def run_test_data_no_sf(c1, c2, topics):
    """Tests that subscribers C1 and C2 both receive a message on the
    non-SF topic (topics[0])."""
    fail_test("data_no_sf")
    if check_two_subscribers(c1, c2, topics, 0):
        pass_test("data_no_sf")
def run_test_data_sf(c1, c2, topics):
    """Tests that subscribers C1 and C2 both receive a message on the
    SF topic (topics[1]) while both are connected."""
    fail_test("data_sf")
    if check_two_subscribers(c1, c2, topics, 1):
        pass_test("data_sf")
def run_test_c2_stop(server, c2):
    """Tests that subscriber C2 stops correctly.

    :return: True on clean disconnect, False otherwise.
    """
    fail_test("c2_stop")
    if check_subscriber_stop(server, c2, "2"):
        pass_test("c2_stop")
        return True
    return False
def run_test_data_no_sf_2(c1, topics):
    """Tests that subscriber C1 receives a message on a subscribed topic
    while C2 is disconnected (non-SF topic, nothing is stored for C2)."""
    fail_test("data_no_sf_2")
    topic = topics[0]
    # generate one message for the non-SF topic
    print("Generating one message for topic " + topic.name)
    run_udp_client(False, "0")
    # check that C1 receives the message correctly
    if check_subscriber_output(c1, "1", topic.print()):
        pass_test("data_no_sf_2")
def run_test_data_sf_2(c1, topics):
    """Tests that subscriber C1 receive three messages on a subscribed topic with SF."""
    sf_topic = topics[1]
    fail_test("data_sf_2")
    # generate three messages for the SF topic
    print("Generating three messages for topic " + sf_topic.name)
    all_received = True
    for _ in range(3):
        run_udp_client(False, "1")
        # check that C1 receives the message correctly (always checked,
        # even after an earlier miss, so every message is consumed)
        if not check_subscriber_output(c1, "1", sf_topic.print()):
            all_received = False
    if all_received:
        pass_test("data_sf_2")
def run_test_c2_restart_sf(server, topics):
    """Tests that subscriber C2 receives missed SF messages upon restart.

    Returns the restarted C2 process handle so the caller can keep using it.
    """
    fail_test("c2_restart_sf")
    sf_topic = topics[1]
    # restart and check subscriber C2
    c2, started = start_and_check_client(server, "2", True, False)
    if started:
        # check that all three SF messages are properly received; every
        # message is checked even after an earlier miss
        all_received = True
        for _ in range(3):
            if not check_subscriber_output(c2, "2", sf_topic.print()):
                all_received = False
        if all_received:
            pass_test("c2_restart_sf")
    return c2
def run_test_quick_flow(c1, topics):
    """Tests that subscriber C1 receives many messages in quick succession on subscribed topics.

    Temporarily shrinks the kernel socket buffer sizes (via procfs) so the
    burst actually stresses the server's buffering, then restores them.
    """
    fail_test("quick_flow")
    # save the current read/write socket buffer settings so they can be restored
    rmem = get_procfs_values(True)
    wmem = get_procfs_values(False)
    if rmem[0] == "error" or wmem[0] == "error":
        return
    if not set_procfs_values(True, ["5", "5", "5"]):
        return
    if not set_procfs_values(False, ["5", "5", "5"]):
        # the write-buffer update failed; undo the read-buffer change
        set_procfs_values(True, rmem)
        return
    # generate one message for each topic 30 times in a row
    print("Generating one message for each topic 30 times in a row")
    for i in range(30):
        run_udp_client()
    # check that C1 receives all the messages correctly
    # (fix: removed leftover debug prints that dumped the loop index and
    # expected topic output 30 * len(topics) times)
    success = True
    for i in range(30):
        for topic in topics:
            success = check_subscriber_output(c1, "1", topic.print()) and success
    if success:
        pass_test("quick_flow")
    # restore the original socket buffer settings
    set_procfs_values(True, rmem)
    set_procfs_values(False, wmem)
def run_test_server_stop(server, c1):
    """Tests that the server stops correctly (and that C1 exits with it)."""
    fail_test("server_stop")
    print("Stopping the server")
    server.send_input("exit")
    sleep(1)
    # Both processes must be down for the test to pass.
    all_down = True
    if server.is_alive():
        print("Error: server is still up")
        all_down = False
    if c1.is_alive():
        print("Error: C1 is still up")
        all_down = False
    if all_down:
        pass_test("server_stop")
def h2_test():
    """Runs all the tests.

    Orchestrates the whole scenario: build, start the server, exercise
    subscriber C1 (subscribe, data, stop/restart), then subscriber C2
    (plain and store-and-forward subscriptions, stop/restart with SF
    replay), a quick-flow burst, and finally server shutdown.  Each stage
    only runs if the stage it depends on succeeded.

    NOTE(review): indentation was lost in this copy of the file; the
    nesting below is reconstructed from the inline comments and the data
    dependencies between stages -- confirm against the original source.
    """
    # clean up
    make_clean()
    # generate the topics
    topics = Topic.generate_topics()
    # build the two binaries and check
    run_test_compile()
    # start the server and check it is running
    server = run_test_server_start()
    # start a subscriber C1 and check it is running
    c1, success = run_test_c1_start(server)
    if success:
        # generate data and check that it isn't received by C1
        run_test_data_unsubscribed(server, c1)
        # subscribe C1 to all topics and verify
        run_test_c1_subscribe_all(server, c1, topics)
        # generate messages on all topics and check that C1 receives them
        run_test_data_subscribed(server, c1, topics)
        # stop C1 and check it exits correctly
        success = run_test_c1_stop(server, c1)
        if success:
            # restart C1 and check that it starts properly
            c1, success = run_test_c1_restart(server)
            if success:
                # check that C1 doesn't receive anything from the server
                run_test_data_no_clients(c1)
                # connect a client with the same ID as C1 and check that it fails
                run_test_same_id(server)
                # start a subscriber C2 and check it is running
                c2, success = run_test_c2_start(server)
                if success:
                    # subscribe C2 to a single non-SF topic and check
                    run_test_c2_subscribe(c2, topics)
                    # subscribe C2 to a single SF topic and check
                    run_test_c2_subscribe_sf(c2, topics)
                    # generate a message on the non-SF topic and check
                    run_test_data_no_sf(c1, c2, topics)
                    # generate a message on the SF topic and check
                    run_test_data_sf(c1, c2, topics)
                    # stop C2 and check it exits correctly
                    success = run_test_c2_stop(server, c2)
                    if success:
                        # generate a message on the non-SF topic and check
                        run_test_data_no_sf_2(c1, topics)
                        # generate three messages on the non-SF topic and check
                        run_test_data_sf_2(c1, topics)
                        # restart C2 and check that all SF messages are received
                        c2 = run_test_c2_restart_sf(server, topics)
                        pass  # no-op leftover; kept as-is
                # send all types of message 30 times in quick succesion and check
                run_test_quick_flow(c1, topics)
                # close the server and check that C1 also closes
                run_test_server_stop(server, c1)
    # clean up
    make_clean()
    # print test results
    print_test_results()
# Run all tests only when executed as a script, not when imported
# (fix: the call was previously unconditional at module level).
if __name__ == "__main__":
    h2_test()
| [
"razvan.pricop@protonmail.com"
] | razvan.pricop@protonmail.com |
42551afeed90f3e1fb4f2db65044a64b9fa37acd | eff9f57daedc291a5f061dfe1ca19693850a2a04 | /tests/__init__.py | 9eefcbdb4ad287dadaee04ebc11a67aba0eaafa2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jameswnl/aiops-data-collector | 4a5ce67cfff18d21f4b5a929034ba236f7fd6903 | 16dccb54d9b5a0bb0c1e76c04d9f4066325983af | refs/heads/master | 2020-04-19T11:51:05.484483 | 2019-06-18T14:39:58 | 2019-06-18T14:39:58 | 168,178,016 | 0 | 0 | Apache-2.0 | 2019-06-26T05:51:35 | 2019-01-29T15:28:41 | Python | UTF-8 | Python | false | false | 38 | py | """Data collector unit test suite."""
| [
"tcoufal@redhat.com"
] | tcoufal@redhat.com |
c8ce9fe2ffe6f9aad8ba442ef8c5905d1888f424 | c97d3c8848e4f03edb6c64b6abff530a6e74d616 | /apps/models_sklearn_spark/Matrix_factorization/handler.py | 1b6060f59557d47ea3890cf8f7f98d14845086ee | [
"Apache-2.0"
] | permissive | simhaonline/Django_web | eeb80d8f32a460258fceb30ecececd7410949f72 | f7df1a7b101d41835a334b78cddf3570968799e4 | refs/heads/master | 2023-04-24T23:33:51.535515 | 2021-04-02T15:20:29 | 2021-04-02T15:20:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ : 矩阵分解
# __date__: 2020/09/09 09
try:
from apps.data.handler import get_ml_1m_ratings_df
except:
pass
from math import sqrt
from sklearn.metrics import mean_squared_error
from scipy.sparse.linalg import svds
from sklearn.model_selection import cross_validate, train_test_split
import sys
import os
import numpy as np
import pandas as pd
from functools import lru_cache
# sys.path.append(os.path.dirname(os.path.abspath('./')))
# from apps.models_sklearn_spark.Matrix_factorization.handler import ratings_df
# apps_floder = os.path.dirname(os.path.dirname(__file__))
# ratings_file = os.path.join(apps_floder, 'data\\ml-1m\\ratings.csv')
# ratings_df = pd.read_csv(ratings_file, sep=',', engine='python')
def data_split(
        ratings_df: '数据',
        ratio: '分割数据的比例' = 1/4) -> (pd.DataFrame, pd.DataFrame):
    """Split *ratings_df* into a (train, test) pair.

    The train frame holds the first ``ratio`` fraction of rows and the
    test frame holds the remaining ``1 - ratio`` fraction (both rounded).
    """
    total = len(ratings_df)
    train_size = round(total * ratio)
    test_size = round(total * (1 - ratio))
    return ratings_df.head(train_size), ratings_df.tail(test_size)
def get_data_sparsity(ratings_df, n_users, n_movies) -> float:
    """Compute, print and return the data-set sparsity.

    Sparsity is the number of observed ratings divided by the size of the
    full user-movie grid, rounded to three decimals.
    """
    density = ratings_df.size / float(n_users * n_movies)
    sparsity = round(density, 3)
    print('The sparsity level of MovieLens is ' + str(sparsity))
    return sparsity
def create_uesr_item(ratings_df, n_users, n_movies) -> (np.ndarray, np.ndarray):
    """Build user-item rating matrices for the train and test splits.

    Each matrix is n_users x n_movies; entry [user_id - 1, movie_id - 1]
    holds the rating (ids are 1-based in the data).
    """
    train_data, test_data = data_split(ratings_df)
    shape = (n_users, n_movies)
    train_data_matrix = np.zeros(shape)
    test_data_matrix = np.zeros(shape)
    # itertuples yields (index, user_id, movie_id, rating, ...)
    for row in train_data.itertuples():
        train_data_matrix[row[1] - 1, row[2] - 1] = row[3]
    for row in test_data.itertuples():
        test_data_matrix[row[1] - 1, row[2] - 1] = row[3]
    return train_data_matrix, test_data_matrix
def rmse(prediction, ground_truth) -> float:
    """Root-mean-squared error, computed only where ground_truth is non-zero.

    Zero entries in *ground_truth* denote "no rating observed" and are
    excluded from the comparison.
    """
    observed_mask = ground_truth.nonzero()
    predicted = prediction[observed_mask].flatten()
    observed = ground_truth[observed_mask].flatten()
    return sqrt(mean_squared_error(predicted, observed))
@lru_cache(None)
def mf_svds(k) -> (float, np.ndarray):
    """Matrix factorisation via truncated SVD.

    Builds the train/test user-item matrices from the MovieLens 1M
    ratings, factorises the train matrix keeping the top ``k`` singular
    values, reconstructs the predicted rating matrix and scores it
    against the test matrix.  Results are memoised per ``k``.

    Returns:
        (rmse_on_test, predicted_rating_matrix)
    """
    ratings_df = get_ml_1m_ratings_df()
    n_users = max(ratings_df.UserID.unique())
    n_movies = max(ratings_df.MovieID.unique())
    print('Number of users = ' + str(n_users) +
          ' | Number of movies = ' + str(n_movies))
    train_data_matrix, test_data_matrix = create_uesr_item(
        ratings_df, n_users, n_movies)
    # Fix: honour the caller-supplied rank k (it was hard-coded to 20,
    # making the parameter dead); also dropped the no-op
    # "u.shape, s.shape, vt.shape" statement.
    u, s, vt = svds(train_data_matrix, k=k)
    s_diag_matrix = np.diag(s)
    X_pred = np.dot(np.dot(u, s_diag_matrix), vt)
    _rmse = rmse(X_pred, test_data_matrix)
    print('User-based CF MSE: ' + str(_rmse))
    return _rmse, X_pred
| [
"aboyinsky@outlook.com"
] | aboyinsky@outlook.com |
b2ab8649cbf9247fe07ed8dd265a8e687f8ff08a | 06b25529d2fc50da729bd288522c244f1a05b460 | /Iterator/sample/main.py | 2a3d112a3784b23b73be93a39d320cd379d39d94 | [] | no_license | PlumpMath/DesignPatternAtPython | bf9fdd5bcd886acd18f9380a3b4fbdbc597b2dba | e8780e345400cf3458fd35d63d303de4b333cf87 | refs/heads/master | 2021-01-20T09:52:03.863639 | 2016-06-27T01:07:22 | 2016-06-27T01:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
from bookShelf import BookShelf
from book import Book
# Script entry point for the Iterator-pattern sample: create a shelf and
# append one book.
# NOTE(review): the meaning of BookShelf's constructor argument (1) is not
# visible here -- presumably an initial capacity; confirm in bookShelf.py.
if __name__ == '__main__':
    bookShelf = BookShelf(1)
    bookShelf.appendBox(Book("addd"))
    # print t_class.__arg
"suzukitoshiyuki@r58-89-24-133.yz.yamagata-u.ac.jp"
] | suzukitoshiyuki@r58-89-24-133.yz.yamagata-u.ac.jp |
64d81ce55d80a1ba9a97d94bb44b757f87bae471 | 51290cfa282166bc06f4fbdcce050bbea2183a67 | /rgbread.py | 4623b7559337ec3a096555796e1be9266634b4a7 | [] | no_license | tweirtx/freshmancs-ev3-python | d6e7cc88c0a18f9b66da57f5248c544ddfe810e8 | 5142885e93407372efff6ad6e0135743ba23cbed | refs/heads/master | 2021-03-24T09:19:49.308468 | 2018-04-29T17:31:15 | 2018-04-29T17:31:15 | 116,987,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from ev3dev import ev3
# Configure the EV3 color sensor for raw RGB mode, then poll it forever.
sensor = ev3.ColorSensor()
sensor.mode = sensor.MODE_RGB_RAW
# NOTE(review): the mode is set to RGB_RAW but the loop reads .color (the
# detected colour code) rather than raw RGB channel values -- confirm intent.
while True:
    print(sensor.color)
"tweirtx@gmail.com"
] | tweirtx@gmail.com |
b768b58cf3721bb2f6b3a2fc866798aa78ca6847 | a990bd26d3a69d1ea6699c85efa2cea99452c3df | /problems/leetcode/rottingOranges994.py | 5388c929d4cee4a0f12199681fa2844bb927234b | [] | no_license | abecus/DS-and-Algorithms | 5f1a948a085465ae165090ec957a9d5307ce729d | 3259e8183382265a27cf8c91e37d0086175a5703 | refs/heads/master | 2022-05-05T07:07:08.194243 | 2022-04-05T16:23:39 | 2022-04-05T16:23:39 | 193,111,610 | 11 | 6 | null | 2020-11-18T16:19:18 | 2019-06-21T14:27:25 | Python | UTF-8 | Python | false | false | 1,960 | py | """
_________________________994. Rotting Oranges_________________________
Difficulty: Medium Likes: 1259 Dislikes: 170 Solution: Available
Total Accepted: 77.3K Total Submission: 164.3K Acceptance Rate: 47.0%
Tags: Breadth-first Search
In a given grid, each cell can have one of three values: the value 0
representing an empty cell; the value 1 representing a fresh orange;
the value 2 representing a rotten orange. Every minute, any fresh
orange that is adjacent (4-directionally) to a rotten orange becomes
rotten. Return the minimum number of minutes that must elapse until no
cell has a fresh orange. If this is impossible, return -1 instead.
Example 1:
Input: [[2,1,1],[1,1,0],[0,1,1]]
Output: 4
Example 2:
Input: [[2,1,1],[0,1,1],[1,0,1]]
Output: -1
Example 3:
Input: [[0,2]]
Output: 0
Note:
1 <= grid.length <= 10; 1 <= grid[0].length <= 10; grid[i][j] is only 0, 1, or 2.
"""
def orangesRotting(grid):
    """Return the minutes until every fresh orange rots, or -1 if impossible.

    Multi-source BFS: start from every rotten cell (value 2) and spread to
    4-directionally adjacent fresh cells (value 1), one layer per minute.
    Mutates *grid* in place.
    """
    rows, cols = len(grid), len(grid[0])

    # All initially rotten oranges form the first BFS frontier.
    frontier = [(r, c) for r in range(rows) for c in range(cols)
                if grid[r][c] == 2]

    minutes = 0
    while frontier:
        next_frontier = []
        for r, c in frontier:
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                    grid[nr][nc] = 2
                    next_frontier.append((nr, nc))
        minutes += 1
        frontier = next_frontier

    # Any surviving fresh orange means the rot could not reach everything.
    if any(cell == 1 for row in grid for cell in row):
        return -1
    # The loop counts one extra (empty) layer, hence the -1 adjustment;
    # minutes == 0 means there were no rotten oranges at all.
    return minutes - 1 if minutes else minutes
if __name__ == "__main__":
grid = [[2,1,1],[1,1,0],[0,1,1]]
# grid = [[2,1,1],
# [0,1,1],
# [1,0,1]]
# grid = [[0,1]]
print(orangesRotting(grid,))
"""
similarQuestions::
Walls and Gates: Medium
"""
| [
"insaaone@gmail.com"
] | insaaone@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.