blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3-288) | content_id (string, len 40) | detected_licenses (list, len 0-112) | license_type (2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (142 classes) | content (string, len 128-8.19k) | authors (list, len 1) | author_id (string, len 1-132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
99152c525e224ec0a365e21d77b3ebc4c22869d4
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/platform/_anonymize_subscription.py
|
556651d3616d6ab2c798987dcb97a5bca11c71d0
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Platform Service (4.32.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import (
anonymize_subscription as anonymize_subscription_internal,
)
@click.command()
@click.argument("user_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def anonymize_subscription(
user_id: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(anonymize_subscription_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = anonymize_subscription_internal(
user_id=user_id,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"anonymizeSubscription failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
anonymize_subscription.operation_id = "anonymizeSubscription"
anonymize_subscription.is_deprecated = False
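# Usage sketch (assumption: the SDK is installed and credentials are configured);
# click's built-in test runner can drive the command directly:
#   from click.testing import CliRunner
#   runner = CliRunner()
#   result = runner.invoke(anonymize_subscription, ["<user_id>", "--doc", "true"])
#   print(result.output)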
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
aed3c3dee24a3b469803135f19b9e7d540ed0040
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/send_more_money_any_base.py
|
0a301f379ecfc9bc8758bc64492faf87ba6b7502
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
"""
Send more money in any base problem in cpmpy.
Send More Money
SEND
+ MORE
------
MONEY
using any base.
Examples:
Base 10 has one solution:
{9, 5, 6, 7, 1, 0, 8, 2}
Base 11 has three solutions:
{10, 5, 6, 8, 1, 0, 9, 2}
{10, 6, 7, 8, 1, 0, 9, 3}
{10, 7, 8, 6, 1, 0, 9, 2}
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my CPMpy page: http://www.hakank.org/cpmpy/
"""
from cpmpy import *
from cpmpy.solvers import *
import numpy as np
from cpmpy_hakank import *
def print_solution(x):
print(x[0].value(),x[1].value(),x[2].value(),x[3].value(), " + ",
x[4].value(),x[5].value(),x[6].value(),x[7].value(), " = ",
x[8].value(),x[9].value(),x[10].value(),x[11].value(),x[12].value())
def send_more_money_any_base(base=10,num_sols=0,num_procs=1):
print("\nbase: ", base)
x = intvar(0,base-1,shape=8,name="x")
s,e,n,d,m,o,r,y = x
model = Model([
s*base**3 + e*base**2 + n*base + d +
m*base**3 + o*base**2 + r*base + e ==
m*base**4 + o*base**3 + n*base**2 + e*base + y,
s > 0,
m > 0,
AllDifferent((s,e,n,d,m,o,r,y))
]
)
xs = [s,e,n,d,
m,o,r,e,
m,o,n,e,y]
def print_sol():
print(x.value())
# Use OR-tools CP-SAT for speeding up the program.
ss = CPM_ortools(model)
# Flags to experiment with
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
num_solutions = ss.solveAll(display=print_sol)
print("Nr solutions:", num_solutions)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
return num_solutions
b = 10
send_more_money_any_base(b)
start_val = 8
end_val = 30
num_sols_per_base = []
for b in range(start_val,end_val+1):
num_sols_per_base += [send_more_money_any_base(b)]
print(f"\nNum sols per base ({start_val}..{end_val}):", num_sols_per_base)
|
[
"hakank@gmail.com"
] |
hakank@gmail.com
|
05d6b26ebd7a5f04611f8d54877b900714b3c030
|
cd073757b17465635da3fd30f770bbd8df0dff99
|
/tests/pytests/unit/beacons/test_swapusage.py
|
35b5ce28cdcad2d140bfeea54240048ddca33b61
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
marmarek/salt
|
cadc939ecbda915e2f4246f198d72f507c9c1960
|
974497174252ab46ee7dc31332ffb5b2f8395813
|
refs/heads/master
| 2022-11-19T14:05:28.778983
| 2021-03-12T00:30:45
| 2021-05-06T16:45:38
| 160,296,073
| 0
| 1
|
Apache-2.0
| 2018-12-04T04:19:43
| 2018-12-04T04:19:41
| null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
"""
tests.pytests.unit.beacons.test_swapusage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Swap usage beacon test cases
"""
from collections import namedtuple
import pytest
import salt.beacons.swapusage as swapusage
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {}
@pytest.fixture
def stub_swap_usage():
return namedtuple("sswap", "total used free percent sin sout")(
17179865088, 1674412032, 15505453056, 9.7, 1572110336, 3880046592,
)
def test_non_list_config():
config = {}
ret = swapusage.validate(config)
assert ret == (False, "Configuration for swapusage beacon must be a list.")
def test_empty_config():
config = [{}]
ret = swapusage.validate(config)
assert ret == (False, "Configuration for swapusage beacon requires percent.")
def test_swapusage_match(stub_swap_usage):
with patch("psutil.swap_memory", MagicMock(return_value=stub_swap_usage)):
config = [{"percent": "9%"}, {"interval": 30}]
ret = swapusage.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = swapusage.beacon(config)
assert ret == [{"swapusage": 9.7}]
def test_swapusage_nomatch(stub_swap_usage):
with patch("psutil.swap_memory", MagicMock(return_value=stub_swap_usage)):
config = [{"percent": "10%"}]
ret = swapusage.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = swapusage.beacon(config)
assert ret != [{"swapusage": 9.7}]
|
[
"megan.wilhite@gmail.com"
] |
megan.wilhite@gmail.com
|
69dc9b2fe646f35c01edb5ff5eb318b3291f0fa9
|
f125a883dbcc1912dacb3bf13e0f9263a42e57fe
|
/tsis5/part1/21.py
|
1fc47ed9b0a9a65ab42e5e8b5dd618f9a234fcfe
|
[] |
no_license
|
AruzhanBazarbai/pp2
|
1f28b9439d1b55499dec4158e8906954b507f04a
|
9d7f1203b6735b27bb54dfda73b3d2c6b90524c3
|
refs/heads/master
| 2023-07-13T05:26:02.154105
| 2021-08-27T10:20:34
| 2021-08-27T10:20:34
| 335,332,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# Write a Python program to create a file where all letters of the English alphabet are listed, with a specified number of letters on each line
import string
def letters_file_line(n):
with open("words1.txt", "w") as f:
alphabet = string.ascii_uppercase
letters = [alphabet[i:i + n] + "\n" for i in range(0, len(alphabet), n)]
f.writelines(letters)
letters_file_line(3)
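# With n = 3 the file contains "ABC", "DEF", ..., "VWX", "YZ"; the final line
# is shorter because 26 is not a multiple of 3.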
|
[
"aruzhanart2003@mail.ru"
] |
aruzhanart2003@mail.ru
|
04c4f07bd5698c3866128a445148f52291cad0de
|
eef9afdc9ab5ee6ebbfa23c3162b978f137951a8
|
/oops_concept/encapsulation/encapsul3.py
|
0804609b0c00d8bb1f7947f4a6266df30c65c544
|
[] |
no_license
|
krishnadhara/programs-venky
|
2b679dca2a6158ecd6a5c28e86c8ed0c3bab43d4
|
0f4fd0972dec47e273b6677bbd32e2d0742789ae
|
refs/heads/master
| 2020-06-05T04:48:45.792427
| 2019-06-17T09:59:45
| 2019-06-17T09:59:45
| 192,318,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
class SeeMee:
def youcanseemee(self):
return "you can see mee"
def __youcantseemee(self):
return "you cant see mee"
p = SeeMee()
print(p.youcanseemee())
print(p._SeeMee__youcantseemee())
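# Note: the leading double underscore triggers Python name mangling, so
# __youcantseemee is stored on the class as _SeeMee__youcantseemee, which is
# why the second print can still reach the "private" method.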
|
[
"49470138+krishnadhara@users.noreply.github.com"
] |
49470138+krishnadhara@users.noreply.github.com
|
3ff2138e33c5184658a9ea4eb2200b7f23f05ad5
|
df01d62513cd03c0b794dcebcd5d3de28084e909
|
/djbShop/djbShop/urls.py
|
8005492bfc9e6511dbaa843d4b98e9dc8374289a
|
[] |
no_license
|
codingEzio/code_py_bookreview_django2_by_example
|
32206fc30b7aab6f0a613d481089bf8bffd1380c
|
9c2d022364e1b69063bd7a0b741ebeeb5b7d4e13
|
refs/heads/master
| 2022-12-23T20:32:42.899128
| 2022-12-20T07:40:11
| 2022-12-20T07:40:11
| 207,786,302
| 1
| 0
| null | 2019-12-28T04:59:41
| 2019-09-11T10:35:44
|
Python
|
UTF-8
|
Python
| false
| false
| 644
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('cart/', include('cart.urls', namespace='cart')),
path('orders/', include('orders.urls', namespace='orders')),
path('', include('shop.urls', namespace='shop')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
if settings.DEBUG:
urlpatterns += static(prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"assassinste@gmail.com"
] |
assassinste@gmail.com
|
69370efa4a456f47d0ca107e5eee7219d752893c
|
b641e56db0f49dee6d27bea933255bff5caa6058
|
/app/__init__.py
|
2e99a6f67bb73c23a1950f7a7efd484e4990a7b7
|
[
"BSD-2-Clause"
] |
permissive
|
uniphil/kibera-school-project
|
ed2bffdc938878f2370ff458aef0582b48c1ba1f
|
4e77538af66065ea00d7a7559129026a72aa4353
|
refs/heads/master
| 2016-09-03T01:44:43.809992
| 2014-05-20T21:23:15
| 2014-05-20T21:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
# -*- coding: utf-8 -*-
"""
app
~~~
Static site generator for the Kibera School Project by Feedback Labs
This file initializes the app and manages configuration before importing
the other modules to do all the work.
"""
# from flask import Flask
# app = Flask('app')
# app.config['FREEZER_DESTINATION'] = app.config['BUILD_OUTPUT'] = '../output'
# app.config['CONTENT_FOLDER'] = '../content'
# app.config['CNAME'] = 'kibera-staging.precomp.ca'
# from . import content
# from . import views
# from . import static
|
[
"uniphil@gmail.com"
] |
uniphil@gmail.com
|
30ce2874708c452ce8a921d0df7df203cd619871
|
34d88082307281333ef4aeeec012a3ff5f8ec06e
|
/w3resource/List_/Q058.py
|
02e569e86ae9c120564deb6898a1645490b175de
|
[] |
no_license
|
JKChang2015/Python
|
a6f8b56fa3f9943682470ae57e5ad3266feb47a7
|
adf3173263418aee5d32f96b9ea3bf416c43cc7b
|
refs/heads/master
| 2022-12-12T12:24:48.682712
| 2021-07-30T22:27:41
| 2021-07-30T22:27:41
| 80,747,432
| 1
| 8
| null | 2022-12-08T04:32:06
| 2017-02-02T17:05:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
# -*- coding: UTF-8 -*-
# Q058
# Created by JKChang
# Thu, 31/08/2017, 16:35
# Tag:
# Description: 58. Write a Python program to replace the last element in a list with another list.
# Sample data : [1, 3, 5, 7, 9, 10], [2, 4, 6, 8]
# Expected Output: [1, 3, 5, 7, 9, 2, 4, 6, 8]
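# A minimal sketch of one possible solution (not part of the original file,
# which contains only the problem statement):
def replace_last(lst, other):
    # drop the last element, then append the other list's elements
    return lst[:-1] + other

print(replace_last([1, 3, 5, 7, 9, 10], [2, 4, 6, 8]))
# -> [1, 3, 5, 7, 9, 2, 4, 6, 8]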
|
[
"jkchang2015@gmail.com"
] |
jkchang2015@gmail.com
|
98aac32f8002f1242c4200d8fc88f6918443821c
|
3560a70fefb3f32a85206b66704e473e1dec8761
|
/docs/conf.py
|
42236bc483f2519db436abdd4f9966ec17483ca0
|
[
"MIT"
] |
permissive
|
francispoulin/hankel
|
626c0e7e38934217ff274ea5a012023bb3b983d8
|
0dc05b37df16ac3ff1df5ebdd87b9107bf0197b0
|
refs/heads/master
| 2021-01-11T14:37:12.944019
| 2017-02-13T08:07:42
| 2017-02-13T08:07:42
| 80,174,672
| 0
| 0
| null | 2017-01-27T02:13:41
| 2017-01-27T02:13:41
| null |
UTF-8
|
Python
| false
| false
| 5,915
|
py
|
# -*- coding: utf-8 -*-
#
# hankel documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 13 10:17:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
#'sphinx.ext.githubpages',
'numpydoc',
'sphinx.ext.autosummary',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting' # Ensures syntax highlighting works in notebooks (workaround)
]
autosummary_generate = True
numpydoc_show_class_members=False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hankel'
copyright = u'2017, Steven Murray'
author = u'Steven Murray'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import re, io,os
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# The short X.Y version.
version = find_version("..","hankel", "__init__.py")
# The full version, including alpha/beta/rc tags.
release = find_version("..","hankel", "__init__.py")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store','templates','**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'hankeldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hankel.tex', u'hankel Documentation',
u'Steven Murray', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hankel', u'hankel Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hankel', u'hankel Documentation',
author, 'hankel', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
[
"steven.murray@curtin.edu.au"
] |
steven.murray@curtin.edu.au
|
ae01ef1517dbf0dea633d118236da9dfb2265530
|
69a327a2af65d7252b624fe7cadd537eb51ca6d6
|
/String/BOJ_1259.py
|
d717f4cfec4f6568eee7fb4837c2ffb29c2ca38d
|
[] |
no_license
|
enriver/algorithm_python
|
45b742bd17c6a2991ac8095d13272ec4f88d9bf5
|
77897f2bf0241756ba6fd07c052424a6f4991090
|
refs/heads/master
| 2023-09-03T23:28:23.975609
| 2021-10-29T09:25:32
| 2021-10-29T09:25:32
| 278,907,031
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Palindrome number
import sys
for _ in range(100):
n=sys.stdin.readline()
isP=True
if int(n)==0:
break
else:
val=list()
re_val=list()
for i in n:
val.append(i)
re_val.append(i)
re_val.reverse()
val.pop(-1)
re_val.pop(0)
for i in range(len(val)):
if val[i]!=re_val[i]:
isP=False
if isP==False:
print('no')
else:
print('yes')
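# An equivalent, more idiomatic check (sketch, not part of the original file):
#   s = n.strip()
#   print('yes' if s == s[::-1] else 'no')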
|
[
"riverkeh@naver.com"
] |
riverkeh@naver.com
|
018beb2cab77071fe797d905f21a3473c8bffbd0
|
4e07727db834a45b2d77f0d9e54ed57796dc986d
|
/demo/picoalu.py
|
fc9ba7c3281dbd67618b12758831aa03c88c67b5
|
[] |
no_license
|
phanrahan/pico40
|
278447961d9d115f94b02e5322ba452442808e30
|
26cf9f6130666cc981ca27324bc4c1137eaf3252
|
refs/heads/master
| 2021-01-22T08:05:56.855776
| 2017-10-20T20:03:52
| 2017-10-20T20:03:52
| 81,874,575
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
import sys
from magma import *
from mantle import *
from parts.lattice.ice40.primitives.RAMB import ROMB
from pico.asm import *
from pico.cpu.seq import Sequencer
from pico.cpu.alu import Arith, Logic
from pico.cpu.ram import DualRAM
from boards.icestick import IceStick
icestick = IceStick()
icestick.Clock.on()
for i in range(8):
icestick.J1[i].input().on()
for i in range(8):
icestick.J3[i].output().on()
main = icestick.main()
ADDRN = 8
INSTN = 16
N = 8
def instructiondecode(insttype):
# alu instructions
logicinst = ROM2((1 << 0))(insttype)
arithinst = ROM2((1 << 1))(insttype)
aluinst = ROM2((1 << 0) | (1 << 1))(insttype)
# ld and st (immediate)
ldloinst = ROM4(1 << 8)(inst[12:16])
ldinst = ROM4(1 << 10)(inst[12:16])
stinst = ROM4(1 << 11)(inst[12:16])
# control flow instructions
jumpinst = ROM4(1 << 12)(inst[12:16])
#callinst = ROM4(1 << 13)(inst[12:16])
return logicinst, arithinst, aluinst, \
ldloinst, ldinst, stinst, \
jumpinst
# mov.py
# and.py
# or.py
# xor.py
# add.py
# sub.py
# and.py
#def prog():
# ldlo(r0, 0x55)
# ldlo(r1, 0x0f)
# and_(r1, r0)
# st(r1, 0)
# jmp(0)
# count.py
def prog():
ldlo(r0,0)
ldlo(r1,1)
loop = label()
add(r0,r1)
st(r0, 0)
jmp(loop)
mem = assemble(prog, 1<<ADDRN)
# program memory
romb = ROMB(mem)
wire( 1, romb.RCLKE )
wire( 1, romb.RE )
inst = romb.RDATA
# instruction decode
addr = inst[0:ADDRN]
imm = inst[0:N]
rb = inst[4:8]
ra = inst[8:12]
cc = inst[8:12]
op = inst[12:14]
insttype = inst[14:16]
logicinst, arithinst, aluinst, ldloinst, ldinst, stinst, jumpinst =\
instructiondecode(insttype)
jump = jumpinst
# romb's output is registered
# phase=0
# fetch()
# phase=1
# execute()
phase = TFF()(1)
# register write
regwr = LUT4((I0|I1|I2)&I3)(aluinst, ldloinst, ldinst, phase)
# sequencer
print 'Building sequencer'
seq = Sequencer(ADDRN)
pc = seq(addr, jump, phase)
wire(pc, romb.RADDR)
print 'Building input'
input = main.J1
print 'Building input mux'
regiomux = Mux(2, N)
regiomux(imm, input, ldinst)
print 'Building register input mux'
regimux = Mux(2, N)
print 'Building registers'
raval, rbval = DualRAM(4, ra, rb, ra, regimux, regwr)
# alu
print 'Building logic unit'
logicunit = Logic(N)
print 'Building arith unit'
arithunit = Arith(N)
print 'Building alu mux'
alumux = Mux(2, N)
print 'Wiring logic unit'
logicres = logicunit(raval, rbval, op[0], op[1])
print 'Wiring arith unit'
arithres = arithunit(raval, rbval, op[0], op[1])
wire(0, arithunit.CIN)
print 'Wiring alumux'
res = alumux(logicres, arithres, arithinst)
print 'Wiring register input mux'
ld = Or2()(ldinst, ldloinst)
regimux(res, regiomux, ld) # full io
print 'Wiring output'
output = Register(N, ce=True)
owr = And2()(stinst, phase)
output(raval, CE=owr)
wire(output, main.J3)
compile(sys.argv[1], main)
|
[
"hanrahan@cs.stanford.edu"
] |
hanrahan@cs.stanford.edu
|
f7b6b12562a778214d8a22df2c85123c9eb66bad
|
53edf6b0f4262ee76bb4e3b943394cfeafe54865
|
/simulation_codes/_archived/PREDCORR_1D_TSC/py2/save_routines.py
|
266faaaf7eebc39daea92fce756fa412c992f8b1
|
[] |
no_license
|
Yoshi2112/hybrid
|
f86265a2d35cb0a402ba6ab5f718717d8eeb740c
|
85f3051be9368bced41af7d73b4ede9c3e15ff16
|
refs/heads/master
| 2023-07-07T21:47:59.791167
| 2023-06-27T23:09:23
| 2023-06-27T23:09:23
| 82,878,960
| 0
| 1
| null | 2020-04-16T18:03:59
| 2017-02-23T03:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 22 10:44:46 2017
@author: iarey
"""
import numpy as np
import pickle
import os
import sys
from shutil import rmtree
import simulation_parameters_1D as const
from simulation_parameters_1D import generate_data, generate_plots, drive, save_path, NX, ne, density
from simulation_parameters_1D import idx_bounds, Nj, species_lbl, temp_type, dist_type, mass, charge, velocity, sim_repr, Tpar, Tper, temp_color
def manage_directories():
print 'Checking directories...'
if (generate_data == 1 or generate_plots == 1) == True:
if os.path.exists('%s/%s' % (drive, save_path)) == False:
os.makedirs('%s/%s' % (drive, save_path)) # Create master test series directory
print 'Master directory created'
path = ('%s/%s/run_%d' % (drive, save_path, const.run_num)) # Set root run path (for images)
if os.path.exists(path) == False:
os.makedirs(path)
print 'Run directory created'
else:
print 'Run directory already exists'
overwrite_flag = raw_input('Overwrite? (Y/N) \n')
if overwrite_flag.lower() == 'y':
rmtree(path)
os.makedirs(path)
elif overwrite_flag.lower() == 'n':
sys.exit('Program Terminated: Change run_num in simulation_parameters_1D')
else:
sys.exit('Unfamiliar input: Run terminated for safety')
return
def store_run_parameters(dt, data_dump_iter):
d_path = ('%s/%s/run_%d/data' % (drive, save_path, const.run_num)) # Set path for data
manage_directories()
if os.path.exists(d_path) == False: # Create data directory
os.makedirs(d_path)
# Single parameters
params = dict([('seed', const.seed),
('Nj', Nj),
('dt', dt),
('NX', NX),
('dxm', const.dxm),
('dx', const.dx),
('cellpart', const.cellpart),
('B0', const.B0),
('ne', ne),
('Te0', const.Te0),
('ie', const.ie),
('theta', const.theta),
('data_dump_iter', data_dump_iter),
('max_rev', const.max_rev),
('orbit_res', const.orbit_res),
('freq_res', const.freq_res),
('run_desc', const.run_description),
('method_type', 'PREDCORR'),
('particle_shape', 'TSC')
])
h_name = os.path.join(d_path, 'Header.pckl') # Data file containing dictionary of variables used in run
with open(h_name, 'wb') as f:
pickle.dump(params, f)
f.close()
print 'Header file saved'
# Particle values: Array parameters
p_file = os.path.join(d_path, 'p_data')
np.savez(p_file, idx_bounds = idx_bounds,
species_lbl = species_lbl,
temp_color = temp_color,
temp_type = temp_type,
dist_type = dist_type,
mass = mass,
charge = charge,
velocity = velocity,
density = density,
sim_repr = sim_repr,
Tpar = Tpar,
Tper = Tper)
print 'Particle data saved'
return
def save_data(dt, data_iter, qq, pos, vel, Ji, E, B, Ve, Te, dns):
d_path = ('%s/%s/run_%d/data' % (drive, save_path, const.run_num)) # Set path for data
r = qq / data_iter # Capture number
d_filename = 'data%05d' % r
d_fullpath = os.path.join(d_path, d_filename)
np.savez(d_fullpath, pos = pos, vel = vel, E = E[1:NX+1, 0:3], B = B[1:NX+2, 0:3], J = Ji[1:NX+1],
dns = dns[1:NX+1], Ve = Ve[1:NX+1], Te = Te[1:NX+1]) # Data file for each iteration
print 'Data saved for iteration {}'.format(qq)
|
[
"joshua.s.williams@uon.edu.au"
] |
joshua.s.williams@uon.edu.au
|
1ea83f90ea8db170062f45f51f1716794d7d0fc5
|
f0a5ad7b8aa39f51f233391fead0da3eabecc4ee
|
/.history/toolbox/sheets_20191128120456.py
|
54667dc8f469a984db42e907ae4f73a38cf6d539
|
[] |
no_license
|
OseiasBeu/webScrapping
|
e0a524847e55b24dbbd3d57bbe7fa43b4e101f48
|
1e72c7551aea355a891043baecfcbab8a89e719a
|
refs/heads/master
| 2022-10-25T18:12:50.858653
| 2020-06-18T01:29:24
| 2020-06-18T01:29:24
| 224,681,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import numpy as np
# import gspread
def insertPlanMiddleware(rows):
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_SPREADSHEET_ID = '1QSGAY_WyamEQBZ4ITdAGCVAbavR9t-D-4gPQx4Sbf7g'
SAMPLE_RANGE_NAME = 'middleware'
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'C:\\Users\\beuo\\Documents\\Demandas\\AtualizaMiddleIntegrationVtex\\toolbox\\credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# gs= gspread.authorize(service)
# gs
sheet = service.spreadsheets()
# result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
# range=SAMPLE_RANGE_NAME).execute()
# values = result.get('values', [])
# print(values)
rows = rows[['timeStamp','clienteEstado','warehouseId','Pendentes de integração']]
rows = np.asarray(rows)
# range_ = 'my-range' # TODO: Update placeholder value.
# How the input data should be interpreted.
for row in rows:
value_input_option = 'RAW' # TODO: Update placeholder value.
value_range_body = {"1","3","4","5"}
request = service.spreadsheets().values().update(spreadsheetId=SAMPLE_SPREADSHEET_ID, range=SAMPLE_RANGE_NAME, valueInputOption=value_input_option, body=value_range_body)
response = request.execute()
print(response)
|
[
"oseiasbeu@outlook.com"
] |
oseiasbeu@outlook.com
|
9b1b03442cc1fc5f738453149dd093c7e29e3127
|
3f5c0e166f8d88ce9aa9170dc4a80a5b64c64c6a
|
/melange-testing/tests/app/soc/modules/gsoc/views/test_accepted_orgs.py
|
aad371c70d5f7d5208def2a91af8f8303e948b7e
|
[] |
no_license
|
praveen97uma/GSoC-Docs
|
a006538b1d05ecab1a105139e4eb2f2cff36b07f
|
40c97239887a5c430be28a76cc2cd7a968511ce0
|
refs/heads/master
| 2021-01-23T16:26:56.949299
| 2012-06-14T19:31:30
| 2012-06-14T19:31:30
| 1,505,012
| 1
| 1
| null | 2020-02-08T05:17:26
| 2011-03-21T01:39:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the view for GSoC accepted orgs.
"""
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from tests.test_utils import GSoCDjangoTestCase
class AcceptedOrgsPageTest(GSoCDjangoTestCase):
"""Tests the page to display accepted organization.
"""
def setUp(self):
self.init()
self.url1 = '/gsoc/accepted_orgs/' + self.gsoc.key().name()
self.url2 = '/gsoc/program/accepted_orgs/' + self.gsoc.key().name()
self.url3 = '/program/accepted_orgs/' + self.gsoc.key().name()
def assertAcceptedOrgsPageTemplatesUsed(self, response):
"""Asserts that all the required templates to render the page were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/accepted_orgs/base.html')
self.assertTemplateUsed(response,
'v2/modules/gsoc/accepted_orgs/_project_list.html')
self.assertTemplateUsed(response, 'v2/soc/_program_select.html')
self.assertTemplateUsed(response,
'v2/modules/gsoc/accepted_orgs/_project_list.html')
self.assertTemplateUsed(response, 'v2/soc/list/lists.html')
self.assertTemplateUsed(response, 'v2/soc/list/list.html')
def testAcceptedOrgsAreDisplayedOnlyAfterTheyAreAnnounced(self):
"""Tests that the list of accepted organizations can be accessed only after
the organizations have been announced.
"""
self.timeline.orgSignup()
response = self.get(self.url3)
self.assertResponseForbidden(response)
response = self.get(self.url2)
self.assertResponseForbidden(response)
response = self.get(self.url1)
self.assertResponseForbidden(response)
def testAcceptedOrgsAreDisplayedAfterOrganizationsHaveBeenAnnounced(self):
"""Tests that the list of the organizations can not be accessed before
organizations have been announced.
"""
org_properties = {'scope': self.gsoc, 'status': 'active'}
seeder_logic.seed(GSoCOrganization, org_properties)
seeder_logic.seed(GSoCOrganization, org_properties)
self.timeline.orgsAnnounced()
response = self.get(self.url1)
self.assertResponseOK(response)
self.assertAcceptedOrgsPageTemplatesUsed(response)
list_data = self.getListData(self.url1, 0)
#Third organization is self.gsoc
self.assertEqual(len(list_data), 3)
response = self.get(self.url2)
self.assertResponseOK(response)
self.assertAcceptedOrgsPageTemplatesUsed(response)
list_data = self.getListData(self.url2, 0)
self.assertEqual(len(list_data), 3)
response = self.get(self.url3)
self.assertResponseOK(response)
self.assertAcceptedOrgsPageTemplatesUsed(response)
list_data = self.getListData(self.url3, 0)
self.assertEqual(len(list_data), 3)
|
[
"praveen97uma@gmail.com"
] |
praveen97uma@gmail.com
|
89fda3fd3aa06a78e69f6ff23728534a1850dea0
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/nb836onw9bek4FPDt_4.py
|
00c57b5b188d38dbf5b5076fa134b394a85da9e9
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
"""
Given a sentence, return the number of words which have the **same first and
last letter**.
### Examples
count_same_ends("Pop! goes the balloon") ➞ 1
count_same_ends("And the crowd goes wild!") ➞ 0
count_same_ends("No I am not in a gang.") ➞ 1
### Notes
* Don't count single character words (such as "I" and "A" in example #3).
* The function should not be case sensitive, meaning a capital "P" should match with a lowercase one.
* Mind the punctuation!
* Bonus points indeed for using regex!
"""
def count_same_ends(txt):
count = 0
a = "".join([i.lower() for i in txt if i.isalpha() or i.isspace()]).split()
for i in a:
if len(i) > 1 and i[0] == i[-1]:
count += 1
return count
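# Sanity checks against the examples in the docstring:
assert count_same_ends("Pop! goes the balloon") == 1
assert count_same_ends("And the crowd goes wild!") == 0
assert count_same_ends("No I am not in a gang.") == 1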
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5003a32bcda2bbd12438f4b9346f880068dfb407
|
bc108434d5f485a5ca593942b0fbe2f4d044ebda
|
/pp/grpc/helloworld_protobuf.py
|
f7a1dedccb7a472def9fed97b93e26e7b8db1fb7
|
[] |
no_license
|
js-ts/AI
|
746a34493a772fb88aee296f463122b68f3b299d
|
353e7abfa7b02b45d2b7fec096b58e07651eb71d
|
refs/heads/master
| 2023-05-29T16:19:03.463999
| 2021-06-22T05:49:44
| 2021-06-22T05:49:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
import numpy as np
import base64
import json
import helloworld_pb2
data = np.random.rand(2, 2)
tmp = helloworld_pb2.ModelRequest()
tmp.a = 1
tmp.b = 2
tmp.data = base64.b64encode(data)
print(str(tmp))
tmp1 = helloworld_pb2.ModelRequest()
tmp1.ParseFromString(tmp.SerializeToString())
print('tmp1', str(tmp1))
tmp2 = helloworld_pb2.ModelRequest(data=base64.b64encode(data), a=2, b=2)
print('tmp2', str(tmp2))
# tmp3 = {'a': 1, 'b': 2, 'data': 'data'}
# print(json.dumps(tmp3))
data = helloworld_pb2.DataExample()
Blob = helloworld_pb2.DataExample.Blob
data.data = base64.b64encode(np.random.rand(1,2))
data.c = 10
data.d = 10
blob1 = data.blobs.add()
# blob1 = Blob(info='123', data=123) # wrong: a standalone Blob never gets attached to data.blobs; use add()/append()/extend() instead
blob1.info = str(123)
blob1.data = 123
data.blobs.append(Blob(info='234', data=234))
data.blobs.extend([Blob(info=str(i), data=i) for i in range(5)])
print(str(data))
|
[
"wenyu.lyu@gmail.com"
] |
wenyu.lyu@gmail.com
|
8017fb462a269f73fa8c9ffa3fee73675aa70275
|
d37a19ab3bcaba6e808a18df411c653c644d27db
|
/Year1/ca117/Lab11.1/triathlon_v3_111.py
|
448776ad2f5a0f4641f48466df4f272a8bdccb60
|
[] |
no_license
|
Andrew-Finn/DCU
|
9e7009dac9a543aaade17e9e94116259dcc1de20
|
013789e8150d80d3b3ce2c0c7ba968b2c69a7ce0
|
refs/heads/master
| 2023-02-21T05:13:42.731828
| 2022-02-14T12:39:20
| 2022-02-14T12:39:20
| 157,438,470
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
class Triathlete:
def __init__(self, name, tid):
self.name = name
self.tid = tid
self.times = {}
def __str__(self):
return "Name: {}\nID: {}\nRace time: {}".format(
self.name, self.tid, self.race_time())
def add_time(self, sport, time):
self.times[sport] = time
def get_time(self, sport):
return self.times[sport]
def race_time(self):
return sum(self.times.values())
def __lt__(self, other):
return self.race_time() < other.race_time()
def __eq__(self, other):
return self.race_time() == other.race_time()
class Triathlon:
def __init__(self):
self.athletes = []
def __str__(self):
return "\n".join(str(a) for a in sorted(self.athletes, key=lambda x: x.name))
def add(self, athlete):
self.athletes.append(athlete)
def remove(self, tid):
ind = next(a for a in self.athletes if a.tid == tid)
self.athletes.remove(ind)
def lookup(self, tid):
return next((a for a in self.athletes if a.tid == tid), None)
def best(self):
return min(self.athletes, key=lambda a: a.race_time())
def worst(self):
return max(self.athletes, key=lambda a: a.race_time())
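# Minimal usage sketch (names and times are illustrative):
#   t = Triathlon()
#   a = Triathlete("Alice", 1)
#   a.add_time("swim", 1200)
#   a.add_time("bike", 3600)
#   a.add_time("run", 2400)
#   t.add(a)
#   print(t.best())  # Alice is the only entrant, so also the best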
|
[
"git@afinn.me"
] |
git@afinn.me
|
194840074149cc1c77f461d43ac7f0f971bea662
|
7698a74a06e10dd5e1f27e6bd9f9b2a5cda1c5fb
|
/pdb_search/quire_pdb_2014_mw70to250_tversky/postprocess_uniprot_pdb_lig_links_lists_max_num.py
|
e4cdbb9594d067e9ee24a33569e8d5f303c8e0d5
|
[] |
no_license
|
kingbo2008/teb_scripts_programs
|
ef20b24fe8982046397d3659b68f0ad70e9b6b8b
|
5fd9d60c28ceb5c7827f1bd94b1b8fdecf74944e
|
refs/heads/master
| 2023-02-11T00:57:59.347144
| 2021-01-07T17:42:11
| 2021-01-07T17:42:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,580
|
py
|
## Trent Balius, Shoichet group, UCSF, 2014.08.08
import urllib, urllib2, math
import scrape_pdb_for_uniprot as spfu
import scrape_pdb_for_lig_mod as spfl
import scrape_zinc_zincid as szzi
import tanimoto_cal_axon as tancal
def find_linked_list_count(lines):
list = []
sublist = []
#sublist.append(lig1)
#sublist.append(lig2)
#print "lines = ", lines
while (len(lines)>0):
#print "sublist = ",sublist
num = len(lines)
for i in range(len(lines)):
#print lines[i]
splitline = lines[i].split()
lig1 = splitline[0]
lig2 = splitline[1]
if (lig1 in sublist) and not (lig2 in sublist):
sublist.append(lig2)
del lines[i]
break
elif (lig2 in sublist) and not (lig1 in sublist):
sublist.append(lig1)
del lines[i]
break
elif ((lig2 in sublist) and (lig1 in sublist)):
del lines[i]
break
elif (len(sublist) == 0):
sublist.append(lig1)
sublist.append(lig2)
del lines[i]
break
#print num, len(lines)
if (num == len(lines) or len(lines) == 0):
#print "I AM HERE ", sublist
list.append(sublist)
sublist = []
#sublist.append(lig1)
#sublist.append(lig2)
#print "list = ",list
max = 0
for sublist in list:
#print len(sublist)
#print sublist
if (max < len(sublist)):
max = len(sublist)
return max
print " stuff that matters::::::::::"
#filein = open("uniprot_lig_tanamoto_mwdiff_formula_diff.txt",'r')
filein = open("uniprot_lig_tversky_mwdiff_formula_diff_new.txt",'r')
fileout = open("uniprot_max_linked_list_size.txt",'w')
fileout2 = open("uniprot_lig_tversky_mwdiff_formula_diff_reduced.txt",'w')
uniprot_old = ''
sublist = []
lines = []
writelines = ''
for line in filein:
if "This" in line:
continue
print line
splitline = line.split()
uniprot = splitline[0].strip(',')
lig1 = splitline[2].strip(',')
lig2 = splitline[3].strip(',')
#mfd = float(splitline[13])
mfd = float(splitline[16])
tc = float(splitline[6])
#if (mfd>4.0): # if the molecular formula difference is greater than 4 then skip the line (pair)
if (mfd>1.0): # if the molecular formula difference in heavy atoms is greater than 1 then skip the line (pair)
print "skipped"
continue
#if (mfd == 0.0 and tc == 1.0):
# print "skiped because ligs are isomers"
# continue
if uniprot_old != uniprot:
max = find_linked_list_count(lines)
print uniprot_old, max
if (max > 5):
fileout.write(uniprot_old+" "+str(max)+'\n')
fileout2.write(writelines) # write out all lines for the uniprot entries that pass (max > 5) and the mfd filter
lines = []
writelines = ''
uniprot_old = uniprot
lines.append(lig1+' '+lig2)
writelines=writelines+line # accumulate this uniprot's lines for possible write-out
max = find_linked_list_count(lines)
print uniprot_old, max
if (max > 5):
fileout.write(uniprot_old+" "+str(max)+'\n')
fileout2.write(writelines) # write out all lines for the uniprot entries that pass (max > 5) and the mfd filter
filein.close()
fileout.close()
fileout2.close()
|
[
"tbalius@gimel.cluster.ucsf.bkslab.org"
] |
tbalius@gimel.cluster.ucsf.bkslab.org
|
8daa3a2822a6f1ca6fea3e85851a2bdb5803592e
|
c053101986b0c9884c2003ec1c86bdf69c41d225
|
/normal-tips/WordCloud/CN_cloud.py
|
9bb43bd4f2992c1810594970824ab5e29931011d
|
[] |
no_license
|
JZDBB/Python-ZeroToAll
|
4c32a43044a581941943f822e81fa37df6ef8dd4
|
67a1e9db0775d897dcc5c0d90f663de9788221a3
|
refs/heads/master
| 2021-05-09T18:36:45.358508
| 2019-11-01T08:15:54
| 2019-11-01T08:15:54
| 119,166,874
| 0
| 0
| null | 2019-10-29T20:59:04
| 2018-01-27T13:29:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,430
|
py
|
# # -*- coding: utf-8 -*-
# import jieba
# import os
# import codecs
# from scipy.misc import imread
# import matplotlib as mpl
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
#
#
# class GetWords(object):
# def __init__(self, dict_name, file_list, dic_list):
# self.dict_name = dict_name
# self.file_list = file_list
# self.dic_list = dic_list
#
# # Load the custom dictionary
# def get_dic(self):
# dic = open(self.dict_name, 'r')
# while 1:
# line = dic.readline().strip()
# self.dic_list.append(line)
# if not line:
# break
# pass
#
# def get_word_to_cloud(self):
# for file in self.file_list:
# with codecs.open('../spider/' + file, "r", encoding='utf-8', errors='ignore') as string:
# string = string.read().upper()
# res = jieba.cut(string, HMM=False)
# reslist = list(res)
# wordDict = {}
# for i in reslist:
# if i not in self.dic_list:
# continue
# if i in wordDict:
# wordDict[i] = wordDict[i] + 1
# else:
# wordDict[i] = 1
#
# coloring = imread('test.jpeg')
#
# wc = WordCloud(font_path='msyh.ttf', mask=coloring,
# background_color="white", max_words=50,
# max_font_size=40, random_state=42)
#
# wc.generate_from_frequencies(wordDict)
#
# wc.to_file("%s.png" % (file))
#
#
# def set_dic():
# _curpath = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# settings_path = os.environ.get('dict.txt')
# if settings_path and os.path.exists(settings_path):
# jieba.set_dictionary(settings_path)
# elif os.path.exists(os.path.join(_curpath, 'data/dict.txt.big')):
# jieba.set_dictionary('data/dict.txt.big')
# else:
# print("Using traditional dictionary!")
#
#
# if __name__ == '__main__':
# set_dic()
# file_list = ['data_visualize.txt', 'data_dev.txt', 'data_mining.txt', 'data_arc.txt', 'data_analysis.txt']
# dic_name = 'dict.txt'
# dic_list = []
# getwords = GetWords(dic_name, file_list, dic_list)
# getwords.get_dic()
# getwords.get_word_to_cloud()
# -*-coding:utf-8-*-
### Generate a word cloud from a txt file
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import jieba
from PIL import Image
import numpy as np
import os
from os import path
text = open("fenciHou1.txt", "rb").read().decode('utf-8')
# text = open("cv.txt", "rb").read()
# jieba word segmentation
# wordlist = jieba.cut(text, cut_all=True)
# wl = " ".join(wordlist)
# print(wl)  # dump the segmented text
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
background_Image = np.array(Image.open(path.join(d, "1545566997_781739.png")))
# or
# background_Image = imread(path.join(d, "mask1900.jpg"))
# Extract the color palette of the background image
img_colors = ImageColorGenerator(background_Image)
# Write the segmented text to a file
# fenciTxt = open("fenciHou.txt","w+", encoding='utf-8')
# fenciTxt.writelines(wl)
# fenciTxt.close()
# Configure the word cloud
wc = WordCloud(background_color="black", # background color
margin = 2, # page margin
mask=background_Image, # mask image: sets the shape of the cloud
max_words=500, # maximum number of words displayed
# stopwords = "", # stop words
font_path="fangsong_GB2312.ttf",
# use a Chinese font so CJK words render (the default DroidSansMono.ttf has no CJK glyphs)
max_font_size=200, # maximum font size
min_font_size=5, # minimum font size
collocations=False, # do not show repeated word pairs
colormap='viridis', # matplotlib colormap; change the name to change the overall style
random_state=30, # number of random states, i.e. candidate color schemes
mode='RGB'
)
myword = wc.generate(text) # generate the word cloud
wc.recolor(color_func=img_colors)
# Save the image
wc.to_file('12.png')
# Display the word cloud
plt.imshow(myword)
plt.axis("off")
plt.show()
|
[
"oxuyining@gmail.com"
] |
oxuyining@gmail.com
|
6e56f5fe3ef0c5ddaeeabc6fa8213b24e7c6e253
|
6b7ff6d09f1da2793f4cc2d3f319256580fbae0c
|
/astroquery/vo_conesearch/async.py
|
571c6d6d91008c9db5a4f15c4ef5f22b917ec926
|
[] |
no_license
|
AyushYadav/astroquery
|
39e4e696441fdfe84e9e66cf905f6febde3080a3
|
4645d11f56f96404870d284603c024a3de0d8198
|
refs/heads/master
| 2020-05-18T13:02:06.068072
| 2017-05-21T17:16:15
| 2017-05-21T17:16:15
| 86,921,243
| 1
| 0
| null | 2017-04-01T15:05:25
| 2017-04-01T15:05:25
| null |
UTF-8
|
Python
| false
| false
| 2,323
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Asynchronous VO service requests."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# ASTROPY
from astropy.utils.compat.futures import ThreadPoolExecutor
__all__ = ['AsyncBase']
class AsyncBase(object):
"""Base class for asynchronous VO service requests
using :py:class:`concurrent.futures.ThreadPoolExecutor`.
Service request will be forced to run in silent
mode by setting ``verbose=False``. Warnings are controlled
by :py:mod:`warnings` module.
.. note::
Methods of the attributes can be accessed directly,
with priority given to ``executor``.
Parameters
----------
func : function
The function to run.
args, kwargs
Arguments and keywords accepted by the service request
function to be called asynchronously.
Attributes
----------
executor : :py:class:`concurrent.futures.ThreadPoolExecutor`
Executor running the function on single thread.
future : :py:class:`concurrent.futures.Future`
Asynchronous execution created by ``executor``.
"""
def __init__(self, func, *args, **kwargs):
kwargs['verbose'] = False
self.executor = ThreadPoolExecutor(1)
self.future = self.executor.submit(func, *args, **kwargs)
def __getattr__(self, what):
"""Expose ``executor`` and ``future`` methods."""
try:
return getattr(self.executor, what)
except AttributeError:
return getattr(self.future, what)
def get(self, timeout=None):
"""Get result, if available, then shut down thread.
Parameters
----------
timeout : int or float
Wait the given amount of time in seconds before
obtaining result. If not given, wait indefinitely
until function is done.
Returns
-------
result
Result returned by the function.
Raises
------
Exception
Errors raised by :py:class:`concurrent.futures.Future`.
"""
try:
result = self.future.result(timeout=timeout)
finally:
self.executor.shutdown(wait=False)
return result
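# Usage sketch (some_service_func is a hypothetical request function, not part
# of this module):
#   async_req = AsyncBase(some_service_func, arg1, arg2)
#   result = async_req.get(timeout=30)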
|
[
"lim@stsci.edu"
] |
lim@stsci.edu
|
72e68a94ce144b60b07b3ec9392dddb8e4c927c4
|
3a02ec695af95e5d5f5c4f14ab393cd2ad709be3
|
/kochat/model/fallback/__init__.py
|
748d571c72c6d0720a72f9bead1e08855a573784
|
[
"Apache-2.0"
] |
permissive
|
seunghyunmoon2/kochat
|
50f0db168ca5163e6926331d8d81ebf3a26e4f7e
|
f5a5df38c7c24080855f9279450195bc0a8eae74
|
refs/heads/master
| 2022-12-23T17:07:49.856243
| 2020-10-02T06:01:06
| 2020-10-02T06:01:06
| 278,004,648
| 0
| 0
|
Apache-2.0
| 2020-07-08T06:08:38
| 2020-07-08T06:08:38
| null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
"""
@author Hyunwoong
@since 6/28/2020
@see https://github.com/gusdnd852
"""
from kochat.model.intent.cnn import CNN
from kochat.model.intent.lstm import LSTM
__ALL__ = [CNN, LSTM]
|
[
"gusdnd852@naver.com"
] |
gusdnd852@naver.com
|
33ea27c8dee8244bf11dc728aa76c3f6757fe576
|
b1aa3c599c5d831444e0ae4e434f35f57b4c6c45
|
/month1/week4/python_class11/main_operate.py
|
984d84fa3373dc4dd1f9d5a380f1fe28a39feca9
|
[] |
no_license
|
yunyusha/xunxibiji
|
2346d7f2406312363216c5bddbf97f35c1e2c238
|
f6c3ffb4df2387b8359b67d5e15e5e33e81e3f7d
|
refs/heads/master
| 2020-03-28T12:31:17.429159
| 2018-09-11T11:35:19
| 2018-09-11T11:35:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
"""Main Operate
Usage:
main_operate.py [-gkdtz] <from> <to> <time>
main_operate.py [-GDKTZ] <from> <to> <time>
Options:
"""
from docopt import docopt
from tickets import Ticket
import time
if __name__ == "__main__":
arguments = docopt(__doc__)
# Query train information for the categories selected by the user
kinds = []
if arguments.get("-g") is True or arguments.get("-G") is True:
kinds.append('G')
if arguments.get("-k") is True or arguments.get("-K") is True:
kinds.append("K")
if arguments.get("-d") is True or arguments.get("-D") is True:
kinds.append("D")
if arguments.get("-t") is True or arguments.get("-T") is True:
kinds.append("T")
if arguments.get("-z") is True or arguments.get("-Z") is True:
kinds.append("Z")
if len(kinds) == 0:
kinds = ["K","G", "D", "T", "Z"]
# Get the date on which the program is run
time_date = time.strftime("%Y-%m-%d", time.localtime())
# Get the travel date the user entered on the command line
time_input = arguments.get("<time>").split('-')
# Zero-pad each part of the user-entered date to two digits
time_input = list(map(lambda x: ("0"+x) if len(x) == 1 else x, time_input))
time_input = "-".join(time_input)
# Check that the requested date is inside the 12306 query window; zero-padded
# ISO dates compare correctly as plain strings
if time_input >= time_date:
dic = {'from':arguments.get('<from>'), "to":arguments.get('<to>'),'time':time_input}
ticket = Ticket(**dic)
ticket.check_ticket()
ticket.check_train(kinds)
else:
print('Sorry, the requested train date is outside the allowed query window')
|
[
"576462286@qq.com"
] |
576462286@qq.com
|
a8266a0805a71be951a9468186ac375deefcf2a6
|
ce6f8510f6a2fd48b7037c1c8448f719fd54f8b4
|
/piecrust/admin/scm/git.py
|
b5ae5a0695d49f6903c42acf33f94be881e9eef4
|
[
"Apache-2.0"
] |
permissive
|
ludovicchabant/PieCrust2
|
bd014f8aa880ec2b2360a298263d2de279d4252f
|
ebb567b577cbd2efb018183b73eff05c7a12c318
|
refs/heads/master
| 2023-01-09T17:09:56.991209
| 2022-12-31T00:48:04
| 2022-12-31T00:48:04
| 23,298,052
| 46
| 9
|
NOASSERTION
| 2022-12-31T01:05:07
| 2014-08-25T01:29:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
import os
import logging
import tempfile
import subprocess
from .base import SourceControl, RepoStatus, _s
logger = logging.getLogger(__name__)
class GitSourceControl(SourceControl):
def __init__(self, root_dir, cfg):
super(GitSourceControl, self).__init__(root_dir, cfg)
self.git = cfg.get('exe', 'git')
def getStatus(self):
res = RepoStatus()
st_out = self._run('status', '-s')
for line in st_out.split('\n'):
if not line:
continue
if line.startswith('?? '):
path = line[3:].strip()
if path[-1] == '/':
import glob
res.new_files += [
f for f in glob.glob(path + '**', recursive=True)
if f[-1] != '/']
else:
res.new_files.append(path)
elif line.startswith(' M '):
res.edited_files.append(line[3:])
return res
def _doCommit(self, paths, message, author):
self._run('add', *paths)
# Create a temp file with the commit message.
f, temp = tempfile.mkstemp()
with os.fdopen(f, 'w') as fd:
fd.write(message)
# Commit and clean up the temp file.
try:
commit_args = list(paths) + ['-F', temp]
if author:
commit_args += ['--author="%s"' % author]
self._run('commit', *commit_args)
finally:
os.remove(temp)
def _run(self, cmd, *args, **kwargs):
exe = [self.git]
exe.append(cmd)
exe += args
logger.debug("Running Git: " + str(exe))
proc = subprocess.Popen(
exe, stdout=subprocess.PIPE, cwd=self.root_dir)
out, _ = proc.communicate()
encoded_out = _s(out)
return encoded_out
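# Usage sketch (paths and config are illustrative):
#   scm = GitSourceControl('/path/to/site', {'exe': 'git'})
#   status = scm.getStatus()
#   print(status.new_files, status.edited_files)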
|
[
"ludovic@chabant.com"
] |
ludovic@chabant.com
|
e5ec008185718edf0ec86d4b41829b57366e0471
|
e75751f44e1f38eede027143fd43b83606e4cb13
|
/proyecto_final/apps/tasks/models.py
|
51a4cd65c5845e55b67bf2ac51b036137c6e9f52
|
[] |
no_license
|
stephfz/exam_prep_cd-django
|
8fef23c4ae1639c213b2d4a143fe1f5996d3b315
|
4633d65b4d86e29fff3f1c9fd09d690d9416f2f2
|
refs/heads/master
| 2023-05-10T14:55:15.249503
| 2021-05-25T16:11:38
| 2021-05-25T16:11:38
| 369,047,889
| 0
| 1
| null | 2021-06-15T00:31:31
| 2021-05-20T01:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
from django.db import models
import re
from django.core import validators
from django.core.exceptions import ValidationError
from django.contrib.auth.hashers import check_password, make_password
MIN_FIELD_LENGHT = 4
def ValidarLongitudMinima(cadena):
if len(cadena) < MIN_FIELD_LENGHT:
raise ValidationError(
f"{cadena} tiene deberia tener mas de {MIN_FIELD_LENGHT} caracteres"
)
def ValidarEmail(cadena):
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
if not EMAIL_REGEX.match(cadena):
raise ValidationError(
f'{cadena} is not a valid e-mail'
)
class User(models.Model):
name = models.CharField(max_length=45, blank = False, null =False, validators=[ValidarLongitudMinima])
lastname = models.CharField(max_length=45, blank = False, null = False , validators=[ValidarLongitudMinima])
email = models.CharField(max_length=50, validators=[ValidarEmail])
password = models.CharField(max_length=20, blank=False)
fecha_nacimiento = models.DateField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
self.password = make_password(self.password)
super(User, self).save(*args, **kwargs)
@staticmethod # usage: User.authenticate('email', 'password')
# without the decorator you would need an instance: userObj = User(); userObj.authenticate(email, password)
def authenticate(email, password):
results = User.objects.filter(email = email)
if len(results) == 1:
user = results[0]
bd_password = user.password
if check_password(password, bd_password):
return user
return None
class Task(models.Model):
name = models.CharField(max_length=45, blank = False, null =False, validators=[ValidarLongitudMinima])
due_date = models.DateField()
completed = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, related_name="tasks", on_delete=models.CASCADE)
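# Usage sketch (credentials are illustrative):
#   user = User.authenticate('someone@example.com', 'plain-password')
#   if user is not None:
#       ...  # check_password matched against the hash stored by save()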
|
[
"stephfz@gmail.com"
] |
stephfz@gmail.com
|
4ad5a935680d32c5f47db945400a93b5d7813a54
|
7235051c8def972f3403bf10155e246c9c291c58
|
/angola_erp/oficinas/doctype/ordem_de_reparacao/test_ordem_de_reparacao.py
|
961f28830c5549ca52ea27afe99aed3a83b82258
|
[
"MIT"
] |
permissive
|
proenterprise/angola_erp
|
8e79500ce7bcf499fc344948958ae8e8ab12f897
|
1c171362b132e567390cf702e6ebd72577297cdf
|
refs/heads/master
| 2020-06-03T08:51:34.467041
| 2019-06-07T01:35:54
| 2019-06-07T01:35:54
| 191,514,859
| 1
| 0
|
NOASSERTION
| 2019-06-12T06:53:41
| 2019-06-12T06:53:41
| null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Helio de Jesus and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestOrdemdeReparacao(unittest.TestCase):
pass
|
[
"hcesar@gmail.com"
] |
hcesar@gmail.com
|
6bf991a763e479c7e1cf1405735bd5a5b5bd5e30
|
14f455693213cae4506a01b7d0591e542c38de79
|
/vendor/python-munin/plugins/aws_elb_requests
|
b846f9ba72903b7127b9edce58997a3bb6785c5e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"X11-distribute-modifications-variant"
] |
permissive
|
Cvalladares/Newsblur_Instrumented
|
f0b14d063759973330f202108a7eed3a29bcc033
|
4d6ee6aa9713879b1e2550ea5f2dbd819c73af12
|
refs/heads/master
| 2022-12-29T15:19:29.726455
| 2019-09-03T17:09:04
| 2019-09-03T17:09:04
| 206,130,022
| 0
| 0
|
MIT
| 2022-12-10T06:00:26
| 2019-09-03T17:07:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,411
|
#!/usr/bin/env python
import datetime
import os
import sys
import boto
from boto.ec2.cloudwatch import CloudWatchConnection
from vendor.munin import MuninPlugin
class AWSCloudWatchELBRequestsPlugin(MuninPlugin):
category = "AWS"
args = "-l 0 --base 1000"
vlabel = "Requests/sec"
info = "Show number of requests per second"
@property
def title(self):
return "Requests/sec for ELBs '%s'" % ",".join(self.elb_names)
@property
def fields(self):
return [
(n, dict(
label = "requests on ELB %s" % n,
type = "ABSOLUTE",
)) for n in self.elb_names
]
def __init__(self):
self.api_key = os.environ['AWS_KEY']
self.secret_key = os.environ['AWS_SECRET']
self.elb_names = (sys.argv[0].rsplit('_', 1)[-1] or os.environ['ELB_NAME']).split(',')
def execute(self):
minutes = 5
end_date = datetime.datetime.utcnow()
start_date = end_date - datetime.timedelta(minutes=minutes)
cw = CloudWatchConnection(self.api_key, self.secret_key)
        # scope each query to its own ELB via the LoadBalancerName dimension,
        # otherwise every name would report the same aggregate count
        return dict(
            (n, sum(x['Sum'] for x in cw.get_metric_statistics(60, start_date, end_date, "RequestCount", "AWS/ELB", ["Sum"], dimensions={"LoadBalancerName": n})))
            for n in self.elb_names
        )
if __name__ == "__main__":
AWSCloudWatchELBRequestsPlugin().run()
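# Hypothetical invocation sketch: munin wildcard plugins encode the target in
# the script name, so a symlink named aws_elb_requests_my-elb plus credentials
# in the environment would drive __init__ above:
#   AWS_KEY=... AWS_SECRET=... ./aws_elb_requests_my-elb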
|
[
"Cvalladares4837@gmail.com"
] |
Cvalladares4837@gmail.com
|
|
56912e1ee6caa7808dea0ddd13b2516f89961713
|
1c751c001357d23fe10e7a42490e3b76434dfa18
|
/tools/py/ss.py
|
06e26a8a47b602d63c9c69c1a72b29e584944444
|
[] |
no_license
|
pie-crust/etl
|
995925199a71b299544bfac1ed8f504f16fbadc2
|
14b19b542eaa69b8679ce7df4d9a5d2720b3c5c7
|
refs/heads/master
| 2022-12-12T18:40:31.866907
| 2019-10-14T15:46:16
| 2019-10-14T15:46:16
| 215,082,544
| 0
| 0
| null | 2022-12-08T05:22:54
| 2019-10-14T15:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,942
|
py
|
import os, sys, io, csv, time, boto, gzip, math
import pyodbc
e=sys.exit
from pprint import pprint as pp
try:
from io import BytesIO as cStringIO
except:
try:
import cStringIO
except ImportError:
import io as cStringIO
def setKeytabCache(keyTabFile, keyTabPrincipal='',isVertica=True):
DEFAULT_DOMAIN = dbenvars.get('DEFAULT_DOMAIN'); assert DEFAULT_DOMAIN
if isVertica:
if keyTabFile != '':
verticakeyTabPrincipal = dbenvars.get('DB_SERVER') + '@' + DEFAULT_DOMAIN
os.system("kinit -k -t {} {}".format(keyTabFile, verticakeyTabPrincipal))
else:
message="keyTabFile {} not defined. Check environ variable KRB5_CLIENT_KTNAME".format(keyTabFile)
            print('ERROR', message)
raise Exception(message)
else:
if keyTabFile != '' and keyTabPrincipal != '':
os.system("kinit -k -t {} {}".format(keyTabFile, keyTabPrincipal))
else:
message="keyTabFile {} or keyTabPrincipal not defined. Check environ variable KRB5_CLIENT_KTNAME".format(keyTabFile,keyTabPrincipal)
            print('ERROR', message)
raise Exception(message)
encoding = 'utf-8'
write_file='_sqldata.csv'
stream = io.BytesIO()
#line_as_list = [line.encode(encoding) for line in line_as_list]
dbenvars={ 'DB_SERVER':'MDDATAMART1\MDDATAMART1', 'DEFAULT_DOMAIN':"homeGROUP.COM"}
def insert_data(data):
if 1:
stmt="INSERT INTO ACCOUNTINGBI.POSITION.DY_FiccDistribution (TransactionId,SettleDate,TransactionTypeCode,ClearedDate,CloseDate,CloseLeg, QuantityType,Quantity,AccountingDate,AsOfDateTime) values (?,?,?,?,?,?,?,?,?,?)"
#CAST(? AS DATE),CAST(? AS TIMESTAMP))
#tcur.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)])
tcur.fast_executemany = True
tcur.executemany(stmt, data)
e()
import datetime
def insert_data_2(data):
if 1:
stmt="INSERT INTO ACCOUNTINGBI.POSITION.DY_FiccDistribution (TransactionId,SettleDate,TransactionTypeCode,ClearedDate,CloseDate,CloseLeg, QuantityType,Quantity,AccountingDate,AsOfDateTime) values %s"
#tcur.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)])
#tcur.fast_executemany = True
out=[]
for row in data:
tmp=[str(x) if isinstance(x, datetime.date) else x for x in list(row)]
out.append('^'.join(["'1971-01-01'" if x==None else str(x) if isinstance(x, int) else "'%s'" % x for x in tmp]))
if out:
stmt="INSERT INTO ACCOUNTINGBI.POSITION.DY_FiccDistribution values (%s)" % '),\n('. join (out)
#print(stmt)
#e()
tcur.execute(stmt)
tcur.execute('commit')
            print(tcur.rowcount)
def get_cnt(cur,tab):
cur.execute("SELECT count(1) from %s" % tab)
return cur.fetchone()[0]
rid=0
file_rows=25000 #16384
s3_rows=10000
def s3_upload_rows( bucket, s3_key, data, suffix='.gz' ):
rid=0
assert data
key = s3_key +suffix
use_rr=False
mpu = bucket.initiate_multipart_upload(key,reduced_redundancy=use_rr , metadata={'header':'test'})
stream = cStringIO()
compressor = gzip.GzipFile(fileobj=stream, mode='wb')
uploaded=0
#@timeit
def uploadPart(partCount=[0]):
global total_comp
partCount[0] += 1
stream.seek(0)
mpu.upload_part_from_file(stream, partCount[0])
total_comp +=stream.tell()
stream.seek(0)
stream.truncate()
#@timeit
def upload_to_s3():
global total_size,total_comp, rid
i=0
while True: # until EOF
i+=1
start_time = time.time()
chunk=''
#pp(data[0])
tmp=[]
if rid<len(data):
tmp= data[rid:][:s3_rows]
chunk=os.linesep.join(tmp)+os.linesep
#print rid, len(chunk), len(data)
rid +=len(tmp)
if not chunk: # EOF?
compressor.close()
uploadPart()
mpu.complete_upload()
break
else:
if sys.version_info[0] <3 and isinstance(chunk, unicode):
compressor.write(chunk.encode('utf-8'))
else:
compressor.write(chunk)
total_size +=len(chunk)
if stream.tell() > 10<<20: # min size for multipart upload is 5242880
uploadPart()
upload_to_s3()
return key
def convertSize( size):
if (size == 0):
return '0B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return '%s %s' % (s,size_name[i])
tbl='DY_FICCDISTRIBUTION'
stg='POSITION_MODEL_STAGE_TEST_2'
sch='ACCOUNTINGBI.POSITION'
wrh='LOAD_WH'
def bulk_copy(cur, file_names):
global tbl, stg, sch, LOAD_WH
assert tbl and stg and sch and wrh
assert len(file_names)
files="','".join(file_names)
before=get_cnt(cur,tbl)
start_time=time.time()
if 1:
cmd="""
COPY INTO
%s
FROM '@%s/%s/'
FILES=('%s')
""" % (tbl,stg, 'DEMO', files)
if 1:
cur.execute("USE WAREHOUSE %s" % wrh)
cur.execute("USE SCHEMA %s" % sch)
try:
out=cur.execute(cmd)
except:
print(cmd)
raise
pp(out)
match=0
for id, row in enumerate(cur.fetchall()):
status, cnt = row[1:3]
print('%s: Insert #%d, status: [%s], row count: [%s]' % ('DEMO', id, status, cnt))
if status not in ['LOADED']:
match +=1
if match:
raise Exception('Unexpected load status')
cur.execute("commit")
after=get_cnt(cur,tbl)
    print('Rows inserted: ', after - before)
sec=round((time.time() - start_time),2)
if __name__=="__main__":
if 1:
pyodbc.pooling = False
if 0:
keyTabFile=os.getenv('SSRSREPORTINGKEYTABFILE'); assert keyTabFile
keyTabPrincipal=os.getenv('DATASTAGINGSQLUSER'); assert keyTabPrincipal
#setKeytabCache(KEYTABFILE, os.getenv('DATASTAGINGSQLUSER'), None)
print ("kinit -k -t {} {}".format(keyTabFile, keyTabPrincipal))
#e()
os.system("kinit -k -t {} {}".format(keyTabFile, keyTabPrincipal))
connS='DSN=MDIR01;Database=DataStaging;Trusted_Connection=yes;POOL=0;App=FiccApi'
connS='DSN=MDDATAMART1;Database=Accounting;Trusted_Connection=yes;POOL=0;App=PositionReader'
sconn = pyodbc.connect(connS)
scur = sconn.cursor()
scur.arraysize=file_rows
#scur.setinputsizes([(pyodbc.SQL_WVARCHAR, 0, 0)])
if 1:
stmt="SELECT COUNT(*) from DY_FiccDistribution"
scur.execute(stmt)
|
[
"olek.buzu@gmail.com"
] |
olek.buzu@gmail.com
|
de2c57db3df051e2c71ce0c65bb11785838de827
|
51f6443116ef09aa91cca0ac91387c1ce9cb445a
|
/Curso_de_Python_ Curso_em_Video/PythonTeste/tuplasEx006.py
|
7390994256a02018fcb186b5a803af55fff0bb07
|
[
"MIT"
] |
permissive
|
DanilooSilva/Cursos_de_Python
|
f449f75bc586f7cb5a7e43000583a83fff942e53
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
refs/heads/main
| 2023-07-30T02:11:27.002831
| 2021-10-01T21:52:15
| 2021-10-01T21:52:15
| 331,683,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
nomes = ('Danilo', 'Maria', 'Scarlett', 'Ohara', 'Allanys', 'Mel', 'Ze Russo')
for nome in nomes:
    print(f'In the name {nome.upper()} we have the vowels', end=' ')
for vogais in nome:
if vogais in 'aAeEiIoOuU':
print(vogais, end=' ')
print()
|
[
"dno.gomesps@gmail.com"
] |
dno.gomesps@gmail.com
|
16a554411b6c2550251a1231746f75bf6f984b71
|
7e806feae66ff77601a9f19dad4d4a4a8d774c88
|
/server/api/migrations/0018_auto_20191110_1617.py
|
4f9ad449149555622bb9070600a9cfa16d1eae8f
|
[] |
no_license
|
admiralbolt/lorebook
|
bfc71fa39285a51bce70e0544aceca55db1132f4
|
6e5614796d4eccc696908053c5bc22950a8e6a8c
|
refs/heads/master
| 2022-12-28T20:12:31.229136
| 2021-01-24T22:02:13
| 2021-01-24T22:02:13
| 215,921,497
| 1
| 0
| null | 2022-12-11T09:51:53
| 2019-10-18T02:03:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
# Generated by Django 2.2.7 on 2019-11-10 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0017_auto_20191110_1610'),
]
operations = [
migrations.AlterField(
model_name='lore',
name='date_received',
field=models.DateField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='lore',
name='date_written',
field=models.DateField(blank=True, default=None, null=True),
),
]
|
[
"aviknecht@gmail.com"
] |
aviknecht@gmail.com
|
ca8506bd95dca5bde283c3376e5c95788a982b55
|
b7fab13642988c0e6535fb75ef6cb3548671d338
|
/tools/ydk-py-master/openconfig/ydk/models/openconfig/ietf_diffserv_action.py
|
293a2b138c9c7712e56f8bfd4726376e73af48fb
|
[
"Apache-2.0"
] |
permissive
|
juancsosap/yangtraining
|
6ad1b8cf89ecdebeef094e4238d1ee95f8eb0824
|
09d8bcc3827575a45cb8d5d27186042bf13ea451
|
refs/heads/master
| 2022-08-05T01:59:22.007845
| 2019-08-01T15:53:08
| 2019-08-01T15:53:08
| 200,079,665
| 0
| 1
| null | 2021-12-13T20:06:17
| 2019-08-01T15:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,114
|
py
|
""" ietf_diffserv_action
This module contains a collection of YANG definitions for configuring
diffserv specification implementations. Copyright (c) 2014 IETF
Trust and the persons identified as authors of the code. All rights
reserved. Redistribution and use in source and binary forms, with
or without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents (http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC XXXX; see the
RFC itself for full legal notices.
"""
from ydk.entity_utils import get_relative_entity_path as _get_relative_entity_path
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YPYError, YPYModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Meter(Identity):
"""
meter action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(Meter, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:meter")
class DropType(Identity):
"""
drop algorithm
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(DropType, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:drop-type")
class MinRate(Identity):
"""
min\-rate action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(MinRate, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:min-rate")
class Priority(Identity):
"""
priority action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(Priority, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:priority")
class MaxRate(Identity):
"""
max\-rate action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(MaxRate, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:max-rate")
class MeterActionType(Identity):
"""
action type in a meter
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(MeterActionType, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:meter-action-type")
class Marking(Identity):
"""
marking action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(Marking, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:marking")
class AlgorithmicDrop(Identity):
"""
algorithmic\-drop action type
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(AlgorithmicDrop, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:algorithmic-drop")
class MeterActionSet(Identity):
"""
mark action type in a meter
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(MeterActionSet, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:meter-action-set")
class RandomDetect(Identity):
"""
random detect algorithm
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(RandomDetect, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:random-detect")
class MeterActionDrop(Identity):
"""
drop action type in a meter
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(MeterActionDrop, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:meter-action-drop")
class AlwaysDrop(Identity):
"""
always drop algorithm
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(AlwaysDrop, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:always-drop")
class TailDrop(Identity):
"""
tail drop algorithm
"""
_prefix = 'action'
_revision = '2015-04-07'
def __init__(self):
super(TailDrop, self).__init__("urn:ietf:params:xml:ns:yang:ietf-diffserv-action", "ietf-diffserv-action", "ietf-diffserv-action:tail-drop")
|
[
"juan.c.sosa.p@gmail.com"
] |
juan.c.sosa.p@gmail.com
|
bbb370b912be9dd1d301c91a2df51194eab247b0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02647/s288463755.py
|
23233f7ddad32a580359992f962a8751dd84649d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
n, k = map(int, input().split())
alst = list(map(int, input().split()))
for _ in range(k):
tmp = [0 for _ in range(n + 1)]
for i, num in enumerate(alst):
min_ind = max(i - num, 0)
max_ind = min(i + num + 1, n)
tmp[min_ind] += 1
tmp[max_ind] -= 1
next_num = 0
for i, num in enumerate(tmp[:-1]):
next_num += num
alst[i] = next_num
if tmp[0] == n and tmp[-1] == -n:
break
print(*alst)
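# Hypothetical check of the difference-array step above: for input
#   3 1
#   1 0 1
# each position i spreads light over [i - A[i], i + A[i]], so one step prints "1 3 1".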
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
93c1cfcec3bc7b70190f5138f4b8fa6e97a0ff16
|
2c989707a10e65c115eff8bbab0f51e510ccf096
|
/PythonAdvance/1004pyad.py
|
549187a006765355fd624a4fc305df2d8a6dfbba
|
[] |
no_license
|
qiwsir/LearningWithLaoqi
|
42041eccb40788f573485209c82aa5e00549408a
|
4c5df8d6f7638ba2ef5ea68b3b169aa4065b43c2
|
refs/heads/master
| 2021-05-01T19:13:28.686452
| 2018-04-03T11:45:21
| 2018-04-03T11:45:21
| 121,016,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
#coding:utf-8
class CountWord(dict):
def add(self, item, increment=1):
self[item] = increment + self.get(item, 0)
def sorts(self, reverse=False):
lst = [ (self[k], k) for k in self ]
lst.sort()
if reverse:
lst.reverse()
return [(v, k) for k, v in lst]
if __name__ == "__main__":
s = 'You raise me up, so I can stand on mountains,\
You raise me up to walk on stormy seas,\
I am strong when I am on your shoulders,\
You raise me up to more than I can be.'
words = s.split()
c = CountWord()
for word in words:
c.add(word)
print("从小到大")
print(c.sorts())
print("从大到小")
print(c.sorts(reverse=True))
|
[
"qiwsir@gmail.com"
] |
qiwsir@gmail.com
|
11815ae3e42c0042d8086fa2b5a0924fcb8f1998
|
c29de305e7923acfa6a49a852d730ac607198446
|
/ng_opcserver/ng_opcserver.py
|
92a6b315f3ab0d29c4a9d2d8c3db8fd92811e7c7
|
[
"MIT"
] |
permissive
|
jmosbacher/ng-opcua
|
712009cf50e2292abacbe4da269b934c5cfb3bcf
|
3a3030a4230a4807b603262f19f66c99a27f75cc
|
refs/heads/master
| 2022-12-16T21:18:02.518113
| 2020-09-24T14:18:31
| 2020-09-24T14:18:31
| 298,301,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,098
|
py
|
# -*- coding: utf-8 -*-
from asyncua import ua, Server
from asyncua.common import node
from asyncua.common.methods import uamethod
from enum import IntEnum
import asyncio
import random
import logging
import time
# Not required, just for convenience.
# Because this example is based on EnumStrings, the values should start at 0 and no gaps are allowed.
class GeneratorState(IntEnum):
off = 0 # No communication
idle = 1 # Communication established, run settings not set
ready = 2 #run settings set, ready to start running
running = 3 # producing neutrons
# helper method to automatically create string list
def enum_to_stringlist(a_enum):
items = []
for value in a_enum:
items.append(ua.LocalizedText(value.name))
return items
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger('asyncua')
@uamethod
def func(parent, value):
return value * 2
async def serve_state(duration=None, frequency=1, debug=False):
server = Server()
await server.init()
server.set_endpoint("opc.tcp://0.0.0.0:4840/pulsedng/")
    # set up our own namespace; not strictly necessary, but recommended by the spec
uri = "http://pulsedng.xenon-sc.lngs.infn.it"
nsidx = await server.register_namespace(uri)
# --------------------------------------------------------
# create custom enum data type
# --------------------------------------------------------
enums = await server.get_root_node().get_child(["0:Types", "0:DataTypes", "0:BaseDataType", "0:Enumeration"])
# 1.
# Create Enum Type
GeneratorState_type = await enums.add_data_type(nsidx, 'GeneratorState')
# Or convert the existing IntEnum GeneratorState
es = await GeneratorState_type.add_property(0, "EnumStrings" , enum_to_stringlist(GeneratorState))
await es.set_value_rank(1)
await es.set_array_dimensions([0])
# --------------------------------------------------------
# create object with enum variable
# --------------------------------------------------------
# get Objects node, this is where we should put our custom stuff
objects = server.get_objects_node()
# create object
myobj = await objects.add_object(nsidx, 'GeneratorObject')
# add var with as type the custom enumeration
GeneratorState_var = await myobj.add_variable(nsidx, 'GeneratorState2Var', GeneratorState.off, datatype = GeneratorState_type.nodeid)
await GeneratorState_var.set_writable()
await GeneratorState_var.set_value(GeneratorState.idle) # change value of enumeration
_logger.info('Starting server!')
async with server:
while True:
for state in GeneratorState:
await asyncio.sleep(2)
_logger.info('Set value of %s to %d', GeneratorState_var, state)
await GeneratorState_var.set_value(state)
def run_server(duration, frequency, debug):
loop = asyncio.get_event_loop()
loop.set_debug(debug)
loop.run_until_complete(serve_state(duration, frequency, debug))
loop.close()
if __name__ == "__main__":
run_server(100, 1, True)
|
[
"joe.mosbacher@gmail.com"
] |
joe.mosbacher@gmail.com
|
1825d7f8f1f9e66ba3b9c2f500fc015db148d39c
|
d60e74dae2c4bcef6bc7c8faea51dc6b245de42f
|
/package/inference/gauss/__init__.py
|
06c798718238f04663af5ea96ccdbc202152a445
|
[] |
no_license
|
tloredo/inference
|
37664ef62317f32ad5ab25c56ead1c49bfc91045
|
215de4e93b5cf79a1e9f380047b4db92bfeaf45c
|
refs/heads/master
| 2021-09-09T06:24:16.690338
| 2021-09-01T21:03:52
| 2021-09-01T21:03:52
| 142,254,094
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
"""
gauss: Modules for inference tasks using the Gaussian (normal) distribution.
"""
from . import vecba
__all__ = ['vecba']
|
[
"loredo@astro.cornell.edu"
] |
loredo@astro.cornell.edu
|
9fbf579ea2337d37e373d71d47d4ef7a48cbef9a
|
c385cc6c6d69cadfe23664368e592f62e1e2d390
|
/Tree/剑指 Offer 26. 树的子结构.py
|
69ec6412c750ce3ed8248740308e3370c5da163a
|
[] |
no_license
|
baketbek/AlgorithmPractice
|
c5801a374491a2d813fb7504a84aff6a50fc11ab
|
5a412bef8602097af43a9134389d334e6f5fa671
|
refs/heads/master
| 2023-05-23T23:50:25.798825
| 2021-06-19T13:58:45
| 2021-06-19T13:58:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
This version is buggy: inside recur, when A and B have equal values we keep comparing, but after moving down a level, a mismatch makes this code skip that level
and compare A.left.left against B.left, which is wrong. As soon as any pair of values differs, matching should restart from the top; this version skips a level yet keeps comparing from the current node instead of returning to the root.
class Solution:
def isSubStructure(self, A: TreeNode, B: TreeNode) -> bool:
def recur(A, B):
if B is None:
return True
if A is None:
return False
if A.val == B.val:
return recur(A.left, B.left) and recur(A.right, B.right)
else:
return recur(A.left, B) or recur(A.right, B)
if A is None or B is None:
return False
return recur(A, B)
Why define a separate recur?
The idea of this problem is to traverse every node of A, and for each node, treat it as a root and check whether a subtree identical to B starts there.
Traversing A's nodes uses the outer function; checking whether the matching subtree exists is handled by the separately defined recur.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSubStructure(self, A: TreeNode, B: TreeNode) -> bool:
def recur(A, B):
if B is None:
return True
if A is None:
return False
if A.val!=B.val:
return False
return recur(A.left, B.left) and recur(A.right, B.right)
if A is None or B is None:
return False
return recur(A, B) or self.isSubStructure(A.left, B) or self.isSubStructure(A.right, B)
Runtime: 136 ms, faster than 49.59% of all Python3 submissions.
Memory usage: 18.9 MB, less than 5.41% of all Python3 submissions.
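# Hypothetical sanity check for the accepted version above:
#   A = TreeNode(3); A.left = TreeNode(4); A.right = TreeNode(5)
#   A.left.left = TreeNode(1); A.left.right = TreeNode(2)
#   B = TreeNode(4); B.left = TreeNode(1)
#   Solution().isSubStructure(A, B)  # -> True, B matches the subtree rooted at A.left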
|
[
"noreply@github.com"
] |
baketbek.noreply@github.com
|
eeba4aace767cd3308d15373a9beb81f35f19740
|
40195e6f86bf8620850f0c56e98eae5693e88277
|
/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py
|
010408b706010866823ce1f7b6e8f10534350d69
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
apple/coremltools
|
009dfa7154d34cab8edcafa618e689e407521f50
|
feed174188f7773631a3d574e1ff9889a135c986
|
refs/heads/main
| 2023-09-01T23:26:13.491955
| 2023-08-31T18:44:31
| 2023-08-31T18:44:31
| 95,862,535
| 3,742
| 705
|
BSD-3-Clause
| 2023-09-14T17:33:58
| 2017-06-30T07:39:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,346
|
py
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry
register_op = SSAOpRegistry.register_op
# This file contains the TF dialect of SSA. Briefly, these ops are only
# understandable in the TF frontend and not acceptable in the standard op set.
# No backend would support any of the op here. These ops exist to facilitate
# frontend SSA passes, but must be replaced with standard ops during SSA
# passes.
# All tf op must start with 'tf_' prefix.
#
# tf_make_list allows elem_shape to be unspecified. core op make_list does
# not allow that.
@register_op(namespace="tf")
class tf_make_list(Operation):
input_spec = InputSpec(
init_length=TensorInputType(optional=True, type_domain=types.int32),
dynamic_length=TensorInputType(optional=True, type_domain=types.bool),
elem_shape=TensorInputType(const=True, optional=True, type_domain=types.int32),
dtype=TensorInputType(const=True, optional=True, type_domain=types.str),
)
def default_inputs(self):
return DefaultInputs(
init_length=1,
dynamic_length=True,
dtype="fp32",
)
def type_inference(self):
init_length = self.init_length.val
if self.elem_shape is None or self.elem_shape.sym_val is None:
return types.list(
types.unknown,
init_length=init_length,
dynamic_length=self.dynamic_length.val,
)
builtin_dtype = types.string_to_builtin(self.dtype.val)
elem_type = types.tensor(builtin_dtype, self.elem_shape.sym_val)
return types.list(
elem_type, init_length=init_length, dynamic_length=self.dynamic_length.val
)
class TfLSTMBase(Operation):
"""
Common LSTM inputs for BlockLSTMCell and BlockLSTM.
"""
input_spec = InputSpec(
c_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim]
h_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim]
# weight: [input_dim + hidden_dim, 4*hidden_dim] (icfo layout)
weight=TensorInputType(const=True, type_domain="T"),
forget_bias=TensorInputType(const=True, optional=True, type_domain="T"),
# cell_clip == None implies not using cell clip
cell_clip=TensorInputType(const=True, optional=True, type_domain="T"),
# If use_peephole == False, weight_peep_* is ignored
use_peephole=TensorInputType(const=True, optional=True, type_domain=types.bool),
weight_peep_i=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,]
weight_peep_f=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,]
weight_peep_o=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,]
bias=TensorInputType(const=True, type_domain="T"), # [4*hidden_dim] (icfo layout)
)
type_domains = {
"T": (types.fp16, types.fp32),
}
def default_inputs(self):
return DefaultInputs(
forget_bias=1.,
use_peephole=False,
)
def _check_peephole_weights(self):
# Check weight_peep_*
if self.use_peephole.val:
if (
self.weight_peep_i is None
or self.weight_peep_f is None
or self.weight_peep_o is None
):
raise ValueError(
"weight_peep_* cannot be None when use_peephole is True"
)
@register_op(namespace="tf")
class tf_lstm_block_cell(TfLSTMBase):
"""
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev .* wci + i)
f = sigmoid(cs_prev .* wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
"""
input_spec = (
InputSpec(x=TensorInputType(type_domain="T"),) + TfLSTMBase.input_spec # [batch, input_dim]
)
def __init__(self, **kwargs):
super(tf_lstm_block_cell, self).__init__(**kwargs)
def type_inference(self):
self._check_peephole_weights()
# all return shapes are [batch, hidden_dim]
ret_shape = self.c_prev.shape
dtype = self.x.dtype
# See
# https://www.tensorflow.org/api_docs/python/tf/raw_ops/LSTMBlockCell
# All returned shapes are [batch, hidden_dim]
return (
types.tensor(dtype, ret_shape), # i
types.tensor(dtype, ret_shape), # cs
types.tensor(dtype, ret_shape), # f
types.tensor(dtype, ret_shape), # o
types.tensor(dtype, ret_shape), # ci
types.tensor(dtype, ret_shape), # co
types.tensor(dtype, ret_shape),
) # h
@register_op(namespace="tf")
class tf_lstm_block(TfLSTMBase):
"""
Apply LSTM to an input sequence
"""
input_spec = (
InputSpec(
seq_len=TensorInputType(type_domain=types.int32), # int
x=TensorInputType(type_domain="T"), # [padded_len, batch, input_dim]
)
+ TfLSTMBase.input_spec
)
def type_inference(self):
self._check_peephole_weights()
padded_len = self.x.shape[0]
ret_shape = [padded_len] + list(self.c_prev.shape)
dtype = self.x.dtype
# All returned shapes are [padded_len, b, hidden_dim]
return (
types.tensor(dtype, ret_shape), # i
types.tensor(dtype, ret_shape), # cs
types.tensor(dtype, ret_shape), # f
types.tensor(dtype, ret_shape), # o
types.tensor(dtype, ret_shape), # ci
types.tensor(dtype, ret_shape), # co
types.tensor(dtype, ret_shape),
) # h
|
[
"noreply@github.com"
] |
apple.noreply@github.com
|
e5dc0ac3c5d8a27863f7618f82a43abfc0a2d5f0
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pandas/tests/series/methods/test_sort_index.py
|
6fa4eeaee34c0360b902671615b82294e3eea970
|
[
"MIT"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684
| 2023-06-26T00:53:53
| 2023-06-26T00:53:53
| 94,152,032
| 14
| 0
|
MIT
| 2023-09-04T02:53:29
| 2017-06-13T00:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,013
|
py
|
import random
import numpy as np
import pytest
from pandas import IntervalIndex, MultiIndex, Series
import pandas._testing as tm
class TestSeriesSortIndex:
def test_sort_index(self, datetime_series):
rindex = list(datetime_series.index)
random.shuffle(rindex)
random_order = datetime_series.reindex(rindex)
sorted_series = random_order.sort_index()
tm.assert_series_equal(sorted_series, datetime_series)
# descending
sorted_series = random_order.sort_index(ascending=False)
tm.assert_series_equal(
sorted_series, datetime_series.reindex(datetime_series.index[::-1])
)
# compat on level
sorted_series = random_order.sort_index(level=0)
tm.assert_series_equal(sorted_series, datetime_series)
# compat on axis
sorted_series = random_order.sort_index(axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>"
with pytest.raises(ValueError, match=msg):
random_order.sort_values(axis=1)
sorted_series = random_order.sort_index(level=0, axis=0)
tm.assert_series_equal(sorted_series, datetime_series)
with pytest.raises(ValueError, match=msg):
random_order.sort_index(level=0, axis=1)
def test_sort_index_inplace(self, datetime_series):
# For GH#11402
rindex = list(datetime_series.index)
random.shuffle(rindex)
# descending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=False, inplace=True)
assert result is None
tm.assert_series_equal(
random_order, datetime_series.reindex(datetime_series.index[::-1])
)
# ascending
random_order = datetime_series.reindex(rindex)
result = random_order.sort_index(ascending=True, inplace=True)
assert result is None
tm.assert_series_equal(random_order, datetime_series)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level="A")
tm.assert_series_equal(backwards, res)
res = s.sort_index(level=["A", "B"])
tm.assert_series_equal(backwards, res)
res = s.sort_index(level="A", sort_remaining=False)
tm.assert_series_equal(s, res)
res = s.sort_index(level=["A", "B"], sort_remaining=False)
tm.assert_series_equal(s, res)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
# implicit sort_remaining=True
res = s.sort_index(level=level)
tm.assert_series_equal(backwards, res)
# GH#13496
# sort has no effect without remaining lvls
res = s.sort_index(level=level, sort_remaining=False)
tm.assert_series_equal(s, res)
def test_sort_index_kind(self):
# GH#14444 & GH#13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind="mergesort")
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="quicksort")
tm.assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind="heapsort")
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position(self):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first")
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last")
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_sort_index_intervals(self):
s = Series(
[np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4])
)
result = s.sort_index()
expected = s
tm.assert_series_equal(result, expected)
result = s.sort_index(ascending=False)
expected = Series(
[3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1])
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_list, sorted_list, ascending, ignore_index, output_index",
[
([2, 3, 6, 1], [2, 3, 6, 1], True, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [2, 3, 6, 1], True, False, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, True, [0, 1, 2, 3]),
([2, 3, 6, 1], [1, 6, 3, 2], False, False, [3, 2, 1, 0]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_list, sorted_list, ascending, ignore_index, output_index
):
# GH 30114
ser = Series(original_list)
expected = Series(sorted_list, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_ser = ser.copy()
result_ser.sort_index(**kwargs)
else:
result_ser = ser.sort_index(**kwargs)
tm.assert_series_equal(result_ser, expected)
tm.assert_series_equal(ser, Series(original_list))
|
[
"ulcamilo@gmail.com"
] |
ulcamilo@gmail.com
|
e25b321b40b2d26cad85048d4c5e8ef0c54588b3
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Maths/Polynomials_Functions/Fast Fourier Transform.py
|
3667f91c4d5ebbe4681f2f971d13bc638492912a
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162
| 2019-11-10T11:08:23
| 2019-11-10T11:08:23
| 154,487,015
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
# Created by Bogdan Trif on 12-02-2018 , 11:31 AM.
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack
N = 100
x = np.linspace(0,2*np.pi,100)
y = np.sin(x) + np.random.random(100) * 0.8
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
w = scipy.fftpack.rfft(y)
f = scipy.fftpack.rfftfreq(N, x[1]-x[0])
spectrum = w**2
cutoff_idx = spectrum < (spectrum.max()/5)
w2 = w.copy()
w2[cutoff_idx] = 0
y2 = scipy.fftpack.irfft(w2)
fig = plt.figure(1, figsize=(18,10))
plt.title( ' FAST FOURIER TRANSFORM ')
ax = plt.subplot(111)
plt.plot(x, y2,'-' , linewidth = 2 ,label = 'Fast Fourier Transform' )
plt.plot( x, smooth(y, 3), 'r-' , lw=1 , label = 'smooth 3' )
plt.plot(x, smooth(y , 19 ), 'g-', lw=1 , label = 'smooth 19')
plt.legend(loc = 0)
plt.grid(which ='both')
plt.show()
plt.plot(x,spectrum, 'm-', lw=1 , label = 'spectrum')
plt.show()
|
[
"bogdan.evanzo@gmail.com"
] |
bogdan.evanzo@gmail.com
|
f2816e0ff2de1b5a3f38f5624c01e9a11ee49dbe
|
edbb7a53633cba3b17aad4501d532d92159a1041
|
/DirtyBits/wsgi.py
|
455df86e3f0091903fefcae922e2e33c885fa158
|
[] |
no_license
|
himdhiman/DirtyBitsFinal
|
b5be2b87f85d15a7dce490692df9b6086aadbb74
|
f3bd541426af7633a51ab36579cf61fe28a289dc
|
refs/heads/master
| 2023-06-19T13:05:53.755343
| 2021-07-19T14:46:29
| 2021-07-19T14:46:29
| 385,284,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DirtyBits.settings')
application = get_wsgi_application()
|
[
"himanshudhiman9313@gmail.com"
] |
himanshudhiman9313@gmail.com
|
b9837e4481aada40e2b6e49cb7aa361f626d1e68
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20180811/example_wsgiref/00hello.py
|
5f59790ba629fefc3066cf9fc505d9309baaa971
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
import json
from wsgiref.simple_server import make_server
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'application/json')]
start_response(status, headers)
data = {
"msg": "hello world",
}
return [json.dumps(data).encode("utf-8")]
if __name__ == "__main__":
port = 5000
with make_server('', port, app) as httpd:
httpd.serve_forever()
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
7fce08c60328e65c1c9c8a5d15b2b979c59657a4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_103/ch34_2020_03_30_12_51_14_514066.py
|
b02ea9ff387824b0e5bcc10b0ac45bf455ddf9a1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
def eh_primo(x):
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    y = 3
    while y * y <= x:
        if x % y == 0:
            return False
        y += 2
    return True
def maior_primo_menor_que(n):
    x = n - 1
    while x >= 2:
        if eh_primo(x):
            return x
        x -= 1
    return -1
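# Hypothetical spot checks for the corrected versions above:
#   eh_primo(2)                # -> True
#   eh_primo(9)                # -> False (9 = 3 * 3)
#   maior_primo_menor_que(20)  # -> 19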
|
[
"you@example.com"
] |
you@example.com
|
1884af8f4634d7ebb0c4640f69361692cd0c7867
|
2ba46d8b7ac3538619b074b50bad126867a2c27c
|
/src/zojax/messaging/browser/breadcrumb.py
|
df1c4095fb87a19689e3695a70458dccbadea7d9
|
[
"ZPL-2.1"
] |
permissive
|
Zojax/zojax.messaging
|
2baca5eb4d2d8b3bd23462f6d216925b9ff4cc23
|
7d6c4216763a93ae41251d7d4224c83654806339
|
refs/heads/master
| 2016-09-06T11:41:41.037505
| 2011-12-16T07:18:52
| 2011-12-16T07:18:52
| 2,026,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" custom IBreadcrumb implementation for IMessageStorage
$Id$
"""
from zope import interface, component
from z3c.breadcrumb.browser import GenericBreadcrumb
from zojax.messaging.interfaces import _, IMessageStorage
class MessagesBreadcrumb(GenericBreadcrumb):
component.adapts(IMessageStorage, interface.Interface)
name = _(u'My messages')
|
[
"andrey.fedoseev@gmail.com"
] |
andrey.fedoseev@gmail.com
|
455590a886a1e5acccb85548fbd3af500d74f358
|
73abb3e522c93f1d0d0b93035b034a9f5b8e9867
|
/kevlar/cli/localize.py
|
e451ae7003db0e55ac891f9dba10c131e49506aa
|
[
"MIT"
] |
permissive
|
jchow32/kevlar
|
4caa3fae43857e30a527871b33bce4b30941e265
|
59bb409407b08011e86fd0a1198584cb4d3d5998
|
refs/heads/master
| 2021-05-05T15:14:23.071710
| 2018-01-11T20:52:54
| 2018-01-11T20:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2017 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
def subparser(subparsers):
"""Define the `kevlar localize` command-line interface."""
desc = """\
Given a reference genome and a contig (or set of contigs) assembled from
variant-related reads, retrieve the portion of the reference genome
corresponding to the variant. NOTE: this command relies on the `bwa`
program being in the PATH environmental variable.
"""
subparser = subparsers.add_parser('localize', description=desc)
subparser.add_argument('-x', '--max-diff', type=int, metavar='X',
default=10000, help='span of all k-mer starting '
'positions should not exceed X bp; default is '
'10000 (10 kb)')
subparser.add_argument('-d', '--delta', type=int, metavar='D',
default=25, help='retrieve the genomic interval '
'from the reference by extending beyond the span '
'of all k-mer starting positions by D bp')
subparser.add_argument('-o', '--out', metavar='FILE', default='-',
help='output file; default is terminal (stdout)')
subparser.add_argument('-k', '--ksize', type=int, metavar='K', default=31,
help='k-mer size; default is 31')
subparser.add_argument('contigs', help='assembled reads in Fasta format')
subparser.add_argument('refr', help='BWA indexed reference genome')
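# Hypothetical command line, given the arguments registered above:
#   kevlar localize --max-diff 10000 --delta 25 -o interval.txt contigs.fa refr.fa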
|
[
"daniel.standage@gmail.com"
] |
daniel.standage@gmail.com
|
cb91a4068110a5c6fd0628653fcaba5ac636754d
|
6147d3da9c7f31a658f13892de457ed5a9314b22
|
/linked_list/python/nth_from_last.py
|
314466ee9a99cf03f7eff6af1b33c26300b7d3ec
|
[] |
no_license
|
ashish-bisht/must_do_geeks_for_geeks
|
17ba77608eb2d24cf4adb217c8e5a65980e85609
|
7ee5711c4438660db78916cf876c831259109ecc
|
refs/heads/master
| 2023-02-11T22:37:03.302401
| 2021-01-03T05:53:03
| 2021-01-03T05:53:03
| 320,353,079
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
class Node:
def __init__(self, val):
self.val = val
self.next = None
def nth_from_last(head, n):
slow = fast = head
while n > 0 and fast:
fast = fast.next
n -= 1
if n > 0 and not fast:
return "List ni hai bhai"
if not fast:
return head.val
while fast:
fast = fast.next
slow = slow.next
return slow.val
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
print(nth_from_last(head, 2))
print(nth_from_last(head, 5))
print(nth_from_last(head, 6))
|
[
"ashishbisht723@gmail.com"
] |
ashishbisht723@gmail.com
|
2e786f5af1ea5a3664be4e996d7a6b8e922d57c1
|
6c6f439b6777ba08f50de3a84cb236b9506a3216
|
/Chapitre_5/tkinter_3.py
|
74465851f2c1d2fff4858135861b6ba802f5f1dc
|
[] |
no_license
|
badiskasmi/py-rasp-2e-edition
|
2b87fcd4c3df4ae7ddbb3012c961d7c4fec1450c
|
c83fe620b6c61e7697af1b4e67a0fc71c5b99a9d
|
refs/heads/master
| 2023-04-09T09:05:22.054570
| 2018-12-27T19:40:46
| 2018-12-27T19:40:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
#!/usr/bin/env python3
from tkinter import Tk
LARGEUR = 400
HAUTEUR = 300
def main():
fenetre = Tk()
largeur_div = (fenetre.winfo_screenwidth() - LARGEUR) // 2
hauteur_div = (fenetre.winfo_screenheight() - HAUTEUR) // 2
dimensions = '{f_x}x{f_y}+{p_x}+{p_y}'.format(
f_x=LARGEUR,
f_y=HAUTEUR,
p_x=largeur_div,
p_y=hauteur_div
)
fenetre.geometry(dimensions)
fenetre.mainloop()
if __name__ == '__main__':
main()
|
[
"monsieurp@gentoo.org"
] |
monsieurp@gentoo.org
|
16a912b2fce7051d68803fddfac2d2466e9eec03
|
2a4cdd62600c95b9a52e198801a042ad77163077
|
/forward/common/validate/validate_code.py
|
7e52b250ca0530fd242be2afc6be7bcbdec1406b
|
[] |
no_license
|
thm-tech/forward
|
2c0bb004d14ea6ab807e526a1fa88c61e80c80e4
|
01ca49387754d38717f8cc07fd27127edb035b87
|
refs/heads/master
| 2021-01-10T02:34:35.098761
| 2015-12-08T03:29:47
| 2015-12-08T03:29:47
| 46,783,502
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,259
|
py
|
# -*- encoding: utf-8 -*-
import random
import tempfile
import os
pp = os.path.dirname(os.path.realpath(__file__))
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from forward.common.tools import pathin
_letter_cases = "abcdefghjkmnpqrstuvwxy" # 小写字母,去除可能干扰的i,l,o,z
_upper_cases = _letter_cases.upper() # 大写字母
_numbers = ''.join(map(str, range(3, 10))) # 数字
init_chars = ''.join((_numbers)) + _letter_cases
def create_validate_code(size=(120, 30),
chars=init_chars,
img_type="GIF",
mode="RGB",
bg_color=(255, 255, 255),
fg_color=(0, 0, 255),
font_size=18,
font_type=pathin('./consola.ttf'),
length=4,
draw_lines=True,
n_line=(1, 2),
draw_points=True,
point_chance=2):
"""
@todo: 生成验证码图片
@param size: 图片的大小,格式(宽,高),默认为(120, 30)
@param chars: 允许的字符集合,格式字符串
@param img_type: 图片保存的格式,默认为GIF,可选的为GIF,JPEG,TIFF,PNG
@param mode: 图片模式,默认为RGB
@param bg_color: 背景颜色,默认为白色
@param fg_color: 前景色,验证码字符颜色,默认为蓝色#0000FF
@param font_size: 验证码字体大小
@param font_type: 验证码字体,默认为 consola.ttf
@param length: 验证码字符个数
@param draw_lines: 是否划干扰线
@param n_lines: 干扰线的条数范围,格式元组,默认为(1, 2),只有draw_lines为True时有效
@param draw_points: 是否画干扰点
@param point_chance: 干扰点出现的概率,大小范围[0, 100]
@return: [0]: PIL Image实例
@return: [1]: 验证码图片中的字符串
"""
width, height = size # 宽, 高
img = Image.new(mode, size, bg_color) # 创建图形
draw = ImageDraw.Draw(img) # 创建画笔
def get_chars():
'''生成给定长度的字符串,返回列表格式'''
return random.sample(chars, length)
def create_lines():
'''绘制干扰线'''
line_num = random.randint(*n_line) # 干扰线条数
for i in range(line_num):
# 起始点
begin = (random.randint(0, size[0]), random.randint(0, size[1]))
# 结束点
end = (random.randint(0, size[0]), random.randint(0, size[1]))
draw.line([begin, end], fill=(0, 0, 0))
def create_points():
'''绘制干扰点'''
chance = min(100, max(0, int(point_chance))) # 大小限制在[0, 100]
for w in xrange(width):
for h in xrange(height):
tmp = random.randint(0, 100)
if tmp > 100 - chance:
draw.point((w, h), fill=(0, 0, 0))
def create_strs():
'''绘制验证码字符'''
c_chars = get_chars()
strs = ' %s ' % ' '.join(c_chars) # 每个字符前后以空格隔开
font = ImageFont.truetype(font_type, font_size)
font_width, font_height = font.getsize(strs)
draw.text(((width - font_width) / 3, (height - font_height) / 3),
strs, font=font, fill=fg_color)
return ''.join(c_chars)
if draw_lines:
create_lines()
if draw_points:
create_points()
strs = create_strs()
# 图形扭曲参数
params = [1 - float(random.randint(1, 2)) / 100,
0,
0,
0,
1 - float(random.randint(1, 10)) / 100,
float(random.randint(1, 2)) / 500,
0.001,
float(random.randint(1, 2)) / 500
]
img = img.transform(size, Image.PERSPECTIVE, params) # 创建扭曲
img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # 滤镜,边界加强(阈值更大)
return img, strs
if __name__ == "__main__":
code = create_validate_code()
code_img = code[0]
code_str = code[1]
print('validate_code', code_str)
code_img.save("validate.gif", "GIF")
|
[
"mohanson@163.com"
] |
mohanson@163.com
|
90e1cd44b44697565e0e3671ecef51463c51e0e1
|
93d43b915b3853eac80975b6692ffed3b146c89f
|
/network/udp_client.py
|
d8c1ea926d68997d6e835ad861a700f808eecdb2
|
[] |
no_license
|
Killsan/python
|
32ca8b4f1fa4a8a5d70ce6949d764938c37ff14d
|
0c79a482ade3396309cbd055e996438b82dcd3b1
|
refs/heads/master
| 2023-06-14T21:44:29.933776
| 2021-07-13T18:28:39
| 2021-07-13T18:28:39
| 220,499,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b'connected', ('127.0.0.1', 8888))
#(<ip>, <port>); (unix.sock)
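# Hypothetical receiving side for testing the client above:
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   srv.bind(('127.0.0.1', 8888))
#   srv.recvfrom(1024)  # -> (b'connected', ('127.0.0.1', <client port>))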
|
[
"reversflash47@gmail.com"
] |
reversflash47@gmail.com
|
e3f392121a9ec6d09d8a3f65d4276958197c78ce
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/6717daf4a6e92aa49da486c4ffc06201b9fa4611-<test_realm_filter_events>-bug.py
|
9fab8c660152c9383eb365c715984d751f7b4342
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
def test_realm_filter_events(self) -> None:
schema_checker = self.check_events_dict([('type', equals('realm_filters')), ('realm_filters', check_list(None))])
events = self.do_test((lambda : do_add_realm_filter(self.user_profile.realm, '#(?P<id>[123])', 'https://realm.com/my_realm_filter/%(id)s')))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
self.do_test((lambda : do_remove_realm_filter(self.user_profile.realm, '#(?P<id>[123])')))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
06ae6fcfbeaf3ef19671198d81850f9474d6b46b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_248/ch76_2020_04_12_20_54_45_523952.py
|
3764fea605aa8ea89077073aeb44632ac15ffc01
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
def aniversariantes_de_setembro(dicionario):
    aniversariantes = {}
    for k, v in dicionario.items():
        if v[3] == "0" and v[4] == "9":
            aniversariantes[k] = v
    return aniversariantes
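# Hypothetical check, assuming birthdays are stored as 'dd/mm/yyyy' strings
# (so v[3] and v[4] are the month digits):
#   agenda = {'Ana': '01/09/1990', 'Bia': '15/10/1992'}
#   aniversariantes_de_setembro(agenda)  # -> {'Ana': '01/09/1990'}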
|
[
"you@example.com"
] |
you@example.com
|
4e436d01a7d9d68d3c34894ae9851687f1de4c6f
|
700577285824a21df647aba584d51420db59c598
|
/OpenColibri/allauth/account/app_settings.py
|
c7f23b95cd0a2fdd70cdb71e42de82fc305f924b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
epu-ntua/opencolibri
|
2c05acc43ef1b1c86608f6e729a4f83773b01b73
|
78e2411f78a0213b3961145cfe67cd52398cea70
|
refs/heads/master
| 2016-09-11T02:39:43.798777
| 2014-04-06T11:30:39
| 2014-04-06T11:30:39
| 15,764,540
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,397
|
py
|
class AppSettings(object):
class AuthenticationMethod:
USERNAME = 'username'
EMAIL = 'email'
USERNAME_EMAIL = 'username_email'
class EmailVerificationMethod:
# After signing up, keep the user account inactive until the email
# address is verified
MANDATORY = 'mandatory'
# Allow login with unverified e-mail (e-mail verification is still sent)
OPTIONAL = 'optional'
# Don't send e-mail verification mails during signup
NONE = 'none'
def __init__(self, prefix):
self.prefix = prefix
# If login is by email, email must be required
assert (not self.AUTHENTICATION_METHOD
== self.AuthenticationMethod.EMAIL) or self.EMAIL_REQUIRED
# If login includes email, login must be unique
assert (self.AUTHENTICATION_METHOD
== self.AuthenticationMethod.USERNAME) or self.UNIQUE_EMAIL
assert (self.EMAIL_VERIFICATION
!= self.EmailVerificationMethod.MANDATORY) \
or self.EMAIL_REQUIRED
def _setting(self, name, dflt):
from django.conf import settings
return getattr(settings, self.prefix + name, dflt)
@property
def EMAIL_CONFIRMATION_EXPIRE_DAYS(self):
"""
Determines the expiration date of e-mail confirmation mails (#
of days)
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_EXPIRE_DAYS",
getattr(settings, "EMAIL_CONFIRMATION_DAYS", 3))
@property
def EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case of an authenticated user
"""
return self._setting("EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL",
None)
@property
def EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case no user is logged in
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL",
settings.LOGIN_URL)
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
return self._setting("EMAIL_REQUIRED", False)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
ret = self._setting("EMAIL_VERIFICATION",
self.EmailVerificationMethod.OPTIONAL)
# Deal with legacy (boolean based) setting
if ret == True:
ret = self.EmailVerificationMethod.MANDATORY
elif ret == False:
ret = self.EmailVerificationMethod.OPTIONAL
return ret
@property
def AUTHENTICATION_METHOD(self):
from django.conf import settings
if hasattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION"):
import warnings
warnings.warn("ACCOUNT_EMAIL_AUTHENTICATION is deprecated,"
" use ACCOUNT_AUTHENTICATION_METHOD",
DeprecationWarning)
if getattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION"):
ret = self.AuthenticationMethod.EMAIL
else:
ret = self.AuthenticationMethod.USERNAME
else:
ret = self._setting("AUTHENTICATION_METHOD",
self.AuthenticationMethod.USERNAME)
return ret
@property
def UNIQUE_EMAIL(self):
"""
Enforce uniqueness of e-mail addresses
"""
return self._setting("UNIQUE_EMAIL", True)
@property
def SIGNUP_PASSWORD_VERIFICATION(self):
"""
Signup password verification
"""
return self._setting("SIGNUP_PASSWORD_VERIFICATION", True)
@property
def PASSWORD_MIN_LENGTH(self):
"""
Minimum password Length
"""
return self._setting("PASSWORD_MIN_LENGTH", 6)
@property
def EMAIL_SUBJECT_PREFIX(self):
"""
Subject-line prefix to use for email messages sent
"""
return self._setting("EMAIL_SUBJECT_PREFIX", None)
@property
def SIGNUP_FORM_CLASS(self):
"""
Signup form
"""
return self._setting("SIGNUP_FORM_CLASS", None)
@property
def USERNAME_REQUIRED(self):
"""
The user is required to enter a username when signing up
"""
return self._setting("USERNAME_REQUIRED", True)
@property
def USERNAME_MIN_LENGTH(self):
"""
Minimum username Length
"""
return self._setting("USERNAME_MIN_LENGTH", 1)
@property
def PASSWORD_INPUT_RENDER_VALUE(self):
"""
render_value parameter as passed to PasswordInput fields
"""
return self._setting("PASSWORD_INPUT_RENDER_VALUE", False)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.account.adapter.DefaultAccountAdapter')
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys
sys.modules[__name__] = AppSettings('ACCOUNT_')
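# Hypothetical usage sketch: because the module is replaced by an instance,
# attributes resolve lazily against Django settings under the ACCOUNT_ prefix:
#   from allauth.account import app_settings
#   app_settings.EMAIL_REQUIRED  # -> settings.ACCOUNT_EMAIL_REQUIRED, else False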
|
[
"smouzakitis@epu.ntua.gr"
] |
smouzakitis@epu.ntua.gr
|
a7d91e08aac4a3cbc8c9e57421d6606d8a90881f
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/waterfall/_name.py
|
608e0ac1ba031e5709a5828581f3e4766451e155
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="waterfall", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
0377cbc8e9ba2d59e28b08a5a1ce0f5fa8ca1010
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/fact/next_government/own_group/call_case_up_problem/large_point/little_life.py
|
b2b99d6c9740a5162e0e6a7470cbb95876144f50
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
#! /usr/bin/env python
def early_eye(str_arg):
last_company_or_right_company(str_arg)
print('work_world')
def last_company_or_right_company(str_arg):
print(str_arg)
if __name__ == '__main__':
early_eye('place_or_woman')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
9e25c636f9da9596f495b991c9608fe75b4649d3
|
e3d447a81c5462d2d14201f2bc6b82cdcbbca51a
|
/chapter11/c11_2_city_functions.py
|
e4ca3478294a70d075558e1ec889a6b75ce34d05
|
[] |
no_license
|
barcern/python-crash-course
|
f6026f13f75ecddc7806711d65bc53cb88e24496
|
8b55775c9f0ed49444becb35b8d529620537fa54
|
refs/heads/master
| 2023-04-19T17:28:44.342022
| 2021-02-07T23:51:06
| 2021-02-07T23:51:06
| 257,201,280
| 2
| 3
| null | 2021-05-12T17:35:56
| 2020-04-20T07:14:28
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 21:29:14 2020
@author: barbora
Store functions for 11-1.
"""
def return_formatted_city_country(city, country, population=''):
"""Return a string formatted as City, Country - population xxx
when population data available. Otherwise return as City, Country.
"""
if population:
formatted = f"{city.title()}, {country.title()}"
formatted += f" - population {population}"
else:
formatted = f"{city.title()}, {country.title()}"
return formatted
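# Hypothetical calls to the helper above:
#   return_formatted_city_country('santiago', 'chile')
#   # -> 'Santiago, Chile'
#   return_formatted_city_country('santiago', 'chile', population=5000000)
#   # -> 'Santiago, Chile - population 5000000'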
|
[
"bcernakova01@gmail.com"
] |
bcernakova01@gmail.com
|
16edaa237e253f240e580d3cce27529f3a7004c9
|
5ae01ab82fcdedbdd70707b825313c40fb373fa3
|
/scripts/charonInterpreter/parsers/MaterialBlock/TrapSRH/TrapSRHParserLib.py
|
4c71b1d4270cf5a60be099655815429e5c8b8c61
|
[] |
no_license
|
worthenmanufacturing/tcad-charon
|
efc19f770252656ecf0850e7bc4e78fa4d62cf9e
|
37f103306952a08d0e769767fe9391716246a83d
|
refs/heads/main
| 2023-08-23T02:39:38.472864
| 2021-10-29T20:15:15
| 2021-10-29T20:15:15
| 488,068,897
| 0
| 0
| null | 2022-05-03T03:44:45
| 2022-05-03T03:44:45
| null |
UTF-8
|
Python
| false
| false
| 3,700
|
py
|
try:
import coloramaDISABLED as colors
except ImportError:
class stubColors:
"subs for colors when colors doesn't exist on system"
def __init__(self):
self.Fore = colorClass()
self.Back = colorClass()
self.Style = styleClass()
class colorClass():
"stubbed color class"
def __init__(self):
self.BLACK = ""
self.BLUE = ""
self.WHITE = ""
self.RED = ""
self.GREEN = ""
class styleClass():
"stubbed style class"
def __init__(self):
self.RESET_ALL = ""
colors = stubColors()
import sys
from .charonLineParserPhononEnergy import *
from .charonLineParserTrapDensity import *
from .charonLineParserEnergyLevel import *
from .charonLineParserHuangRhysFactor import *
from .charonLineParserTrapType import *
from .charonBlockParserElectronTunnelingParams import *
from .charonBlockParserHoleTunnelingParams import *
from .ElectronTunnelingParams.ElectronTunnelingParamsParserLib import *
from .HoleTunnelingParams.HoleTunnelingParamsParserLib import *
class TrapSRHParserLib:
"This is the TrapSRHParserLib parser library "
def __init__(self):
# set the parser library name
self.parserLibName = "TrapSRHParserLib"
# create the linparser objects
self.lineParsers = []
self.lineParsers.append(charonLineParserPhononEnergy())
self.lineParsers.append(charonLineParserTrapDensity())
self.lineParsers.append(charonLineParserEnergyLevel())
self.lineParsers.append(charonLineParserHuangRhysFactor())
self.lineParsers.append(charonLineParserTrapType())
# create the blockparser objects
self.blockParsers = []
self.blockParsers.append([charonBlockParserElectronTunnelingParams(),ElectronTunnelingParamsParserLib()])
self.blockParsers.append([charonBlockParserHoleTunnelingParams(),HoleTunnelingParamsParserLib()])
# create the parserLibrary objects
parserLibraries = []
parserLibraries.append(ElectronTunnelingParamsParserLib())
parserLibraries.append(HoleTunnelingParamsParserLib())
def isThisMyLine(self,tokenizer,line):
for lP in self.lineParsers:
self.isThisMe = lP.isThisMe(tokenizer,line)
if self.isThisMe == True:
return (True,lP)
return (False,None)
def isThisMyBlock(self,tokenizer,line):
for bP in self.blockParsers:
self.isThisMe = bP[0].isThisMe(tokenizer,line)
if self.isThisMe == True:
return (True,bP[0],bP[1])
return (False,None,None)
def generateHelp(self,genHelp,indent):
self.addIndent = " "
cRStyle = ""
for lP in self.lineParsers:
(self.helpLine,self.helpContent) = lP.getHelp(genHelp)
self.helpContentList = self.helpContent.split("<>")
print (cRStyle+indent+colors.Fore.RED+colors.Back.WHITE+self.helpLine)
cRStyle = "\n"
for hCL in self.helpContentList:
print ("\t"+indent+colors.Fore.BLUE+colors.Back.WHITE+hCL.lstrip())
for bP in range(len(self.blockParsers)):
print (indent+colors.Fore.GREEN+colors.Back.WHITE+self.blockParsers[bP][0].getHelpLine().lstrip())
self.blockParsers[bP][1].generateHelp(genHelp,indent+self.addIndent)
print (indent+colors.Fore.GREEN+colors.Back.WHITE+self.blockParsers[bP][0].getHelpLine().replace("start","end").lstrip())
print (indent+colors.Style.RESET_ALL)
def getName(self):
return self.parserLibName
|
[
"juan@tcad.com"
] |
juan@tcad.com
|
c5cdf70a00c21b9e801db37dcf16fc563db36592
|
6e4702fa7ff89c59871148e8007b769506fffe5b
|
/SmartClass/teacher/migrations/0010_cauhoi_dinh_kem.py
|
012736603018a2929917ba9730924c58cc1dd535
|
[] |
no_license
|
vuvandang1995/hvan
|
3a7bd8892bed30c6c6f4062bcf7a2c2804f34b0d
|
003d9362739944cb3bcd8d1b11dd5b7481d7bd81
|
refs/heads/master
| 2020-05-02T02:31:51.121181
| 2019-03-27T02:32:29
| 2019-03-27T02:32:29
| 177,706,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 2.1 on 2018-09-24 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teacher', '0009_auto_20180919_1601'),
]
operations = [
migrations.AddField(
model_name='cauhoi',
name='dinh_kem',
field=models.FileField(blank=True, null=True, upload_to=''),
),
]
|
[
"dangdiendao@gmail.com"
] |
dangdiendao@gmail.com
|
b58921544b80556ce223763f385799d172a1203f
|
9d6218ca6c75a0e1ec1674fe410100d93d6852cb
|
/app/supervisor/venvs/supervisor/bin/pidproxy
|
067992ca6ce7b31e95969cd766069a5c66c5e897
|
[] |
no_license
|
bopopescu/uceo-2015
|
164694268969dd884904f51b00bd3dc034695be8
|
5abcbfc4ff32bca6ca237d71cbb68fab4b9f9f91
|
refs/heads/master
| 2021-05-28T21:12:05.120484
| 2015-08-05T06:46:36
| 2015-08-05T06:46:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
#!/edx/app/supervisor/venvs/supervisor/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'supervisor==3.1.3','console_scripts','pidproxy'
__requires__ = 'supervisor==3.1.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('supervisor==3.1.3', 'console_scripts', 'pidproxy')()
)
|
[
"root@uceociputra.com"
] |
root@uceociputra.com
|
|
1fcab6195234c0ee9d1fc17fd7ef16750b34d793
|
ede0935c59217973665bc7bef2337f413e5a8c92
|
/my_configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
|
163f7584fa7ed4e72444fa5786d0073890ffdf7b
|
[
"Apache-2.0"
] |
permissive
|
Daniel-xsy/MMDet_VisDrone2021
|
6da6d23c324afb7be9056bb6628b25f5d688af2a
|
668d10b61a4b99dda0163b67b093cad2e699ee3b
|
refs/heads/master
| 2023-07-10T02:42:06.474341
| 2021-08-14T09:36:12
| 2021-08-14T09:36:12
| 376,227,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,410
|
py
|
# model settings
model = dict(
type='CascadeRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=10,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=10,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=10,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
]),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
[
"1491387884@qq.com"
] |
1491387884@qq.com
|
8b99bee6c2a0fa65a0e918224fb7fad1370fa83c
|
9b80999a1bdd3595022c9abf8743a029fde3a207
|
/26-Cluster Analysis in Python /K-Means Clustering /Uniform clustering patterns.py
|
f00c65cb93f72b60f9ab38a41377633cb2f30563
|
[] |
no_license
|
vaibhavkrishna-bhosle/DataCamp-Data_Scientist_with_python
|
26fc3a89605f26ac3b77c15dbe45af965080115a
|
47d9d2c8c93e1db53154a1642b6281c9149af769
|
refs/heads/master
| 2022-12-22T14:01:18.140426
| 2020-09-23T11:30:53
| 2020-09-23T11:30:53
| 256,755,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
'''Now that you are familiar with the impact of seeds, let us look at the bias in k-means clustering towards the formation of uniform clusters.
Let us use a mouse-like dataset for our next exercise. A mouse-like dataset is a group of points that resemble the head of a mouse: it has three clusters of points arranged in circles, one each for the face and two ears of a mouse.
Here is how a typical mouse-like dataset looks (Source).
The data is stored in a Pandas data frame, mouse. x_scaled and y_scaled are the column names of the standardized X and Y coordinates of the data points.'''
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
mouse = pd.read_csv('/Users/vaibhav/Desktop/Python Projects/DataCamp-Data Scientist with python/26-Cluster Analysis in Python /K-Means Clustering /mouse.csv')
# Import the kmeans and vq functions
from scipy.cluster.vq import kmeans, vq
# Generate cluster centers
cluster_centers, distortion = kmeans(mouse[['x_scaled', 'y_scaled']], 3)
# Assign cluster labels
mouse['cluster_labels'], distortion_list = vq(mouse[['x_scaled', 'y_scaled']], cluster_centers)
# Plot clusters
sns.scatterplot(x='x_scaled', y='y_scaled',
hue='cluster_labels', data = mouse)
plt.show()
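# Added note: kmeans initializes its cluster centers from NumPy's global random
# state, so repeated runs can give different clusterings. To reproduce a given
# result, seed the generator before calling kmeans, e.g.:
#
#     import numpy as np
#     np.random.seed(0)
#     cluster_centers, distortion = kmeans(mouse[['x_scaled', 'y_scaled']], 3)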
|
[
"vaibhavkrishna.bhosle@gmail.com"
] |
vaibhavkrishna.bhosle@gmail.com
|
286d6d8c03a680b5b4551b125a0cded889f6916f
|
f3e51466d00510f1dae58f1cb87dd53244ce4e70
|
/LeetCodes/Trees/SubtreeofAnotherTree.py
|
c6b61d426d4a645e799a145aaf8d9876fe7dbaec
|
[] |
no_license
|
chutianwen/LeetCodes
|
40d18e7aa270f8235342f0485bfda2bd1ed960e1
|
11d6bf2ba7b50c07e048df37c4e05c8f46b92241
|
refs/heads/master
| 2022-08-27T10:28:16.594258
| 2022-07-24T21:23:56
| 2022-07-24T21:23:56
| 96,836,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
"""
Given two non-empty binary trees s and t, check whether tree t has exactly the same structure and node values as
a subtree of s. A subtree of s is a tree consisting of a node in s and all of this node's descendants.
The tree s could also be considered as a subtree of itself.
Example 1:
Given tree s:
3
/ \
4 5
/ \
1 2
Given tree t:
4
/ \
1 2
Return true, because t has the same structure and node values with a subtree of s.
Example 2:
Given tree s:
3
/ \
4 5
/ \
1 2
/
0
Given tree t:
4
/ \
1 2
Return false.
"""
class Solution(object):
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
def isSame(p, q):
if p and q:
if p.val != q.val:
return False
else:
return isSame(p.left, q.left) and isSame(p.right, q.right)
else:
return p is q
if s is None:
return False
else:
return isSame(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
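# Minimal local harness (added sketch; on LeetCode the judge supplies TreeNode).
# Builds the trees from Example 1 above and checks the result.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
if __name__ == '__main__':
    s = TreeNode(3, TreeNode(4, TreeNode(1), TreeNode(2)), TreeNode(5))
    t = TreeNode(4, TreeNode(1), TreeNode(2))
    print(Solution().isSubtree(s, t))  # expected: True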
|
[
"tianwen.chu@fedcentric.com"
] |
tianwen.chu@fedcentric.com
|
283836fff8c95f37f06a4335b71842bfc59315df
|
385f0577d98fbf81dd6310b6d826e54f76ae39e7
|
/tests/test_param_vasicek.py
|
8728a4530f5f14be3f505d5f1c9d7407b5a82ce4
|
[
"MIT"
] |
permissive
|
afcarl/diffusions
|
4cd5bf32332e64a2352b398eeb69167e98ea35f9
|
87a85b636cc5b78b9a89a8fd1c0c7e7426385952
|
refs/heads/master
| 2021-05-17T16:02:43.873818
| 2017-09-05T10:14:07
| 2017-09-05T10:14:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test suite for Vasicek parameter class.
"""
from __future__ import print_function, division
import unittest as ut
import numpy as np
import numpy.testing as npt
from diffusions import VasicekParam
class SDEParameterTestCase(ut.TestCase):
"""Test parameter classes."""
def test_vasicekparam_class(self):
"""Test Vasicek parameter class."""
mean, kappa, eta = 1.5, 1., .2
param = VasicekParam(mean, kappa, eta)
self.assertEqual(param.get_model_name(), 'Vasicek')
self.assertEqual(param.get_names(), ['mean', 'kappa', 'eta'])
self.assertEqual(param.mean, mean)
self.assertEqual(param.kappa, kappa)
self.assertEqual(param.eta, eta)
npt.assert_array_equal(param.get_theta(),
np.array([mean, kappa, eta]))
theta = np.ones(3)
param = VasicekParam.from_theta(theta)
npt.assert_array_equal(param.get_theta(), theta)
mat_k0 = param.kappa * param.mean
mat_k1 = -param.kappa
mat_h0 = param.eta**2
mat_h1 = 0
npt.assert_array_equal(param.mat_k0, mat_k0)
npt.assert_array_equal(param.mat_k1, mat_k1)
npt.assert_array_equal(param.mat_h0, mat_h0)
npt.assert_array_equal(param.mat_h1, mat_h1)
theta *= 2
param.update(theta)
npt.assert_array_equal(param.get_theta(), theta)
mat_k0 = param.kappa * param.mean
mat_k1 = -param.kappa
mat_h0 = param.eta**2
mat_h1 = 0
npt.assert_array_equal(param.mat_k0, mat_k0)
npt.assert_array_equal(param.mat_k1, mat_k1)
npt.assert_array_equal(param.mat_h0, mat_h0)
npt.assert_array_equal(param.mat_h1, mat_h1)
self.assertTrue(param.is_valid())
param = VasicekParam(mean, -kappa, eta)
self.assertFalse(param.is_valid())
param = VasicekParam(mean, kappa, -eta)
self.assertFalse(param.is_valid())
if __name__ == '__main__':
ut.main()
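# Background (added note): the mat_* checks above encode the affine form of the
# Vasicek SDE, dr = kappa*(mean - r)*dt + eta*dW. The drift is mat_k0 + mat_k1*r
# with mat_k0 = kappa*mean and mat_k1 = -kappa; the squared diffusion is
# mat_h0 + mat_h1*r with mat_h0 = eta**2 and mat_h1 = 0 (state-independent noise).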
|
[
"khrapovs@gmail.com"
] |
khrapovs@gmail.com
|
937354f7eaee10d293189248aea1dcf827bd1511
|
3c1639bccf3fc0abc9c82c00ab92ac3f25cf105e
|
/Python3网络爬虫/section3-基本库使用/06-正则/01-match/04-贪婪与非贪婪.py
|
52ca63fa3326f58611c3db3505765dcc9288a9be
|
[
"Apache-2.0"
] |
permissive
|
LiuJunb/PythonStudy
|
783318a64496c2db41442ad66e0cc9253b392734
|
3386b9e3ccb398bfcfcd1a3402182811f9bb37ca
|
refs/heads/master
| 2022-12-11T05:22:53.725166
| 2018-11-15T01:34:37
| 2018-11-15T01:34:37
| 143,956,065
| 1
| 0
|
Apache-2.0
| 2022-11-22T01:58:23
| 2018-08-08T03:26:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 978
|
py
|
import re
# .  (dot) matches any single character (except newlines, which e.g. HTML nodes may contain)
# *  (star) repeats the preceding character any number of times
# .* (dot-star) therefore matches any run of characters (except newlines)
content = 'Hello 1234567 World_This is a Regex Demo'
# 1. Greedy matching (.*) consumes as many characters as possible: ^He .* (\d+) .* Demo$
result = re.match('^He.*(\d+).*Demo$', content)
print(result)  # <re.Match object; span=(0, 40), match='Hello 1234567 World_This is a Regex Demo'>
print(result.group(1))  # 7
# 2. Non-greedy matching (.*?) consumes as few characters as possible;
#    at the end of a pattern it may match nothing at all: ^He .*? (\d+) .* Demo$
result2 = re.match('^He.*?(\d+).*Demo$', content)
print(result2)  # <re.Match object; span=(0, 40), match='Hello 1234567 World_This is a Regex Demo'>
print(result2.group(1))  # 1234567
# For the string '今天天气和好[haha],适合敲代码[xixi]' ("nice weather today[haha], good for coding[xixi]"):
# '\[.*\]'  --> [haha],适合敲代码[xixi]
# '\[.*?\]' --> [haha]
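# Executable version of the bracket example above (added demo):
text = '今天天气和好[haha],适合敲代码[xixi]'
print(re.search(r'\[.*\]', text).group())   # greedy  -> [haha],适合敲代码[xixi]
print(re.search(r'\[.*?\]', text).group())  # lazy    -> [haha]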
|
[
"liujun@520it.com"
] |
liujun@520it.com
|
a8bc57e7b9636358a469235f83d9e06dd7c1bf0e
|
3f18a27cfea243d24be2d9428afad3bbd0ad6ec2
|
/gcamp_analysis_files_finished/180222-08-bottom-experiment/src/delta_video_config.py
|
989e334e7465dcf08bc26f105263ad6a24c65454
|
[
"MIT"
] |
permissive
|
riffelllab/Mosquito-larvae-analyses-1
|
a4d7e8cd29b6481438798ed7b455a931f1f8c2b5
|
2701b1b2055d6ee1722898f0fa4e64a9b12e7b24
|
refs/heads/master
| 2023-01-19T04:18:56.894245
| 2020-10-16T19:01:08
| 2020-10-16T19:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
class Config:
def __init__(self):
self.basename = 'delta_video'
self.directory = '/home/eleanor/Documents/gcamp_analysis_files_temp/180222-08-bottom-experiment/data'
self.topics = ['/multi_tracker/1/delta_video',]
self.record_length_hours = 1
|
[
"tabletopwhale@outlook.com"
] |
tabletopwhale@outlook.com
|
20544d415fefef5a3cdeef196113798818d32e24
|
1588a1d601d29c18942d220657185d3bf7b17160
|
/programmers/level3/pro12979.py
|
9eedf137f30657867380e7152b1ebc68e0008148
|
[] |
no_license
|
geonwoomun/AlgorithmStudy
|
1f8148e981beebd2e6f70e65193ce445fa59df96
|
d43b624aad80f10d687a8f4b37cc79d88fc772b3
|
refs/heads/master
| 2020-08-01T05:24:26.980370
| 2020-07-15T05:04:35
| 2020-07-15T05:04:35
| 210,878,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
# Programmers: Base Station Installation (problem 12979)
# I was stuck around 50 points on my own and couldn't get further, so I consulted a blog post.
# I need to train myself to think in simpler, more efficient terms.
import math
def solution(n, stations, w):
    result = 0
    distance = []
    for i in range(1, len(stations)):
        # measure the uncovered gap between each pair of adjacent stations
        distance.append((stations[i] - w - 1) - (stations[i - 1] + w))
    distance.append(stations[0] - w - 1)  # gap between the first apartment and the first station
    distance.append(n - (stations[-1] + w))  # gap between the last station and the last apartment
    # Together these measure every stretch except the ones the existing stations already cover.
    # With w = 2, a station built in the middle of a gap covers 5 positions in total,
    # so if a gap is 6 or more positions, one more station has to be built.
    width = 2 * w + 1  # range a single station's signal can cover
    for dist in distance:  # check each gap
        if dist == 0:  # a gap of 0 needs no station, so skip it
            continue
        else:
            result += math.ceil(dist / width)  # as explained above: ceil(dist / width) stations per gap
return result
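# Usage sketch with the two examples from the problem statement (added):
# print(solution(11, [4, 11], 1))  # -> 3
# print(solution(16, [9], 2))      # -> 3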
|
[
"ansejrrhkd@naver.com"
] |
ansejrrhkd@naver.com
|
9d3ed49cac3834d5be43d76726bc795f44e0a3c5
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve-8.51.857815/carbon/common/lib/aiming.py
|
0091bdb2195110fac1d6ee59e5cdd282d592b617
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
#Embedded file name: carbon/common/lib\aiming.py
"""
Constants for the AI aiming system.
"""
AIMING_VALID_TARGET_GAZE_ID = 1
AIMING_VALID_TARGET_COMBAT_ID = 2
AIMING_CLIENTSERVER_FLAG_CLIENT = 1
AIMING_CLIENTSERVER_FLAG_SERVER = 2
AIMING_CLIENTSERVER_FLAG_BOTH = AIMING_CLIENTSERVER_FLAG_CLIENT | AIMING_CLIENTSERVER_FLAG_SERVER
AIMING_VALID_TARGETS = {AIMING_VALID_TARGET_GAZE_ID: (AIMING_VALID_TARGET_GAZE_ID,
'GazeTarget',
5.0,
AIMING_CLIENTSERVER_FLAG_BOTH),
AIMING_VALID_TARGET_COMBAT_ID: (AIMING_VALID_TARGET_COMBAT_ID,
'CombatTarget',
1.0,
AIMING_CLIENTSERVER_FLAG_BOTH)}
AIMING_VALID_TARGETS_FIELD_ID = 0
AIMING_VALID_TARGETS_FIELD_NAME = 1
AIMING_VALID_TARGETS_FIELD_RESELECTDELAY = 2
AIMING_VALID_TARGETS_FIELD_CLIENTSERVER_FLAG = 3
AIMING_CLIENTSERVER_FLAGS = {AIMING_CLIENTSERVER_FLAG_CLIENT: ' (Client only)',
AIMING_CLIENTSERVER_FLAG_SERVER: ' (Server only)',
AIMING_CLIENTSERVER_FLAG_BOTH: ' (Client & Server)'}
AIMING_COMPONENT_ENTITY_TYPE = 'entityType'
AIMING_ENTITY_TYPE_PC = 'PC'
AIMING_ENTITY_TYPE_NPC = 'NPC'
AIMING_ENTITY_TYPE_OBJECT = 'OBJECT'
AIMING_ENTITY_PC_TYPEID = 1
AIMING_ENTITY_NPC_TYPEID = 2
AIMING_ENTITY_OBJECT_TYPEID = 3
AIMING_ENTITY_TYPE_TO_ID = {AIMING_ENTITY_TYPE_PC: AIMING_ENTITY_PC_TYPEID,
AIMING_ENTITY_TYPE_NPC: AIMING_ENTITY_NPC_TYPEID,
AIMING_ENTITY_TYPE_OBJECT: AIMING_ENTITY_OBJECT_TYPEID}
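# Example (added note): the FIELD_* constants index the tuples above, e.g.
# AIMING_VALID_TARGETS[AIMING_VALID_TARGET_GAZE_ID][AIMING_VALID_TARGETS_FIELD_RESELECTDELAY]
# evaluates to 5.0, the reselect delay for gaze targets.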
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
fc9e43bf3077454377017ccba304ace293c1bc05
|
214e67e48b5f9c24bd64d9c04c94db86ee0c85e0
|
/arcerojas/Propietario/urls.py
|
98ae012e41de1138d6b45a94c070f8f795612e75
|
[] |
no_license
|
efnaranjo6/arcerojas
|
238542f11a91958cf5d3221781c8425c23a8a1c1
|
ace90508d2a95f837c255f9245af3d1bff0d8f02
|
refs/heads/main
| 2023-08-11T10:40:07.196737
| 2021-09-17T21:20:46
| 2021-09-17T21:20:46
| 407,409,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from django.urls import path
from .views import Propietarioview,Propietarioinsertar,Propietarioeditar,Propietarioeliminar
urlpatterns = [
path('', Propietarioview.as_view(), name='propietarios'),
path('propietario/new/', Propietarioinsertar.as_view(), name='Insertar'),
path('propietario/Editar/<int:pk>', Propietarioeditar.as_view(), name='Editar'),
path('propietario/eliminar/<int:pk>', Propietarioeliminar.as_view(), name='Eliminar'),
]
|
[
"efnaranjo6@misena.edu.co"
] |
efnaranjo6@misena.edu.co
|
5b8e8640194cc124752bcf19faabd9197a61a886
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/wikipedia/testcase/interestallcases/testcase2_008_2.py
|
7697a7118f975926627a9d91d9819390182e2f4e
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,859
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.wikipedia',
'appActivity' : 'org.wikipedia.main.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.wikipedia/org.wikipedia.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
swipe(driver, 0.5, 0.6, 0.5, 0.2)
else:
return element
return
def clickoncheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if (len(lists) == 1) :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
# preference setting and exit
try :
os.popen("adb shell svc data enable")
time.sleep(5)
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.DeveloperSettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"useRestbase_setManually\")").click()
clickoncheckable(driver, "new UiSelector().text(\"useRestbase_setManually\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"suppressNotificationPolling\")").click()
clickoncheckable(driver, "new UiSelector().text(\"suppressNotificationPolling\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"memoryLeakTest\")").click()
clickoncheckable(driver, "new UiSelector().text(\"memoryLeakTest\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"readingListLoginReminder\")").click()
clickoncheckable(driver, "new UiSelector().text(\"readingListLoginReminder\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")").click()
clickoncheckable(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")", "false")
driver.press_keycode(4)
time.sleep(2)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.SettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"Show images\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Show images\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"Prefer offline content\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Prefer offline content\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"Send usage reports\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Send usage reports\")", "true")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
finally :
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_008_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase008
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
swipe(driver, 0.5, 0.2, 0.5, 0.8)
driver.press_keycode(4)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_008\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.wikipedia'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
os.popen("adb shell svc data enable")
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
b6008a92acebeb1a4b43cc9e053f074773796fb9
|
00b0cf3d93b7033e1f419b49a0278f5d463733b0
|
/script1.py
|
4f5b757d7c19b4eb20f0bd6b2e4b5a1aebbcf2d4
|
[] |
no_license
|
aramidetosin/Nornir-BGP-OSPF
|
df46364b439fbfaa53542eeffbc39eba415fa950
|
c6837d109c8ce33053af3b1c023952b2cd315c9c
|
refs/heads/master
| 2022-12-10T06:11:18.312519
| 2020-09-13T08:37:18
| 2020-09-13T08:37:18
| 288,286,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
from pyats.async_ import pcall
from genie.conf import Genie
from genie.utils import Dq
from genie.testbed import load
from rich import print
def get_ospf(hostname, dev):
#get show output from routing table
parsed = dev.parse('show ip route')
# use DQ to parse the OSPF routes from the routing table
get_routes = (Dq(parsed).contains('O').get_values('routes'))
# count the number of those OSPF entries
num_routes = len(get_routes)
    print(f"[red]{hostname} has {num_routes} OSPF routes in its routing table[/red]")
def main():
# load testbed
testbed = load('testbed.yaml')
# connect and suppress output
testbed.connect(log_stdout=False)
# use pcall to execute on all devices in parallel
pcall(get_ospf, hostname=testbed.devices, dev=testbed.devices.values())
if __name__ == "__main__":
main()
|
[
"aoluwatosin10@gmail.com"
] |
aoluwatosin10@gmail.com
|
fd01446c2f4d6707e0f766fe7bd1160a36c15b5b
|
358519772669c73092f625f630722c38e1d33783
|
/mccetools/examples/titrateHEWL_e8.py
|
cc186c9419106ff7bf6afa5e57a1598f9ca8d378
|
[] |
no_license
|
minghao2016/mmtools
|
e7e61aca084498408ceae965dd6c9450ad89eafa
|
3ade988afb51cd54ee5a4067d8deaad88afbb0fe
|
refs/heads/master
| 2021-09-21T01:02:22.522187
| 2014-09-19T03:40:03
| 2014-09-19T03:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import mmtools.mccetools.mcce as mcce
import os, sys
### INPUT LINES ####
#
# Specify the input and output PDB filenames
# NOTE: pdbfile and outfile should be local (not absolute) paths for this project
pdbfile = '1e8l_model1.pdb'
outfile = os.path.join(os.curdir,'titrateHEWL_e8_pK.out')
# Specify the course of the pH titration
pHstart = 1.0
pHstep = 1.0
pHiters = 14
# Specify a MCCE parameter file with the desired set of parameters for calculating the pKa
prmfile = '../prmfiles/run.prm.quick'
prmfile = os.path.abspath(prmfile)
# Specify additional or different parameters than prmfile, if desired.
# xtraprms = {}
xtraprms = {'EPSILON_PROT':'8.0'} # NOTE: only eps=4.0 and eps=8.0 are supported!!!
# Write output PDB file with the correct protonation state
### work is done in a temporary dir; Setting cleanup=True will erase these temporary files
mcce.titratePDB(pdbfile, outfile, pHstart, pHstep, pHiters, os.environ['MCCE_LOCATION'], cleanup=False, prmfile=prmfile, xtraprms=xtraprms)
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
4f3dc08a6a651f2e44d73ffb4fad06cf5da0274d
|
3453fc365a2f2e24aaf9b9770d94560440aedc4c
|
/settings.py
|
80abdc05ac5b330ad2d2737b6b010880904d5a50
|
[] |
no_license
|
star1986xk/LOL_DB
|
6cd97e8f14fead69f0eac522d49be3cc3b19596a
|
5431e965f67c17e26152b842420ec292b79ab73e
|
refs/heads/master
| 2022-08-22T16:33:28.809745
| 2020-05-23T13:13:27
| 2020-05-23T13:13:27
| 266,339,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
SQL = {
'host': 'xxxx',
'user': 'xxxx',
'password': 'xxxxxx',
'database': 'lol',
'charset': 'utf8'
}
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
}
|
[
"173013905@qq.com"
] |
173013905@qq.com
|
2dcb3f7173cb22ab12265f533657a71a52f75d7b
|
8226d4d618b524bfe958232d9e7dff09378acaec
|
/cowrie/ssh/userauth.py
|
922be4a236cf9d621a03c5826e03103f692b64f3
|
[
"BSD-2-Clause"
] |
permissive
|
sergey-pronin/cowrie
|
4bb9d20a4a38fb338f1da8317f312bd61b777ffe
|
772cb2e41ea380a05e3d900fab4422f2583ec287
|
refs/heads/master
| 2021-05-14T06:38:08.606324
| 2018-01-03T04:49:57
| 2018-01-03T04:49:57
| 116,247,402
| 1
| 0
| null | 2018-01-04T10:34:00
| 2018-01-04T10:33:59
| null |
UTF-8
|
Python
| false
| false
| 6,132
|
py
|
# Copyright (c) 2009-2014 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
"""
This module contains ...
"""
from __future__ import division, absolute_import
import struct
from twisted.python import failure, log
from twisted.python.compat import _bytesChr as chr
from twisted.internet import defer
from twisted.conch.interfaces import IConchUser
from twisted.conch.ssh import transport, userauth
from twisted.conch.ssh.common import NS, getNS
from twisted.conch import error
from cowrie.core import credentials
class HoneyPotSSHUserAuthServer(userauth.SSHUserAuthServer):
"""
This contains modifications to the authentication system to do:
* Login banners (like /etc/issue.net)
* Anonymous authentication
* Keyboard-interactive authentication (PAM)
* IP based authentication
"""
def serviceStarted(self):
"""
"""
self.interfaceToMethod[credentials.IUsername] = b'none'
self.interfaceToMethod[credentials.IUsernamePasswordIP] = b'password'
self.interfaceToMethod[credentials.IPluggableAuthenticationModulesIP] = b'keyboard-interactive'
self.bannerSent = False
self._pamDeferred = None
userauth.SSHUserAuthServer.serviceStarted(self)
def sendBanner(self):
"""
Display contents of <honeyfs>/etc/issue.net
"""
if self.bannerSent:
return
self.bannerSent = True
try:
honeyfs = self.portal.realm.cfg.get('honeypot', 'contents_path')
issuefile = honeyfs + "/etc/issue.net"
data = open(issuefile).read()
except IOError:
return
if not data or not len(data.strip()):
return
self.transport.sendPacket(
userauth.MSG_USERAUTH_BANNER, NS(data) + NS(b'en'))
def ssh_USERAUTH_REQUEST(self, packet):
"""
"""
self.sendBanner()
return userauth.SSHUserAuthServer.ssh_USERAUTH_REQUEST(self, packet)
def auth_publickey(self, packet):
"""
We subclass to intercept non-DSA/RSA keys, or Conch will crash on ECDSA.
"""
algName, blob, rest = getNS(packet[1:], 2)
        if algName not in (b'ssh-rsa', b'ssh-dsa'):
            log.msg("Attempted public key authentication with %s algorithm" % (algName,))
return defer.fail(error.ConchError("Incorrect signature"))
return userauth.SSHUserAuthServer.auth_publickey(self, packet)
def auth_none(self, packet):
"""
Allow every login
"""
c = credentials.Username(self.user)
srcIp = self.transport.transport.getPeer().host
return self.portal.login(c, srcIp, IConchUser)
def auth_password(self, packet):
"""
Overridden to pass src_ip to credentials.UsernamePasswordIP
"""
password = getNS(packet[1:])[0]
srcIp = self.transport.transport.getPeer().host
c = credentials.UsernamePasswordIP(self.user, password, srcIp)
return self.portal.login(c, srcIp,
IConchUser).addErrback(self._ebPassword)
def auth_keyboard_interactive(self, packet):
"""
Keyboard interactive authentication. No payload. We create a
PluggableAuthenticationModules credential and authenticate with our
portal.
Overridden to pass src_ip to credentials.PluggableAuthenticationModulesIP
"""
if self._pamDeferred is not None:
self.transport.sendDisconnect(
transport.DISCONNECT_PROTOCOL_ERROR,
"only one keyboard interactive attempt at a time")
return defer.fail(error.IgnoreAuthentication())
src_ip = self.transport.transport.getPeer().host
c = credentials.PluggableAuthenticationModulesIP(self.user,
self._pamConv, src_ip)
return self.portal.login(c, src_ip,
IConchUser).addErrback(self._ebPassword)
def _pamConv(self, items):
"""
Convert a list of PAM authentication questions into a
MSG_USERAUTH_INFO_REQUEST. Returns a Deferred that will be called
back when the user has responses to the questions.
@param items: a list of 2-tuples (message, kind). We only care about
kinds 1 (password) and 2 (text).
@type items: C{list}
@rtype: L{defer.Deferred}
"""
resp = []
for message, kind in items:
if kind == 1: # Password
resp.append((message, 0))
elif kind == 2: # Text
resp.append((message, 1))
elif kind in (3, 4):
return defer.fail(error.ConchError(
'cannot handle PAM 3 or 4 messages'))
else:
return defer.fail(error.ConchError(
'bad PAM auth kind %i' % (kind,)))
packet = NS(b'') + NS(b'') + NS(b'')
packet += struct.pack('>L', len(resp))
for prompt, echo in resp:
packet += NS(prompt)
packet += chr(echo)
self.transport.sendPacket(userauth.MSG_USERAUTH_INFO_REQUEST, packet)
self._pamDeferred = defer.Deferred()
return self._pamDeferred
def ssh_USERAUTH_INFO_RESPONSE(self, packet):
"""
The user has responded with answers to PAMs authentication questions.
Parse the packet into a PAM response and callback self._pamDeferred.
Payload::
            uint32 number of responses
string response 1
...
string response n
"""
d, self._pamDeferred = self._pamDeferred, None
try:
resp = []
numResps = struct.unpack('>L', packet[:4])[0]
packet = packet[4:]
while len(resp) < numResps:
response, packet = getNS(packet)
resp.append((response, 0))
if packet:
raise error.ConchError(
"{:d} bytes of extra data".format(len(packet)))
        except Exception:
d.errback(failure.Failure())
else:
d.callback(resp)
|
[
"michel@oosterhof.net"
] |
michel@oosterhof.net
|
8d1dcfdf120fab339b7e7a0c1a0a455b6cfa8730
|
8911d294dbdc2c1b415804ec36112db11ca56148
|
/Best_Buy/App_Best_Buy/urls.py
|
eae0c5b9dcc63d349485ff10c69706ba788a620c
|
[] |
no_license
|
generateintel/BestBuy_Scraper
|
8ab596fd0dd98bd4f57d3024f4e5862af67b0899
|
c6ffba85537250e41b0d450be8fafa4c96d004f7
|
refs/heads/master
| 2022-12-10T14:06:58.131437
| 2020-08-28T15:19:11
| 2020-08-28T15:19:11
| 291,078,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from django.urls import path, include
from rest_framework import routers
from . import views
from .views import *
router = routers.DefaultRouter()
router.register(r'bestbuy(?:/(?P<id>[0-9]+))?', Best_Buy, 'bestbuy')  # user APIs
urlpatterns = [
    path('', include(router.urls)),
]
|
[
"frazmirza58@gmail.com"
] |
frazmirza58@gmail.com
|
6992e6208c0a2fe642723a1efe1d37f8798929dd
|
bb8838e3eec624fd35a61d6d646f941eac1b266a
|
/saga/adaptors/cpi/filesystem/__init__.py
|
6eb35fd84bb36b6735f61432bc6f9c3a4a067591
|
[
"MIT"
] |
permissive
|
agrill/saga-python
|
55087c03e72635ffbb2fe1ca56b5cc02b7ff2094
|
35101e3a40d3cfcb39cb9f0d0c5f64c6f8de5930
|
refs/heads/master
| 2021-01-22T10:14:11.922145
| 2013-11-19T14:38:50
| 2013-11-19T14:38:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
from saga.adaptors.cpi.filesystem.file import File
from saga.adaptors.cpi.filesystem.directory import Directory
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"andre@merzky.net"
] |
andre@merzky.net
|
d59f6584aa265e16f758ceaa9eaaa52f77180d65
|
602bdbd1d8ef4d36ccfdcae5756bc8e448d30584
|
/share/pollen/yamlutil.py
|
a827b5412580893d319067869fc0182c3328adcc
|
[] |
no_license
|
timparkin/timparkingallery
|
1136027bf9cfbad31319958f20771a6fdc9f5fc4
|
6e6c02684a701817a2efae27e21b77765daa2c33
|
refs/heads/master
| 2016-09-06T00:28:16.965416
| 2008-11-25T21:15:45
| 2008-11-25T21:15:45
| 12,716
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
"""
Extension of PySyck that treats all scalars (implicit typing is switched off)
as UTF-8 encoded strings.
To convert scalars to specific types use the standard YAML syntax, i.e.
"!int 1".
"""
import syck
class Loader(syck.Loader):
def construct(self, node):
# Implicit typing is always disabled but we want unicode instances, not
# byte streams, where possible. So, if the node is a scalar and it's
# not been explicitly given a type then treat it as a utf-8 encoded
# string.
if node.kind == 'scalar' and node.tag is None:
return super(Loader, self).construct_python_unicode(node)
return super(Loader, self).construct(node)
def load(source, Loader=Loader):
return syck.load(source, Loader=Loader, implicit_typing=False)
if __name__ == '__main__':
import unittest
POUND = u'\xa3'
POUND_ENC = POUND.encode('utf-8')
class TestCase(unittest.TestCase):
def test_strings(self):
s = load("- foo\n- %s\n- !string %s" % (POUND_ENC, POUND_ENC))
self.assertEquals(s, [u'foo', POUND, POUND_ENC])
self.assertEquals(map(type, s), [unicode, unicode, str])
def test_likeNumbers(self):
s = load("- 1\n- 1.2")
self.assertEquals(s, [u'1', u'1.2'])
def test_explicitNumbers(self):
s = load("- !int 1\n- !float 1.2")
self.assertEquals(s, [1, 1.2])
self.assertEquals(map(type, s), [int, float])
unittest.main()
|
[
"info@timparkin.co.uk"
] |
info@timparkin.co.uk
|
ebca3fd3419746cb6ce74eb0a9f19695c7d634ac
|
b1c7a768f38e2e987a112da6170f49503b9db05f
|
/userprofile/migrations/0023_auto_20190315_1624.py
|
3deeef30d6004cff0aecc2b7f825b79a7f5bed21
|
[] |
no_license
|
Niladrykar/bracketerp
|
8b7491aa319f60ec3dcb5077258d75b0394db374
|
ca4ee60c2254c6c132a38ce52410059cc6b19cae
|
refs/heads/master
| 2022-12-11T04:23:07.504966
| 2019-03-18T06:58:13
| 2019-03-18T06:58:13
| 176,218,029
| 1
| 0
| null | 2022-12-08T03:01:46
| 2019-03-18T06:27:37
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
# Generated by Django 2.0.6 on 2019-03-15 10:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('userprofile', '0022_services'),
]
operations = [
migrations.CreateModel(
name='Pro_services',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('service_name', models.CharField(blank=True, max_length=100)),
('details', models.CharField(blank=True, max_length=100)),
('service_type', models.CharField(blank=True, choices=[('Returns', 'Returns'), ('Communication', 'Communication'), ('License', 'License')], default='Returns', max_length=100)),
('duration', models.CharField(blank=True, choices=[('ANNUALLY', 'ANNUALLY'), ('QUARTERLY', 'QUARTERLY'), ('ONE TIME', 'ONE TIME')], default='ANNUALLY', max_length=100)),
('service_mode', models.CharField(blank=True, choices=[('ON-PREMISES', 'ON-PREMISES'), ('CALLS - VOIP', 'CALLS - VOIP'), ('COLLECTION FROM CLIENT', 'COLLECTION FROM CLIENT')], default='ON-PREMISES', max_length=100)),
('rate', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10)),
('User', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='services',
name='User',
),
migrations.DeleteModel(
name='Services',
),
]
|
[
"niladry.kar85@gmail.com"
] |
niladry.kar85@gmail.com
|
ea506e927f56df9e77c165cbf17c39260478d62d
|
48a7b266737b62da330170ca4fe4ac4bf1d8b663
|
/molsysmt/build/make_bioassembly.py
|
ace9b6f3a60d45deaeeb2648f73cdfcb64aea9d5
|
[
"MIT"
] |
permissive
|
uibcdf/MolSysMT
|
ddab5a89b8ec2377f383884c5169d147cab01322
|
c3d713ba63db24eb8a2426115cf8d9cb3665d225
|
refs/heads/main
| 2023-08-08T15:04:16.217967
| 2023-08-04T05:49:56
| 2023-08-04T05:49:56
| 137,937,243
| 15
| 3
|
MIT
| 2023-06-04T20:27:06
| 2018-06-19T19:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
from molsysmt._private.digestion import digest
import numpy as np
@digest()
def make_bioassembly(molecular_system, bioassembly=None, structure_indices=0, to_form=None):
"""
To be written soon...
"""
from molsysmt.basic import extract, merge, get
from molsysmt.structure import rotate, translate
if bioassembly is None:
aux_bioassemblies = get(molecular_system, bioassembly=True)
bioassembly = list(aux_bioassemblies.keys())[0]
bioassembly = aux_bioassemblies[bioassembly]
elif isinstance(bioassembly, str):
aux_bioassemblies = get(molecular_system, bioassembly=True)
bioassembly = aux_bioassemblies[bioassembly]
units = []
if _all_chains_equal(bioassembly):
chains = bioassembly['chain_indices'][0]
unit_0 = extract(molecular_system, structure_indices=0, selection='chain_index in @chains', syntax='MolSysMT')
for rotation, translation in zip(bioassembly['rotations'], bioassembly['translations']):
unit = rotate(unit_0, rotation=rotation)
unit = translate(unit, translation=translation)
units.append(unit)
else:
for chains, rotation, translation in zip(bioassembly['chain_indices'], bioassembly['rotations'], bioassembly['translations']):
unit = extract(molecular_system, structure_indices=0, selection='chain_index in @chains', syntax='MolSysMT')
unit = rotate(unit, rotation=rotation)
unit = translate(unit, translation=translation)
units.append(unit)
output = merge(units, to_form=to_form)
return output
def _all_chains_equal(bioassembly):
output = True
first_chains = bioassembly['chain_indices'][0]
for chains in bioassembly['chain_indices']:
if not np.all(chains==first_chains):
output = False
break
return output
|
[
"prada.gracia@gmail.com"
] |
prada.gracia@gmail.com
|
2161519984d316cb7df0e9c4a0aaf36ad2336703
|
d93a9c5e63612e26ce6d42b055caecac61f9e8f1
|
/src/analyse/cost_assumptions.py
|
2a7dfdfa8ebe97f09b9cb699dadcd36c0ab2b8d4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
timtroendle/geographic-scale
|
525bd9820128cd8340750cab59815555e1ed6520
|
81ec940e10b8e692429797e6a066a177e1508a89
|
refs/heads/master
| 2023-04-18T21:27:11.533352
| 2021-08-04T15:05:55
| 2021-08-04T15:05:55
| 170,466,995
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,642
|
py
|
import math
import calliope
import pandas as pd
import xarray as xr
EUR_PER_KW = 1 / 1e3 # from €/MW(h) to €/kW(h)
CT_PER_KW = 1e2 / 1e3 # from €/MW(h) to €ct/kW(h)
M_TO_1000KM = 1e-6
EPSILON = 1e-12
TECHS = {
"open_field_pv": "Utility-scale PV",
"roof_mounted_pv": "Rooftop PV",
"wind_onshore_monopoly": "Onshore wind",
"wind_offshore": "Offshore wind",
"biofuel": "Biofuel",
"hydro_run_of_river": "Hydropower run of river",
"hydro_reservoir": "Hydropower with reservoir",
"pumped_hydro": "Pumped hydro storage",
"battery": "Short term storage",
"hydrogen": "Long term storage",
"ac_transmission": "AC transmission^",
}
COST_SOURCES = {
"open_field_pv": "Ref. @JRC:2014 Table 7",
"roof_mounted_pv": "Ref. @JRC:2014 Table 9",
"wind_onshore_monopoly": "Ref. @JRC:2014 Table 4",
"wind_offshore": "Ref. @JRC:2014 Table 5",
"biofuel": "Ref. @JRC:2014 Table 48, ref. @RuizCastello:2015",
"hydro_run_of_river": "Ref. @JRC:2014 Table 14",
"hydro_reservoir": "Ref. @JRC:2014 Table 12",
"pumped_hydro": "Ref. @Schmidt:2019",
"battery": "Ref. @Schmidt:2019",
"hydrogen": "Ref. @Schmidt:2019",
"ac_transmission": "Ref. @JRC:2014 Table 39",
}
def main(path_to_model, scaling_factors, path_to_output):
"""Create table of important cost assumptions."""
model = calliope.read_netcdf(path_to_model)
eur_per_kw = scaling_factors["power"] / scaling_factors["monetary"] * EUR_PER_KW
ct_per_kw = scaling_factors["power"] / scaling_factors["monetary"] * CT_PER_KW
energy_cap = (model.get_formatted_array("cost_energy_cap")
.squeeze("costs")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0)
.drop("costs")) * eur_per_kw
energy_cap.loc["ac_transmission"] = transmission_investment_cost(model, eur_per_kw)
annual_cost = (model.get_formatted_array("cost_om_annual")
.squeeze("costs")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0)
.drop("costs")) * eur_per_kw
annual_cost.loc["ac_transmission"] = transmission_annual_cost(model, eur_per_kw)
storage_cap = (model.get_formatted_array("cost_storage_cap")
.squeeze("costs")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0)
.drop("costs")) * eur_per_kw
lifetime = (model.get_formatted_array("lifetime")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0))
lifetime.loc["ac_transmission"] = transmission_lifetime(model)
variable_costs_prod = (model.get_formatted_array("cost_om_prod")
.squeeze("costs")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0)
.drop("costs")) * ct_per_kw
variable_costs_con = (model.get_formatted_array("cost_om_con")
.squeeze("costs")
.reindex(techs=list(TECHS.keys()))
.groupby("techs")
.mean("locs")
.fillna(0)
.drop("costs")) * ct_per_kw
variable_costs = variable_costs_prod + variable_costs_con
all_costs = xr.Dataset({
"Overnight cost (€/kW)": energy_cap,
"Overnight cost (€/kWh)": storage_cap,
"Annual cost (€/kW/yr)": annual_cost,
"Variable cost (€ct/kWh)": variable_costs,
"Lifetime (yr)": lifetime,
"Source": pd.Series(COST_SOURCES).to_xarray().rename(index="techs")
})
all_costs.rename(techs="Technology").to_dataframe().rename(index=TECHS).to_csv(
path_to_output,
index=True,
header=True,
float_format="%.0f"
)
def transmission_investment_cost(model, scaling_factor):
cost = model.get_formatted_array("cost_energy_cap").squeeze("costs") * scaling_factor
distance = model.get_formatted_array("distance") * M_TO_1000KM
rel_costs = (cost / distance).to_series().dropna()
assert math.isclose(rel_costs.std(), 0, abs_tol=EPSILON)
return rel_costs.iloc[0]
def transmission_annual_cost(model, scaling_factor):
rel_cost = (
model
.get_formatted_array("cost_om_annual_investment_fraction")
.squeeze("costs")
.to_series()
.dropna()
)
assert math.isclose(rel_cost.std(), 0, abs_tol=EPSILON)
investment_cost = transmission_investment_cost(model, scaling_factor)
return rel_cost.iloc[0] * investment_cost
def transmission_lifetime(model):
lifetimes = model.get_formatted_array("lifetime")
return (lifetimes
.groupby(lifetimes.techs.where(~lifetimes.techs.str.contains("ac_transmission"), "ac_transmission"))
.mean(["techs", "locs"])
.sel(techs="ac_transmission")
.item())
if __name__ == "__main__":
main(
path_to_model=snakemake.input.model,
scaling_factors=snakemake.params.scaling_factors,
path_to_output=snakemake.output[0]
)
|
[
"tim.troendle@usys.ethz.ch"
] |
tim.troendle@usys.ethz.ch
|
75e594d130d4bb87a04ccd13b6cc04528faf8c26
|
e267c91f23055397201c3d9c23d7583b269d51b8
|
/backend/pugorugh/tests/test_serializers.py
|
9985fdce7df4cae83ff5caa706f06c94f6628da4
|
[] |
no_license
|
mcintoshsg/pug_or_ugh_v1
|
8678213b4b4ea09a70f369aa08002ff4a8194a29
|
3e735cd840ffc5a85497eab48518800f0757d9f3
|
refs/heads/master
| 2020-03-19T15:26:41.152968
| 2018-06-14T01:30:49
| 2018-06-14T01:30:49
| 136,670,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from pugorugh.serializers import (DogSerializer, UserPrefSerializer)
class DogSerializerTests(APITestCase):
'''SETUP '''
def setUp(self):
''' setup up dummy data for the Dog serializer '''
self.dog_1_data = {
'name': 'dog_1',
'image_filename': '1.jpg',
'breed': 'mutt',
'age': 12,
'gender': 'm',
'size': 'm'
}
def test_get_correct_value(self):
serializer = DogSerializer(data=self.dog_1_data)
self.assertTrue(serializer.is_valid())
self.assertEqual(
serializer.data['name'],
self.dog_1_data['name']
)
class UserPrefSerializerTests(APITestCase):
'''SETUP '''
def setUp(self):
''' create user to be used in our dummy data '''
self.user_1 = User.objects.create(
username='test_user_1',
email='test_user_1@example.com',
password='password'
)
''' set up dummy data for UserPerf Serializer '''
self.user_pref_1 = {
'user': self.user_1,
'age': 'b,y',
'gender': 'm,f',
'size': 'l, xl'
}
def test_validate_userpref_bad_age(self):
self.user_pref_1['age'] = 'z'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertFalse(serializer.is_valid())
self.assertEqual(set(serializer.errors.keys()), set(['age']))
def test_validate_userpref_good_age(self):
self.user_pref_1['age'] = 's'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertTrue(serializer.is_valid())
def test_validate_userpref_bad_gender(self):
self.user_pref_1['gender'] = 'z'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertFalse(serializer.is_valid())
self.assertEqual(set(serializer.errors.keys()), set(['gender']))
def test_validate_userpref_good_gender(self):
self.user_pref_1['gender'] = 'm'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertTrue(serializer.is_valid())
def test_validate_userpref_bad_size(self):
self.user_pref_1['size'] = 'z'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertFalse(serializer.is_valid())
self.assertEqual(set(serializer.errors.keys()), set(['size']))
    def test_validate_userpref_good_size(self):
        self.user_pref_1['size'] = 'm'
serializer = UserPrefSerializer(data=self.user_pref_1)
self.assertTrue(serializer.is_valid())
|
[
"s.g.mcintosh@gmail.com"
] |
s.g.mcintosh@gmail.com
|
c6f51cdc9597157d6863008f9a3144495adc25ba
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/download_and_save_csv.py
|
e075d5dfd7ffc58315af83cc1b7b33371336e92f
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743
| 2022-05-30T19:29:28
| 2022-05-30T19:29:28
| 11,463,325
| 5
| 8
| null | 2019-12-18T16:24:02
| 2013-07-17T00:16:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
"""
download_and_save_csv.py
From:
http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook
"""
from ij import IJ
from java.io import PrintWriter
content = IJ.openUrlAsString('http://cmci.info/imgdata/tenFrameResults.csv')
out = PrintWriter('/Users/jrminter/tmp/test1.csv')
out.write(content)  # PrintWriter.write; 'print' is a reserved keyword in the Jython 2 grammar
out.close()
|
[
"jrminter@gmail.com"
] |
jrminter@gmail.com
|
f514aae0d2c12d93b9d619bb80ff773cf0f9e077
|
7c67952f1c18d42f283f395d02294e148f3dd349
|
/export/tracking/apps.py
|
148b73edaf24675007991d8d3903bdb8ad3ae9cb
|
[] |
no_license
|
314casso/cportal
|
cfb4cc9e53819950177728bc5d42e47f2aa3d45e
|
a0a2043616241429571ec1b99302dada49af1485
|
refs/heads/master
| 2022-12-11T07:19:14.209367
| 2021-10-05T21:27:00
| 2021-10-05T21:27:00
| 110,969,703
| 0
| 0
| null | 2022-12-07T23:47:26
| 2017-11-16T12:37:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class TrackingConfig(AppConfig):
name = 'tracking'
|
[
"picasso75@yandex.ru"
] |
picasso75@yandex.ru
|
6db0fa2b73fdd334a453684a88261ba4cf2ee1cd
|
e1a71cc2773d94d1f6788f7ec830d3723b827745
|
/mayan/apps/redactions/tests/literals.py
|
d2c41f241b83003725ddd7d148d2aa308274f042
|
[
"Apache-2.0"
] |
permissive
|
azees-math/Mayan-EDMS
|
e6ddcee6f188b87e6d64990a85c5af7ad9b95b0c
|
4be3496b233f77d33e16376cb715a80286a50da2
|
refs/heads/master
| 2023-08-06T05:09:57.788757
| 2021-10-07T08:06:51
| 2021-10-07T08:06:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
import os
from django.conf import settings
TEST_REDACTION_ARGUMENT = "{'left': 10, 'top': 10, 'right': 10, 'bottom': 10}"
TEST_REDACTION_ARGUMENT_EDITED = "{'left': 20, 'top': 20, 'right': 20, 'bottom': 20}"
TEST_REDACTION_DOCUMENT_FILENAME = 'black_upper_left_corner.png'
TEST_REDACTION_DOCUMENT_PATH = os.path.join(
settings.BASE_DIR, 'apps', 'redactions', 'tests', 'contrib',
'sample_documents', TEST_REDACTION_DOCUMENT_FILENAME
)
|
[
"roberto.rosario@mayan-edms.com"
] |
roberto.rosario@mayan-edms.com
|
530588e4198ab812971feb9aac12ecfb9442af61
|
d9eef8dd3489682c8db41f2311e3058d1f369780
|
/.history/abel-network-files/mcmc_alg_implementation_own_two_20180701105942.py
|
7866626da6dfd8ef1f5122844411c56107d23daa
|
[] |
no_license
|
McKenzie-Lamb/Gerrymandering
|
93fe4a49fe39a0b307ed341e46ba8620ea1225be
|
b7a7c4129d6b0fcd760ba8952de51eafa701eac3
|
refs/heads/master
| 2021-01-25T06:06:43.824339
| 2018-10-16T14:27:01
| 2018-10-16T14:27:01
| 93,526,515
| 0
| 0
| null | 2018-07-12T19:07:35
| 2017-06-06T14:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,212
|
py
|
# Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and each edge represents adjacency between
# tracts, using graph-tool instead of networkx
import random
import numpy as np
import graph_tool.all as gt
from pathlib import Path
def create_graph_views(district_total_no):
graph_views = list()
for i in range(district_total_no):
main_graph_view = gt.GraphView(graph)
graph_view_check = main_graph_view.new_vertex_property("bool")
matched_vertices = gt.find_vertex(graph, district_no, i)
for j in matched_vertices:
graph_view_check[j] = True
graph_view = gt.GraphView(main_graph_view, vfilt=graph_view_check)
graph_views.append(graph_view)
return graph_views
def turn_off_edges(districts_graphs):
turned_off_graphs = list()
# Iterate through districts and selects random edges
for district in range(len(districts_graphs)):
to_delete = districts_graphs[district].new_edge_property('bool')
edges = districts_graphs[district].get_edges()
        selected = edges[np.random.randint(edges.shape[0], size=int(len(edges) // 3.5)), :]
for i in selected:
to_delete[i] = True
turned_off_graphs.append(gt.GraphView(districts_graphs[district], efilt=to_delete))
return turned_off_graphs
def get_cp_boundaries(graph, turned_on_graphs):
cp_boundary = list()
for g in range(len(turned_on_graphs)):
cp_label, hist = gt.label_components(turned_on_graphs[g])
labels = set(cp_label.a)
for l in labels:
cp = gt.find_vertex(turned_on_graphs[g], cp_label, l)
label_boun = 0
for v in cp:
vertex_bound = False
for n in graph.vertex(v).all_neighbors():
for g_two in range(len(turned_on_graphs)):
if g == g_two:
continue
try:
turned_on_graphs[g_two].vertex(n)
except ValueError:
continue
else:
vertex_bound = True
break
if vertex_bound == True:
label_boun += 1
break
if label_boun == len(cp):
cp_boundary.append(cp)
return cp_boundary
def get_non_adjacent_v(labels_in_boundaries, graph):
list_to_swap = random.sample(labels_in_boundaries, random.randint(2,len(labels_in_boundaries)//2))
index_to_del = list()
for l in range(len(list_to_swap)):
for v in range(len(list_to_swap[l])):
for l_two in range(len(list_to_swap)):
if l == l_two:
continue
for v_two in range(len(list_to_swap[l_two])):
if len(gt.shortest_path(graph, graph.vertex(list_to_swap[l][v]), graph.vertex(list_to_swap[l_two][v_two]))[0]) < 3:
index_to_del.append(l)
for i in range(len(list_to_swap)):
if i in index_to_del:
try:
del list_to_swap[i]
except IndexError:
                print("Empty, repeating")
                return get_non_adjacent_v(labels_in_boundaries, graph)  # retry with a fresh random sample
return list_to_swap
def gather_districts_data(districts_graphs):
for i in range(len(districts_graphs)):
population = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["pop"] = population
districts_graphs[i].graph_properties["pop"] = 0
dem_vote = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["dem_vote"] = dem_vote
districts_graphs[i].graph_properties["dem_vote"] = 0
rep_vote = districts_graphs[i].new_graph_property('int')
districts_graphs[i].graph_properties["rep_vote"] = rep_vote
districts_graphs[i].graph_properties["rep_vote"] = 0
for v in districts_graphs[i].vertices():
districts_graphs[i].graph_properties["pop"] += graph.vp.data[v]["PERSONS"]
districts_graphs[i].graph_properties["dem_vote"] += graph.vp.data[v]["CONDEM14"]
districts_graphs[i].graph_properties["rep_vote"] += graph.vp.data[v]["CONREP14"]
print(districts_graphs[i].graph_properties["dem_vote"])
print(districts_graphs[i].graph_properties["rep_vote"])
def random_color():
    return list(np.random.choice(range(256), size=3)) + [1]
def adjust_color(districts_graphs, color, ring_color):
for i in range(len(districts_graphs)):
ring_color_to = random_color()
print(ring_color_to)
if districts_graphs[i].graph_properties["dem_vote"] > districts_graphs[i].graph_properties["rep_vote"]:
color_ = (0,0,255,1)
else:
color_ = (255,0,0,1)
for v in districts_graphs[i].vertices():
color[v] = color_
ring_color[v] = ring_color_to
return color, ring_color
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph.gt"))
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
cp_label = graph.new_vertex_property("int")
# Init variables
district_total_no = 2
gt.graph_draw(graph, pos=graph.vp.pos,
output=str(main_folder / ('tmp.png')),
bg_color=(255, 255, 255, 1), vertex_text=graph.vertex_index,
vertex_fill_color=color, vertex_color = ring_color)
# Separates graph into blocks
districts = gt.minimize_blockmodel_dl(graph, district_total_no, district_total_no)
district_no = districts.get_blocks()
districts.draw(output='tmp.png', vertex_text=graph.vertex_index)
# Create the different graphs
districts_graphs = create_graph_views(district_total_no)
for i in range(len(districts_graphs)):
gt.graph_draw(
districts_graphs[i], pos=graph.vp.pos,
output=str(main_folder / ('tmp'+str(i)+'.png')),
bg_color=(255, 255, 255, 1))
turned_on_graphs = turn_off_edges(districts_graphs)
for i in range(len(districts_graphs)):
gt.graph_draw(
turned_on_graphs[i], pos=graph.vp.pos,bg_color=(255,255,255,1),vertex_size=2,
output=str(main_folder / ('tmp1'+str(i)+'.png')), vertex_text=graph.vertex_index)
labels_in_boundaries = get_cp_boundaries(graph, turned_on_graphs)
selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
gather_districts_data(districts_graphs)
color, ring_color = adjust_color(districts_graphs, color, ring_color)
print(color[2])
print(ring_color[2])
print(ring_color[17])
gt.graph_draw(graph, pos=graph.vp.pos,
output=str(main_folder / ('tmp.png')),
bg_color=(255, 255, 255, 1), vertex_text=graph.vertex_index,vertex_color = ring_color, vertex_fill_color=color)
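# --- Editor's illustrative sketch (not part of the original script) ---
# Minimal demonstration of the GraphView/vfilt pattern used throughout,
# assuming a reasonably recent graph-tool: build a 4-vertex path, flag two
# vertices in a boolean property map, and the filtered view keeps only them.
demo = gt.Graph(directed=False)
demo.add_vertex(4)
demo.add_edge_list([(0, 1), (1, 2), (2, 3)])
keep = demo.new_vertex_property("bool")
keep[demo.vertex(0)] = True
keep[demo.vertex(1)] = True
demo_view = gt.GraphView(demo, vfilt=keep)
print(demo_view.num_vertices())  # -> 2: only the flagged vertices remain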
|
[
"gonzaleza@ripon.edu"
] |
gonzaleza@ripon.edu
|
cb8a337f99df522db71f2b13b2ef15b38319466d
|
d7753137a13c068cb0484bdc9a8237a36378db1b
|
/lintcode/array/search_in_rorated_sorted_array.py
|
52d5fbb376999b01e306bea3efa03460129f419d
|
[] |
no_license
|
alexkie007/offer
|
740b1e41b9d87de3b31df961c33371a5e3430133
|
85ceaf8f3da0efd66b4394ef16669ea673218265
|
refs/heads/master
| 2021-04-12T11:56:49.779558
| 2018-11-03T05:26:51
| 2018-11-03T05:26:51
| 126,163,525
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
class Solution:
@staticmethod
def search_in_rorated_sorted_array(nums, target):
if len(nums) < 1:
return -1
start = 0
end = len(nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if nums[mid] == target:
return mid
if nums[mid] > nums[start]:
if nums[mid] >= target >= nums[start]:
end = mid
else:
start = mid
else:
if nums[mid] <= target <= nums[end]:
start = mid
else:
end = mid
if nums[start] == target:
return start
if nums[end] == target:
return end
return -1
s = Solution()
print(s.search_in_rorated_sorted_array([4, 5, 6, 1, 2, 3], 1))
print(s.search_in_rorated_sorted_array([4, 5, 1, 2, 3], 0))
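# --- Editor's aside (added; behavior inferred from the code above) ---
# Loop invariant: comparing nums[mid] with nums[start] identifies which half
# of [start, end] is sorted, and the search narrows into whichever half can
# still contain target. Two extra spot checks under that reading:
assert s.search_in_rorated_sorted_array([6, 7, 1, 2, 3, 4, 5], 7) == 1
assert s.search_in_rorated_sorted_array([1], 1) == 0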
|
[
"alexkie@yeah.net"
] |
alexkie@yeah.net
|
ea4ee80048587bafcca6e3c883d30179a89772a6
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/eAnhzXPeGbobqk2P2_10.py
|
f3ba2d9fb2d0797b24386f1f2080ef7df5d01f84
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
"""
Create a function that takes in a year and returns the correct century.
### Examples
century(1756) ➞ "18th century"
century(1555) ➞ "16th century"
century(1000) ➞ "10th century"
century(1001) ➞ "11th century"
century(2005) ➞ "21st century"
### Notes
* All years will be between `1000` and `2010`.
* The 11th century is between 1001 and 1100.
* The 18th century is between 1701-1800.
"""
def century(year):
if year==1000:
return "10th century"
if year in range(1001,1101):
return "11th century"
if year in range(1101,1201):
return "12th century"
if year in range(1201,1301):
return "13th century"
if year in range(1301,1401):
return "14th century"
if year in range(1401,1501):
return "15th century"
if year in range(1501,1601):
return "16th century"
if year in range(1601,1701):
return "17th century"
if year in range(1701,1801):
return "18th century"
if year in range(1801,1901):
return "19th century"
if year in range(1901,2001):
return "20th century"
    if year in range(2001,2011):
return "21st century"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
810266a72fa8a60275fcde3efd5f40941c1a6a9b
|
779a603f16a13c3cfc14f9923fae00b95430d041
|
/sentry.conf.py
|
4ff55b11ffacaa19364dcea51db814a33f8f66c2
|
[] |
no_license
|
ImmaculateObsession/sentry-server
|
ee4b8e3b54e8b220efd479ba74486891cbbd68bd
|
f23a7098565d166200e2ee90b5db12555fff31ea
|
refs/heads/master
| 2021-01-25T04:01:57.710957
| 2013-10-12T01:13:41
| 2013-10-12T01:13:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,749
|
py
|
# This file is just Python, with a touch of Django which means you
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
def get_env_variable(var_name):
""" Get the environment variable or return exception """
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s env variable" % var_name
raise ImproperlyConfigured(error_msg)
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
# If you change this, you'll also need to install the appropriate python
# package: psycopg2 (Postgres) or mysql-python
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
# If you're using Postgres, we recommend turning on autocommit
# 'OPTIONS': {
# 'autocommit': True,
# }
}
}
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
## CACHE ##
###########
# You'll need to install the required dependencies for Memcached:
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
###########
## Queue ##
###########
# See http://sentry.readthedocs.org/en/latest/queue/index.html for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
# You can enable queueing of jobs by turning off the always eager setting:
# CELERY_ALWAYS_EAGER = False
# BROKER_URL = 'redis://localhost:6379'
####################
## Update Buffers ##
####################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
# You'll need to install the required dependencies for Redis buffers:
# pip install redis hiredis nydus
#
# SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
# SENTRY_REDIS_OPTIONS = {
# 'hosts': {
# 0: {
# 'host': '127.0.0.1',
# 'port': 6379,
# }
# }
# }
################
## Web Server ##
################
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# header, and uncomment the following setting
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3, # the number of gunicorn workers
'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
#################
## Mail Server ##
#################
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
###########
## etc. ##
###########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = get_env_variable('SENTRY_KEY')
# http://twitter.com/apps/new
# It's important that you input a callback URL, even if it's useless. We have no idea why; consult Twitter.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
# http://developers.facebook.com/setup/
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
# https://github.com/settings/applications/new
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
# https://trello.com/1/appKey/generate
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
# https://confluence.atlassian.com/display/BITBUCKET/OAuth+Consumers
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
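# --- Editor's note (assumed deployment pattern, not from the original) ---
# get_env_variable() above makes startup fail fast when SENTRY_KEY is unset;
# a deployment would typically export a long random value before launching,
# e.g.:
#   export SENTRY_KEY="some-long-random-string"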
|
[
"pjj@philipjohnjames.com"
] |
pjj@philipjohnjames.com
|
13faf6f38fe17b5382a1c81a9664af97121e9db8
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/tod/const.py
|
3b6f8c23e17435323c275b6e9c860f138def77d0
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
"""Constants for the Times of the Day integration."""
DOMAIN = "tod"
CONF_AFTER_TIME = "after_time"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_TIME = "before_time"
CONF_BEFORE_OFFSET = "before_offset"
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
b3018f21a87b0c01ed7dde5c583582ff0924abff
|
a4deea660ea0616f3b5ee0b8bded03373c5bbfa2
|
/concrete_instances/register-variants/mulq_r64/instructions/mulq_r64/mulq_r64.gen.vex.py
|
f053dc5ee41260d5e4a5978c50a17d898231afb4
|
[] |
no_license
|
Vsevolod-Livinskij/x86-64-instruction-summary
|
4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd
|
c276edab1b19e3929efb3ebe7514489f66087764
|
refs/heads/master
| 2022-02-02T18:11:07.818345
| 2019-01-25T17:19:21
| 2019-01-25T17:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
import angr
proj = angr.Project('./instructions/mulq_r64/mulq_r64.o')
print(proj.arch)
print(proj.entry)
print(proj.filename)
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
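# --- Editor's note (assumed, based on typical angr/pyvex usage) ---
# irsb.pp() pretty-prints the VEX IR of the entry block; if finer-grained
# inspection is needed, the individual IR statements should also be
# reachable via irsb.statements.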
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
30844235afba20a859c1af14b83712062e315731
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Control/AthenaExamples/AthExThinning/share/ReadNonSlimmedData_jobOptions.py
|
38f99092450c5b1a4e3e2f5c68191d7da6279a5f
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,756
|
py
|
###############################################################
#
# Job options file
#
#==============================================================
#
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon.Constants import VERBOSE,DEBUG,INFO,WARNING,ERROR
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed
if 'EVTMAX' not in dir():
EVTMAX = -1
pass
theApp.EvtMax = EVTMAX
#--------------------------------------------------------------
# Load POOL support
#--------------------------------------------------------------
import AthenaPoolCnvSvc.ReadAthenaPool
if 'INPUT' not in dir():
INPUT = ["non.slimmed.my.data.pool"]
svcMgr.EventSelector.InputCollections = INPUT
svcMgr.PoolSvc.ReadCatalog = [ 'xmlcatalog_file:PoolFileCatalog.xml' ]
svcMgr.PoolSvc.WriteCatalog = 'xmlcatalog_file:PoolFileCatalog.xml'
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
#####################################################
# read back AthExFatObject
#####################################################
from AthExThinning.Lib import PyReadFatObject
topSequence += PyReadFatObject(
"ReadFatObject",
particles = "Particles_test1",
fatobject = "FatObject_test1",
OutputLevel = INFO
)
#--------------------------------------------------------------
# POOL Persistency
#--------------------------------------------------------------
import AthenaPoolCnvSvc.WriteAthenaPool as wap
outStream = wap.AthenaPoolOutputStream("StreamUSR")
if 'OUTPUT' not in dir():
OUTPUT = "reaccessed.%s" % INPUT[0]
svcMgr.PoolSvc.CheckDictionary = True
# Stream's output file
outStream.OutputFile = OUTPUT
# Event Info
outStream.ItemList = [
"EventInfo#*",
"AthExParticles#*",
"AthExDecay#*",
"AthExElephantino#*",
"AthExFatObject#*",
]
svcMgr.AthenaPoolCnvSvc.CommitInterval = 10
##############################################################
#
# Customise OutputLevel
#
##############################################################
# OUTPUT PRINTOUT LEVEL
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
svcMgr.MessageSvc.useColors = False
svcMgr.MessageSvc.defaultLimit = 4000000
svcMgr.MessageSvc.OutputLevel = ERROR
#==============================================================
#
# End of job options file
#
###############################################################
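# --- Editor's usage note (assumed, not from the original) ---
# Job options files like this one are normally not executed directly; they
# are passed to the Athena launcher, e.g.:
#   athena.py ReadNonSlimmedData_jobOptions.py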
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
aa760ca3f7d7c9e12b8ef69e0da9c1b134bc975e
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/chrome/installer/linux/debian/package_version_interval.py
|
b386fb69728bdb3c6f0077166773d44d2f2e85a1
|
[
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 4,844
|
py
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import deb_version
class PackageVersionIntervalEndpoint:
def __init__(self, is_open, is_inclusive, version):
    self._is_open = is_open
self._is_inclusive = is_inclusive
self._version = version
def _intersect(self, other, is_start):
if self._is_open and other._is_open:
return self
if self._is_open:
return other
if other._is_open:
return self
cmp_code = self._version.__cmp__(other._version)
if not is_start:
cmp_code *= -1
if cmp_code > 0:
return self
if cmp_code < 0:
return other
if not self._is_inclusive:
return self
return other
def __str__(self):
return 'PackageVersionIntervalEndpoint(%s, %s, %s)' % (
self._is_open, self._is_inclusive, self._version)
def __eq__(self, other):
if self._is_open and other._is_open:
return True
return (self._is_open == other._is_open and
self._is_inclusive == other._is_inclusive and
self._version == other._version)
class PackageVersionInterval:
def __init__(self, string_rep, package, start, end):
self.string_rep = string_rep
self.package = package
self.start = start
self.end = end
def contains(self, version):
if not self.start._is_open:
if self.start._is_inclusive:
if version < self.start._version:
return False
elif version <= self.start._version:
return False
if not self.end._is_open:
if self.end._is_inclusive:
if version > self.end._version:
return False
elif version >= self.end._version:
return False
return True
def intersect(self, other):
return PackageVersionInterval(
'', '', self.start._intersect(other.start, True),
self.end._intersect(other.end, False))
def implies(self, other):
if self.package != other.package:
return False
return self.intersect(other) == self
def __str__(self):
return 'PackageVersionInterval(%s)' % self.string_rep
def __eq__(self, other):
return self.start == other.start and self.end == other.end
class PackageVersionIntervalSet:
def __init__(self, intervals):
self.intervals = intervals
def formatted(self):
return ' | '.join([interval.string_rep for interval in self.intervals])
def _interval_implies_other_intervals(self, interval, other_intervals):
for other_interval in other_intervals:
if interval.implies(other_interval):
return True
return False
def implies(self, other):
# This disjunction implies |other| if every term in this
# disjunction implies some term in |other|.
for interval in self.intervals:
if not self._interval_implies_other_intervals(interval, other.intervals):
return False
return True
def version_interval_endpoints_from_exp(op, version):
open_endpoint = PackageVersionIntervalEndpoint(True, None, None)
inclusive_endpoint = PackageVersionIntervalEndpoint(False, True, version)
exclusive_endpoint = PackageVersionIntervalEndpoint(False, False, version)
if op == '>=':
return (inclusive_endpoint, open_endpoint)
if op == '<=':
return (open_endpoint, inclusive_endpoint)
if op == '>>' or op == '>':
return (exclusive_endpoint, open_endpoint)
if op == '<<' or op == '<':
    return (open_endpoint, exclusive_endpoint)
assert op == '='
return (inclusive_endpoint, inclusive_endpoint)
def parse_dep(dep):
"""Parses a package and version requirement formatted by dpkg-shlibdeps.
Args:
dep: A string of the format "package (op version)"
Returns:
A PackageVersionInterval.
"""
  package_name_regex = r'[a-z][a-z0-9\+\-\.]+'
match = re.match('^(%s)$' % package_name_regex, dep)
if match:
return PackageVersionInterval(dep, match.group(1),
PackageVersionIntervalEndpoint(True, None, None),
PackageVersionIntervalEndpoint(True, None, None))
  match = re.match(r'^(%s) \(([\>\=\<]+) ([\~0-9A-Za-z\+\-\.\:]+)\)$' %
                   package_name_regex, dep)
if match:
(start, end) = version_interval_endpoints_from_exp(
match.group(2), deb_version.DebVersion(match.group(3)))
return PackageVersionInterval(dep, match.group(1), start, end)
  sys.stderr.write('Failed to parse ' + dep + '\n')
sys.exit(1)
def parse_interval_set(deps):
"""Parses a disjunction of package version requirements.
Args:
deps: A string of the format
"package \(op version\) (| package \(op version\))*"
Returns:
A list of PackageVersionIntervals
"""
return PackageVersionIntervalSet(
[parse_dep(dep.strip()) for dep in deps.split('|')])
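# --- Editor's usage sketch (assumed inputs; deb_version must be importable) ---
# a = parse_interval_set('libc6 (>= 2.17)')
# b = parse_interval_set('libc6 (>= 2.4)')
# print(a.implies(b))  # True: ">= 2.17" is at least as strict as ">= 2.4"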
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
88c81e959b5d2d8b8108c70ef1ae00c46a5f20a4
|
7ff333dd18ebea4159160b07c2e281461e021e25
|
/parsers/linux_software_parser.py
|
bab26705bf0a9b28917c545ee615ac75f91acce5
|
[
"Apache-2.0",
"DOC"
] |
permissive
|
defaultnamehere/grr
|
d768240ea8ffc9d557f5fe2e272937b83398b6e3
|
ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e
|
refs/heads/master
| 2021-01-21T19:09:18.863900
| 2014-12-07T01:49:53
| 2014-12-07T01:49:53
| 27,655,857
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
#!/usr/bin/env python
"""Simple parsers for Linux files."""
import re
from debian_bundle import deb822
from grr.lib import parsers
from grr.lib import rdfvalue
class DebianPackagesStatusParser(parsers.FileParser):
"""Parser for /var/lib/dpkg/status. Yields SoftwarePackage semantic values."""
output_types = ["SoftwarePackage"]
supported_artifacts = ["DebianPackagesStatus"]
installed_re = re.compile(r"^\w+ \w+ installed$")
def Parse(self, stat, file_object, knowledge_base):
"""Parse the status file."""
_, _ = stat, knowledge_base
try:
for pkg in deb822.Packages.iter_paragraphs(file_object):
if self.installed_re.match(pkg["Status"]):
soft = rdfvalue.SoftwarePackage(
name=pkg["Package"],
description=pkg["Description"],
version=pkg["Version"],
architecture=pkg["Architecture"],
publisher=pkg["Maintainer"],
install_state="INSTALLED")
yield soft
except SystemError:
yield rdfvalue.Anomaly(type="PARSER_ANOMALY",
symptom="Invalid dpkg status file")
|
[
"amoser@google.com"
] |
amoser@google.com
|
1f83f61dfa9497557e6936eb982a70d2efc4f3d7
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j05369+3955/sdB_galex_j05369+3955_coadd.py
|
491e3294e315021e4dbeb2b572fa220d4d65db96
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[84.234625,39.921497], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j05369+3955/sdB_galex_j05369+3955_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j05369+3955/sdB_galex_j05369+3955_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
e5ea45b50f4220f2ee941a580567bfd3619b7a03
|
55c1218bdd3f87554b3e462ab3609d34442a427e
|
/ch06/codeListing06-8.py
|
a85ed3c9ddbee9cc137d4e073bdec128d50112b4
|
[] |
no_license
|
oilmcut2019/Teaching_material_python
|
b0b0706ea14c9ef70ddabb3ec705e4be7f7783aa
|
28fd3c344c49d004e20322e8d33b1f0bfec38e0c
|
refs/heads/master
| 2020-05-18T16:43:40.805479
| 2019-05-02T06:47:36
| 2019-05-02T06:47:36
| 184,533,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
def get_vowels_in_word(word):
"""Return vowels in string word--include repeats."""
vowel_str = "aeiou"
vowels_in_word = ""
for char in word:
if char in vowel_str:
vowels_in_word += char
return vowels_in_word
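# --- Editor's example (added for illustration) ---
print(get_vowels_in_word("onomatopoeia"))  # -> ooaooeia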
|
[
"m07158031@o365.mcut.edu.tw"
] |
m07158031@o365.mcut.edu.tw
|
823128508170e7b6f582e87466c71dc6f760b04d
|
cf7d96bdd34205ede987f0985dfc9e3ab415ee06
|
/visual_export/spreadsheet/base.py
|
c910b70539f3680f0a48fb6616e889e1eccc7a93
|
[] |
no_license
|
hendrasaputra0501/btxjalan
|
afc93467d54a6f20ef6ac46f7359e964ad5d42a0
|
d02bc085ad03efc982460d77f7af1eb5641db729
|
refs/heads/master
| 2020-12-30T11:02:05.416120
| 2017-07-31T01:34:08
| 2017-07-31T01:34:08
| 98,836,234
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# -*- coding: utf-8 -*-
from cStringIO import StringIO
class SpreadSheetBase(object):
document = None
table = None
def __init__(self, title):
self.title = title
def tofile(self):
if self.document is None:
raise Exception('No document found')
fp = StringIO()
self.document.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
def AddRow(self, style=None):
        raise NotImplementedError('AddRow must be implemented by a subclass')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
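# --- Editor's sketch (hypothetical subclass; names are illustrative) ---
# A concrete subclass is expected to set self.document to an object exposing
# .save(fileobj), which tofile() relies on:
# class OdsSpreadSheet(SpreadSheetBase):
#     def __init__(self, title):
#         SpreadSheetBase.__init__(self, title)
#         self.document = make_ods_document()  # hypothetical factory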
|
[
"hendrasaputra0501@gmail.com"
] |
hendrasaputra0501@gmail.com
|
c1fbb8cbba54fa88ad00b3eaefacc4651f85a474
|
57397e9891525c53d7d02c91634e917e1de74772
|
/src/procesamiento/python/PSD2RS.py
|
1e572a6acb6b68555a9a2ee8cdd39cd2814cd5f1
|
[] |
no_license
|
gdiazh/pruebas_vibraciones
|
614964ad4fb2bb7770f67f885986f3b65e2c0571
|
0baabc76a6cd9444e6cedac8da14b0a78169b34e
|
refs/heads/master
| 2020-05-18T01:56:47.861642
| 2019-04-30T18:48:06
| 2019-04-30T18:48:06
| 184,103,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 15:13:08 2018
General approach
PSD1: MIL-STD-1540C Acceptance Level
PSD2: NASA random vibration for mass less than 22.7 kg (Acceptable condition in CubeSat)
Conservative damping ratio = 0.05
@author: Elias Obreque
"""
import matplotlib.pyplot as plt
import numpy as np
# PSD input: MIL-STD-1540C
#HZ = [20, 150, 600, 2000]
#GRMS2 = [0.0053, 0.04, 0.04, 0.0036]
# PSD input: NASA
HZ = [20, 50, 800, 2000]
GRMS2 = [0.013, 0.08, 0.08, 0.013]
#==========================================================================
# Natural frequency
FN = np.arange(20, 2010, 10)
xi = 0.05
#============================================================================
# Slope of the curve
m = []
for i in range(1, len(HZ)):
base = np.log10(HZ[i]) - np.log10(HZ[i - 1])
alt = np.log10(GRMS2[i]) - np.log10(GRMS2[i - 1])
m.append(alt/base)
# GRMS2(f)
def G_f(f):
Gn = 0
for i in range(1, len(HZ)):
if f >= HZ[i - 1] and f <= HZ[i]:
Const = GRMS2[i - 1]/HZ[i - 1]**m[i-1]
Gn = Const*f**m[i-1]
elif f > max(HZ):
Const = GRMS2[-1]/HZ[-1]**m[-1]
Gn = Const*f**m[-1]
return Gn
def AreaRA(fx, x):
areaRA = 0
dx = x[1] - x[0]
for i in range(len(x)):
areaRA = areaRA + fx[i]*dx
return areaRA**0.5
#============================================================================
areaPSD = 0
for i in range(1, len(HZ)):
base = HZ[i] - HZ[i -1]
alt = np.abs(GRMS2[i] - GRMS2[i - 1])
slope = (np.log(GRMS2[i]) - np.log(GRMS2[i - 1]))/(np.log(HZ[i]) - np.log(HZ[i - 1]))
offset = GRMS2[i -1]/HZ[i - 1]**slope
if slope != -1:
areaPSD = areaPSD + (offset/(slope + 1))*(HZ[i]**(slope+1) - HZ[i - 1]**(slope+1))
else:
areaPSD = areaPSD + offset*(np.log(HZ[i]) - np.log(HZ[i - 1]))
Grms = np.sqrt(areaPSD)
GPeak = np.sqrt(2)*Grms
print('\nGrms value: ', Grms)
print('Gpeak value: ', GPeak, "\n")
#==========================================================================
Acc = []
F = np.linspace(min(HZ), 2000, 10000)
df = F[1] - F[0]
k = 0
for fn in FN:
Acc.append([])
for i in range(len(F)):
p = F[i]/fn
C = (1 + (2*xi*p)**2)/((1 - p**2)**2 + (2*xi*p)**2)
Acc[k].append(C*G_f(F[i]))
k = k + 1
AreaGRMS = []
for m in range(len(FN)):
AreaGRMS.append(AreaRA(Acc[m], F))
#print("Response Accel (GRMS) [",FN[m], "Hz] =",AreaGRMS[m])
#==========================================================================
maxAccGRMS = max(AreaGRMS)
k = list(AreaGRMS).index(maxAccGRMS)
maxFn = FN[k]
print('Worst-Case point is:', maxAccGRMS,"g at", maxFn,"Hz")
#==========================================================================
# PLOT
#%%
textlegeng = []
plt.figure(1)
plt.title('Response Power Spectral Density Curves')
plt.ylabel('PSD [$G^2 / Hz$]')
plt.xlabel('Frequency [$Hz$]')
plt.yscale('log')
plt.xscale('log')
for i in np.arange(0, int(100/20), 3):
plt.plot(F, Acc[i], '--')
textlegeng.append(str(FN[i]) +" Hz")
for i in np.arange(int(100/20) + 3, len(Acc), 50):
plt.plot(F, Acc[i], '--')
textlegeng.append(str(FN[i]) +" Hz")
plt.plot(HZ, GRMS2, 'k')
textlegeng.append("PSD")
plt.legend(textlegeng)
plt.ylim(0.001, 15)
plt.xlim(10, 10000)
plt.grid(which='both', axis='both')
plt.show()
plt.figure(2)
plt.title('Vibration Response Spectrum')
plt.ylabel('Accel [$G_{RMS}$]')
plt.xlabel('Frequency [$Hz$]')
plt.yscale('log')
plt.xscale('log')
plt.plot(FN, AreaGRMS)
plt.grid(which='both', axis='both')
plt.show()
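# --- Editor's note (formula as implemented above) ---
# The response uses the single-DOF transmissibility magnitude squared,
#   |H(p)|^2 = (1 + (2*xi*p)**2) / ((1 - p**2)**2 + (2*xi*p)**2),  p = f/fn,
# multiplied by the input PSD G(f) and integrated over frequency; the square
# root of that area (AreaRA) is the RMS response plotted in the VRS figure.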
|
[
"g.hernan.diaz@gmail.com"
] |
g.hernan.diaz@gmail.com
|
2e28a19cee408ed5614c805add202c7ba74dc8d9
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/070_Climbing_Stairs.py
|
05f4b4ef7410b23e15f28f34f471ea6cc27c358c
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 890
|
py
|
class Solution(object):
# def climbStairs(self, n):
# """
# :type n: int
# :rtype: int
# """
# dp = [0] * (n + 1)
# dp[0] = 1
# dp[1] = 1
# for i in range(2, n + 1):
# dp[i] = dp[i - 2] + dp[i- 1]
# return dp[n]
    def climbStairs(self, n):
        if n <= 1:
            return 1
        dp = [1] * 2
        for i in range(2, n + 1):
            dp[1], dp[0] = dp[1] + dp[0], dp[1]
        return dp[1]
# C = {1: 1, 2: 2}
# def climbStairs(self, n):
# """
# :type n: int
# :rtype: int
# """
# if n in Solution.C:
# return Solution.C[n]
# else:
# result = Solution.C.get(n - 1, self.climbStairs(n - 1)) + \
# Solution.C.get(n - 2, self.climbStairs(n - 2))
# Solution.C[n] = result
# return result
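# --- Editor's check (added; assumes the de-obfuscated reconstruction above) ---
# print(Solution().climbStairs(5))  # -> 8, the Fibonacci-style recurrence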
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
2a9667d79338cfcbe2d9bf465b8957a4e4ab8d3a
|
1cf7c11711303fc21c37fc091b2eefc30bc489c6
|
/moderate/number_pairs.py
|
88dee243e7fcc46404c1bcfac8a89aefbebe0783
|
[] |
no_license
|
yamaton/codeeval
|
1c68b23459b6329c42e046f07bd19b4cecafb95f
|
eacd28106f76364d44fae9f6a4c2860711ea0dcc
|
refs/heads/master
| 2020-04-15T05:53:11.929711
| 2013-09-06T18:48:29
| 2013-09-06T18:48:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
number_pairs.py
Created by Yamato Matsuoka on 2012-07-17.
Description:
You are given a sorted array of positive integers and a number 'X'. Print out all pairs of numbers whose sum is equal to X. Print out only unique pairs and the pairs should be in ascending order
Input sample:
Your program should accept as its first argument a filename. This file will contain a comma separated list of sorted numbers and then the sum 'X', separated by semicolon. Ignore all empty lines. If no pair exists, print the string NULL eg.
1,2,3,4,6;5
2,4,5,6,9,11,15;20
1,2,3,4;50
Output sample:
Print out the pairs of numbers that equal to the sum X. The pairs should themselves be printed in sorted order i.e the first number of each pair should be in ascending order .e.g.
1,4;2,3
5,15;9,11
NULL
"""
import sys
def read_data(line):
x = line.rstrip().split(";")
seq = [int(i) for i in x[0].split(",")]
N = int(x[-1])
return (seq, N)
def find_pairs(seq, X):
"""Find number pairs from a sorted list, seq,
such that sum of each pair is X."""
bag1 = [i for i in seq if i <= X/2]
bag2 = [i for i in seq if i > X/2]
out = []
for i in bag1:
j = X - i
if j in bag2:
out.append((i,j))
return out
def format(lis):
"""Construct formatted string from a list of pairs"""
if lis:
return ";".join(",".join(str(i) for i in n) for n in lis)
else:
return "NULL"
if __name__ == '__main__':
with open(sys.argv[1], "r") as f:
data = [read_data(line) for line in f]
out = (find_pairs(seq, X) for (seq, X) in data)
formatted = "\n".join(format(x) for x in out)
    print(formatted)
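# --- Editor's aside (performance note, added) ---
# Membership tests against the plain list bag2 make find_pairs O(n^2);
# building bag2 as a set would make each `j in bag2` lookup O(1) on average.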
|
[
"yamaton@gmail.com"
] |
yamaton@gmail.com
|
44aed721687e6b4f500f365adb3852fb95f20ddd
|
5c465756e28ae021e7afac1dddbc6e11ec8c00ec
|
/setup.py
|
d405b626d8f809bb3cd8e9d499465ec7b331ae22
|
[
"Apache-2.0"
] |
permissive
|
waynew/pop
|
f9ffca2b98993ba1c6ddc95fdc66599c1eedfeff
|
d3b6128ada34ee31b16b9c6e1c600b3e059f4e31
|
refs/heads/master
| 2020-05-25T15:35:29.859674
| 2019-05-21T16:10:27
| 2019-05-21T16:10:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import python libs
import os
import sys
import shutil
from setuptools import setup, Command
NAME = 'pop'
DESC = ('The Plugin Oriented Programming System')
# Version info -- read without importing
_locals = {}
with open('pop/version.py') as fp:
exec(fp.read(), None, _locals)
VERSION = _locals['version']
SETUP_DIRNAME = os.path.dirname(__file__)
if not SETUP_DIRNAME:
SETUP_DIRNAME = os.getcwd()
class Clean(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for subdir in ('pop', 'tests'):
for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)):
for dir_ in dirs:
if dir_ == '__pycache__':
shutil.rmtree(os.path.join(root, dir_))
def discover_packages():
modules = []
for package in ('pop', ):
for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, package)):
pdir = os.path.relpath(root, SETUP_DIRNAME)
modname = pdir.replace(os.sep, '.')
modules.append(modname)
return modules
setup(name=NAME,
author='Thomas S Hatch',
author_email='thatch@saltstack.com',
url='https://saltstack.com',
version=VERSION,
description=DESC,
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Development Status :: 5 - Production/Stable',
],
scripts=['scripts/pop_seed'],
packages=discover_packages(),
cmdclass={'clean': Clean},
)
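# --- Editor's usage note (added) ---
# With cmdclass={'clean': Clean}, `python setup.py clean` runs the custom
# Clean command above, pruning __pycache__ directories under pop/ and tests/.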
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
9964c1a1e4d067a3b5d76587f7a21ab1eeb8d4e8
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/calendar_sms/models/calendar.py
|
26a857dd032ad94c770a8f3a0e11e031594e3c22
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264
| 2019-04-30T14:43:46
| 2019-04-30T14:43:46
| 184,268,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
_logger = logging.getLogger(__name__)
class CalendarEvent(models.Model):
_inherit = 'calendar.event'
def _get_default_sms_recipients(self):
""" Method overriden from mail.thread (defined in the sms module).
SMS text messages will be sent to attendees that haven't declined the event(s).
"""
return self.mapped('attendee_ids').filtered(lambda att: att.state != 'declined').mapped('partner_id')
def _do_sms_reminder(self):
""" Send an SMS text reminder to attendees that haven't declined the event """
for event in self:
sms_msg = _("Event reminder: %s on %s.") % (event.name, event.start_datetime or event.start_date)
note_msg = _('SMS text message reminder sent !')
event.message_post_send_sms(sms_msg, note_msg=note_msg)
class CalendarAlarm(models.Model):
_inherit = 'calendar.alarm'
type = fields.Selection(selection_add=[('sms', 'SMS Text Message')])
class AlarmManager(models.AbstractModel):
_inherit = 'calendar.alarm_manager'
@api.model
def get_next_mail(self):
""" Cron method, overriden here to send SMS reminders as well
"""
result = super(AlarmManager, self).get_next_mail()
now = fields.Datetime.to_string(fields.Datetime.now())
last_sms_cron = self.env['ir.config_parameter'].get_param('calendar_sms.last_sms_cron', default=now)
cron = self.env['ir.model.data'].get_object('calendar', 'ir_cron_scheduler_alarm')
interval_to_second = {
"weeks": 7 * 24 * 60 * 60,
"days": 24 * 60 * 60,
"hours": 60 * 60,
"minutes": 60,
"seconds": 1
}
cron_interval = cron.interval_number * interval_to_second[cron.interval_type]
events_data = self.get_next_potential_limit_alarm('sms', seconds=cron_interval)
for event in self.env['calendar.event'].browse(events_data):
max_delta = events_data[event.id]['max_duration']
if event.recurrency:
found = False
for event_start in event._get_recurrent_date_by_event():
event_start = event_start.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(event_start, event, max_delta, 0, 'sms', after=last_sms_cron, missing=True)
for alert in last_found:
event.browse(alert['event_id'])._do_sms_reminder()
found = True
if found and not last_found: # if the precedent event had an alarm but not this one, we can stop the search for this event
break
else:
event_start = fields.Datetime.from_string(event.start)
for alert in self.do_check_alarm_for_one_date(event_start, event, max_delta, 0, 'sms', after=last_sms_cron, missing=True):
event.browse(alert['event_id'])._do_sms_reminder()
self.env['ir.config_parameter'].set_param('calendar_sms.last_sms_cron', now)
return result
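# --- Editor's note (behavior as suggested by the code above) ---
# selection_add on calendar.alarm appends ('sms', 'SMS Text Message') to the
# existing alarm-type choices, which is what lets the cron override request
# alarms of type 'sms' via get_next_potential_limit_alarm('sms', ...).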
|
[
"50145400+gilbertp7@users.noreply.github.com"
] |
50145400+gilbertp7@users.noreply.github.com
|
7ac9b3d42f3f46d4e3db59414eb2c88a9ebeaff6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03137/s045740522.py
|
2b93ba5faee17d63aa69bda2f022a1c7348ac45b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
import math
import sys
import collections
import bisect
def main():
n, m = map(int, input().split())
x = sorted(list(map(int, input().split())))
if m <= n:
print(0)
return
y = sorted([x[i + 1] - x[i] for i in range(m - 1)])
print(sum(y[0:(m-n)]))
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
95eee9b7c9240054cd6ba3174b1915da8273f6bc
|
9242319ca7796c6a3b18e760ddbf8290944d4b49
|
/flock/frontend/amp.py
|
933a74893288d7941edc8e3bc315d4091639d9e7
|
[
"MIT"
] |
permissive
|
MainRo/python-flock
|
79cfd7ce4edab40439c556b6621768438868d16c
|
e1faa78d6aba374493336651848daadad82387a8
|
refs/heads/master
| 2021-01-10T19:16:52.907538
| 2015-11-18T21:15:38
| 2015-11-18T21:15:38
| 29,210,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
import logging
import json
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.protocols import amp
from twisted.internet.endpoints import TCP4ServerEndpoint
from flock.router import Router
from flock.message import FlockMessage
# client API
class MessageReceived(amp.Command):
arguments = [('message', amp.String())]
response = [('status', amp.Boolean())]
class SetState(amp.Command):
arguments = [('message', amp.String())]
response = [('status', amp.Boolean())]
class FlockServer(amp.AMP):
def connectionMade(self):
router = Router.instantiate()
router.attach_frontend(self)
logging.debug("connected")
def connectionLost(self, reason):
router = Router.instantiate()
router.detach_frontend(self)
logging.debug("disconnected")
@SetState.responder
def SetState(self, message):
logging.debug("set_state" + message)
message = json.loads(message)
action = FlockMessage()
action.uid = message['id']
action.attributes[FlockMessage.MSG_ATTRIBUTE_SWITCH_BISTATE] = message['state']
action.type = FlockMessage.Type.set
action.namespace = 'controller'
router = Router.instantiate()
router.call(action)
return {'status': True}
def event(self, message):
"""
Sends the received message to the endpoint serialized as javascript.
@todo flatten message as AMP fields.
"""
legacy_message = {}
legacy_message['protocol'] = 'flock'
legacy_message['device_id'] = message.uid
legacy_message['private_data'] = ''
legacy_message['attributes'] = message.attributes
json_message = json.dumps(legacy_message, default=lambda o: o.__dict__, sort_keys=True, indent=4)
self.callRemote(MessageReceived, message=json_message)
return
class FlockServerFactory(Factory):
def buildProtocol(self, addr):
return FlockServer()
class Frontend(object):
def __init__(self, port, reactor):
endpoint = TCP4ServerEndpoint(reactor, port, interface='localhost')
endpoint.listen(FlockServerFactory())
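# --- Editor's client sketch (assumed wiring; host/port are illustrative) ---
# from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
# d = connectProtocol(TCP4ClientEndpoint(reactor, 'localhost', 8750), amp.AMP())
# d.addCallback(lambda p: p.callRemote(
#     SetState, message=json.dumps({'id': 'lamp-1', 'state': 'on'})))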
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
6880c878362b0b48c05526b91b9d4518b7206f2b
|
2ee29ea10cc2ad5577a2f8e7ed0fa1351d451a52
|
/django/bin/tox
|
19cb4c36ac6406b19087eb92ec3ba72ce35c0bf1
|
[] |
no_license
|
guille1194/votbit2
|
6d1c792f4f43cdea25e31a8fbb8e9f1e20d9670b
|
63497d17a249c082730f39cc54caf0e2c1d4dc3c
|
refs/heads/master
| 2021-04-22T13:26:22.635235
| 2016-12-03T21:16:26
| 2016-12-03T21:16:26
| 75,500,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/home/guillermo/Documentos/votbit2/django/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tox import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
|
[
"guille1194@gmail.com"
] |
guille1194@gmail.com
|