Dataset columns:

blob_id: string (length 40)
directory_id: string (length 40)
path: string (3 to 288 chars)
content_id: string (length 40)
detected_licenses: list (0 to 112 items)
license_type: string (2 classes)
repo_name: string (5 to 115 chars)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (684 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (147 classes)
src_encoding: string (25 classes)
language: string (1 class)
is_vendor: bool
is_generated: bool
length_bytes: int64 (128 to 12.7k)
extension: string (142 classes)
content: string (128 to 8.19k chars)
authors: list (1 item)
author_id: string (1 to 132 chars)
blob_id: 476f7082b9b260260f80644006a9ada67ee8fcb7
directory_id: 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36
path: /homeassistant/components/color_extractor/__init__.py
content_id: 73e8e09101c38ea10b25ef93ae14eb5b08dbdd04
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: jason0x43/home-assistant
snapshot_id: 9114decaa8f7c2f1582f84e79dc06736b402b008
revision_id: 8bf6aba1cf44ee841de063755c935ea78040f399
branch_name: refs/heads/dev
visit_date: 2023-03-04T01:14:10.257593
revision_date: 2022-01-01T12:11:56
committer_date: 2022-01-01T12:11:56
github_id: 230,622,861
star_events_count: 1
fork_events_count: 1
gha_license_id: Apache-2.0
gha_event_created_at: 2023-02-22T06:15:07
gha_created_at: 2019-12-28T14:45:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,528
extension: py
content:
"""Module for color_extractor (RGB extraction from images) component."""
import asyncio
import io
import logging
from PIL import UnidentifiedImageError
import aiohttp
import async_timeout
from colorthief import ColorThief
import voluptuous as vol
from homeassistant.components.light import (
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SERVICE_TURN_ON as LIGHT_SERVICE_TURN_ON,
)
from homeassistant.core import ServiceCall
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import ATTR_PATH, ATTR_URL, DOMAIN, SERVICE_TURN_ON
_LOGGER = logging.getLogger(__name__)
# Extend the existing light.turn_on service schema
SERVICE_SCHEMA = vol.All(
cv.has_at_least_one_key(ATTR_URL, ATTR_PATH),
cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
vol.Exclusive(ATTR_PATH, "color_extractor"): cv.isfile,
vol.Exclusive(ATTR_URL, "color_extractor"): cv.url,
}
),
)
def _get_file(file_path):
"""Get a PIL acceptable input file reference.
Allows us to mock patch during testing to make BytesIO stream.
"""
return file_path
def _get_color(file_handler) -> tuple:
"""Given an image file, extract the predominant color from it."""
color_thief = ColorThief(file_handler)
# get_color returns a SINGLE RGB value for the given image
color = color_thief.get_color(quality=1)
_LOGGER.debug("Extracted RGB color %s from image", color)
return color
async def async_setup(hass, hass_config):
"""Set up services for color_extractor integration."""
async def async_handle_service(service_call: ServiceCall) -> None:
"""Decide which color_extractor method to call based on service."""
service_data = dict(service_call.data)
try:
if ATTR_URL in service_data:
image_type = "URL"
image_reference = service_data.pop(ATTR_URL)
color = await async_extract_color_from_url(image_reference)
elif ATTR_PATH in service_data:
image_type = "file path"
image_reference = service_data.pop(ATTR_PATH)
color = await hass.async_add_executor_job(
extract_color_from_path, image_reference
)
except UnidentifiedImageError as ex:
_LOGGER.error(
"Bad image from %s '%s' provided, are you sure it's an image? %s",
image_type,
image_reference,
ex,
)
return
if color:
service_data[ATTR_RGB_COLOR] = color
await hass.services.async_call(
LIGHT_DOMAIN, LIGHT_SERVICE_TURN_ON, service_data, blocking=True
)
hass.services.async_register(
DOMAIN,
SERVICE_TURN_ON,
async_handle_service,
schema=SERVICE_SCHEMA,
)
async def async_extract_color_from_url(url):
"""Handle call for URL based image."""
if not hass.config.is_allowed_external_url(url):
_LOGGER.error(
"External URL '%s' is not allowed, please add to 'allowlist_external_urls'",
url,
)
return None
_LOGGER.debug("Getting predominant RGB from image URL '%s'", url)
# Download the image into a buffer for ColorThief to check against
try:
session = aiohttp_client.async_get_clientsession(hass)
async with async_timeout.timeout(10):
response = await session.get(url)
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
_LOGGER.error("Failed to get ColorThief image due to HTTPError: %s", err)
return None
content = await response.content.read()
with io.BytesIO(content) as _file:
_file.name = "color_extractor.jpg"
_file.seek(0)
return _get_color(_file)
def extract_color_from_path(file_path):
"""Handle call for local file based image."""
if not hass.config.is_allowed_path(file_path):
_LOGGER.error(
"File path '%s' is not allowed, please add to 'allowlist_external_dirs'",
file_path,
)
return None
_LOGGER.debug("Getting predominant RGB from file path '%s'", file_path)
_file = _get_file(file_path)
return _get_color(_file)
return True
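
# A hypothetical service call handled by the handler above (field names
# taken from .const are assumed, not verified against the integration docs):
#
#   service: color_extractor.turn_on
#   data:
#     color_extract_url: "https://example.com/logo.png"
#     entity_id: light.shelf_rgb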
authors: ["noreply@github.com"]
author_id: jason0x43.noreply@github.com

blob_id: db10855a9829a6b7fc7b3239048bf8d6e30c8849
directory_id: 83e18f5d4fcd7084defb32981337a8f9b646c4c7
path: /python/91.decode-ways.py
content_id: 926f10a41f2cdbf2fa346422937918a04b9d8437
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Zhenye-Na/leetcode
snapshot_id: 709037a318e1be7e6ab92751f8695d888900591a
revision_id: 18d91a6ba813f91531b04632563212dfde2cceb9
branch_name: refs/heads/master
visit_date: 2023-04-10T07:06:06.502224
revision_date: 2023-04-01T00:18:44
committer_date: 2023-04-01T00:18:44
github_id: 145,656,854
star_events_count: 19
fork_events_count: 9
gha_license_id: MIT
gha_event_created_at: 2022-05-16T03:14:02
gha_created_at: 2018-08-22T04:39:25
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,549
extension: py
content:
#
# @lc app=leetcode id=91 lang=python3
#
# [91] Decode Ways
#
# https://leetcode.com/problems/decode-ways/description/
#
# algorithms
# Medium (27.50%)
# Likes: 4791
# Dislikes: 3513
# Total Accepted: 593.9K
# Total Submissions: 2.1M
# Testcase Example: '"12"'
#
# A message containing letters from A-Z can be encoded into numbers using the
# following mapping:
#
#
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
#
#
# To decode an encoded message, all the digits must be grouped then mapped back
# into letters using the reverse of the mapping above (there may be multiple
# ways). For example, "11106" can be mapped into:
#
#
# "AAJF" with the grouping (1 1 10 6)
# "KJF" with the grouping (11 10 6)
#
#
# Note that the grouping (1 11 06) is invalid because "06" cannot be mapped
# into 'F' since "6" is different from "06".
#
# Given a string s containing only digits, return the number of ways to decode
# it.
#
# The answer is guaranteed to fit in a 32-bit integer.
#
#
# Example 1:
#
#
# Input: s = "12"
# Output: 2
# Explanation: "12" could be decoded as "AB" (1 2) or "L" (12).
#
#
# Example 2:
#
#
# Input: s = "226"
# Output: 3
# Explanation: "226" could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2
# 2 6).
#
#
# Example 3:
#
#
# Input: s = "0"
# Output: 0
# Explanation: There is no character that is mapped to a number starting with
# 0.
# The only valid mappings with 0 are 'J' -> "10" and 'T' -> "20", neither of
# which start with 0.
# Hence, there are no valid ways to decode this since all digits need to be
# mapped.
#
#
# Example 4:
#
#
# Input: s = "06"
# Output: 0
# Explanation: "06" cannot be mapped to "F" because of the leading zero ("6" is
# different from "06").
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 100
# s contains only digits and may contain leading zero(s).
#
#
#
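# A quick worked trace of the DP below for s = "226":
# with sentinel s = "#226", dp[0] = 1 and dp[1] = 1 (the '2' decodes alone);
# i = 2: '2' != '0', so dp[2] += dp[1]; "22" <= 26, so dp[2] += dp[0] -> dp[2] = 2;
# i = 3: '6' != '0', so dp[3] += dp[2]; "26" <= 26, so dp[3] += dp[1] -> dp[3] = 3.
#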
# @lc code=start
class Solution:
def numDecodings(self, s: str) -> int:
s = "#" + s
n = len(s)
dp = [0 for _ in range(n)]
dp[0] = 1
if s[1] == "0":
return 0
else:
dp[1] = 1
for i in range(2, n):
if s[i] == "0":
if s[i - 1] == "1" or s[i - 1] == "2":
dp[i] += dp[i - 2]
else:
return 0
else:
# s[i] = 1 ... 9
dp[i] += dp[i - 1]
            if s[i - 1] == "1" or (s[i - 1] == "2" and int(s[i]) <= 6):
dp[i] += dp[i - 2]
return dp[n - 1]
# @lc code=end
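# A couple of sanity checks against the examples above (a minimal sketch):
if __name__ == "__main__":
    assert Solution().numDecodings("12") == 2
    assert Solution().numDecodings("226") == 3
    assert Solution().numDecodings("0") == 0
    assert Solution().numDecodings("06") == 0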
authors: ["nazhenye@gmail.com"]
author_id: nazhenye@gmail.com

blob_id: b042229ecf57d55791109c255c5332fd28a2c071
directory_id: c1a1d21ff56175c00f89cfb721f3eec1575e1b2e
path: /code/python/leetcode/pascals-triangle-ii.py
content_id: 36f76aa085ab7d4d90bb6611a02743c65d565a16
detected_licenses: []
license_type: no_license
repo_name: zhuzeyu22/cowry
snapshot_id: ee8501049447b694d35cce88392405610334382e
revision_id: e135038caff7fc0743e33525413d415ac69ac898
branch_name: refs/heads/master
visit_date: 2020-05-29T11:53:10.981688
revision_date: 2016-06-02T14:18:31
committer_date: 2016-06-02T14:18:31
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 520
extension: py
content:
class Solution:
# @return a list of lists of integers
def generate(self, numRows):
if numRows == 0:
return []
l = [[1],]
while len(l) < numRows:
nrow = [1] + [0] * (len(l)-1) + [1]
for i in range(1, len(nrow) - 1):
            # print(i, nrow, l)
nrow[i] += l[-1][i-1] + l[-1][i]
l.append(nrow)
return l
def getRow(self, rowIndex):
return self.generate(rowIndex+1)[-1]
print(Solution().getRow(3))
authors: ["geekan@foxmail.com"]
author_id: geekan@foxmail.com

blob_id: 6076b1d83d498c7d5098c42a7e74abd9531c111f
directory_id: 47082917dde1e8af42197dbc299e69e62297b34f
path: /config/urls.py
content_id: 6352f0ca3dac2706a3823b1f981270aedd27e0c3
detected_licenses: []
license_type: no_license
repo_name: navill/Dstagram-repository
snapshot_id: 66520ed6fd5e8cf3fe633c04c7a1eaaae9bc57c0
revision_id: fb44acea8906fcac5724d4f9bdfdfe4e218f947a
branch_name: refs/heads/master
visit_date: 2020-06-10T20:59:28.581733
revision_date: 2019-06-25T16:31:58
committer_date: 2019-06-25T16:31:58
github_id: 193,745,086
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,572
extension: py
content:
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static  # serves specific resources as static responses
from django.conf import settings  # gives access to Django's settings values
urlpatterns = [
path('site_config/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('accounts/', include('accounts.urls')),
path('', include('photo.urls'))
]
# Add the urlpattern below so images can be served
# -> not used when deployed/live,
# -> because this is not something Django should handle;
# -> either the web server does it (Heroku does not support this)
# -> or a separate file server is set up
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#
# if settings.DEBUG:
# import debug_toolbar
# urlpatterns = [
# path('__debug__/', include(debug_toolbar.urls)),
# ] + urlpatterns
authors: ["blue_jihoon@naver.com"]
author_id: blue_jihoon@naver.com

blob_id: a4e1afe224daa8082b4cdfd04c6122d18cf4637c
directory_id: 8cc862aa51d3fec95d094dc4bd3151e1155d240a
path: /pythonProject/imports/using_sys.py
content_id: 4c609d014e0a48228c4d6084ff8263843af572e3
detected_licenses: []
license_type: no_license
repo_name: activehuahua/python
snapshot_id: bcbf3a2190025e2315399bfd0c725f598211632b
revision_id: cc36a93c01c53f856426ccf2724848142524d9c0
branch_name: refs/heads/master
visit_date: 2023-04-14T10:23:21.590765
revision_date: 2019-08-12T06:52:15
committer_date: 2019-08-12T06:52:15
github_id: 160,277,647
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 232
extension: py
content:
# -*- coding: utf-8 -*-
'''
@Author : zhaojianghua
@File : using_sys.py
@Time : 2018/12/5 9:54
'''
import sys
print('The command-line arguments are:')
for i in sys.argv:
print(i)
print('\n\nThe Python path is:', sys.path, '\n')
authors: ["zhaojianghua@pretang.com"]
author_id: zhaojianghua@pretang.com

blob_id: dba0a06a82fbbfd3e411a7aa0a4f2a0711b37607
directory_id: 423ca5205aaf0b2d3bfff9affe2172fec21bfad0
path: /web/pgadmin/browser/server_groups/servers/databases/schemas/domains/domain_constraints/tests/test_domain_constraints_add.py
content_id: b7f603fdfd4c1adca8a359d0d07247caee18a598
detected_licenses: ["PostgreSQL"]
license_type: permissive
repo_name: adityatoshniwal/pgadmin4
snapshot_id: 25cc665d1438f82bdb17f13270933c43e3a98f4b
revision_id: 2aea5b41ad8b6bd4a408a87a6743fcbfc88ed329
branch_name: refs/heads/master
visit_date: 2023-09-03T20:04:15.941551
revision_date: 2023-07-31T09:32:30
committer_date: 2023-07-31T09:32:30
github_id: 419,212,569
star_events_count: 0
fork_events_count: 0
gha_license_id: NOASSERTION
gha_event_created_at: 2023-01-02T05:37:03
gha_created_at: 2021-10-20T06:34:38
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,748
extension: py
content:
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as domain_cons_utils
from unittest.mock import patch
class DomainConstraintAddTestCase(BaseTestGenerator):
""" This class will add new domain constraint under schema node. """
scenarios = utils.generate_scenarios('domain_constraint_create',
domain_cons_utils.test_cases)
def setUp(self):
super().setUp()
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
self.domain_name = "domain_%s" % (str(uuid.uuid4())[1:8])
self.domain_info = domain_cons_utils.create_domain(self.server,
self.db_name,
self.schema_name,
self.schema_id,
self.domain_name)
def create_domain_constraint(self):
"""
This function create a domain constraint and returns it
:return: created domain constraint response
"""
return self.tester.post(self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) +
'/' + str(self.schema_id) + '/' +
str(self.domain_id) + '/',
data=json.dumps(self.test_data),
content_type='html/json',
follow_redirects=True)
def runTest(self):
""" This function will add domain constraint under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
self.test_data['name'] =\
"test_domain_con_add_%s" % (str(uuid.uuid4())[1:8])
self.domain_id = self.domain_info[0]
if self.is_positive_test:
response = self.create_domain_constraint()
else:
if hasattr(self, "internal_server_error"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_in_db"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_getting_coid"):
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = self.create_domain_constraint()
if hasattr(self, "error_domain_id"):
self.domain_id = 99999
response = self.create_domain_constraint()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
authors: ["akshay.joshi@enterprisedb.com"]
author_id: akshay.joshi@enterprisedb.com

blob_id: 047ddbf79cd824eea356100f84caef3d7a7612d3
directory_id: e4a9a67f1d79b3430aa43ebdb905a08717ee118a
path: /COT/helpers/tests/test_vmdktool.py
content_id: 58e36f57ba914bde6ee0b591ad71cdbf5d7e9a20
detected_licenses: ["MIT"]
license_type: permissive
repo_name: digideskio/cot
snapshot_id: 30c724c5b76abd5187a9c1e3c6f15a462b324da8
revision_id: 8fc84c8c72a9acb4adffca859154055f2857b53f
branch_name: refs/heads/master
visit_date: 2021-01-12T20:50:05.208963
revision_date: 2016-05-11T15:31:29
committer_date: 2016-05-11T15:31:29
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,826
extension: py
content:
#!/usr/bin/env python
#
# test_vmdktool.py - Unit test cases for COT.helpers.vmdktoolsubmodule.
#
# March 2015, Glenn F. Matthews
# Copyright (c) 2014-2015 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.helpers.vmdktool submodule."""
import mock
import os
from distutils.version import StrictVersion
from .test_helper import HelperUT
from COT.helpers.helper import Helper
from COT.helpers.vmdktool import VmdkTool
class TestVmdkTool(HelperUT):
"""Test cases for VmdkTool helper class."""
def setUp(self):
"""Test case setup function called automatically prior to each test."""
self.helper = VmdkTool()
super(TestVmdkTool, self).setUp()
def test_get_version(self):
"""Test .version getter logic."""
self.fake_output = "vmdktool version 1.4"
self.assertEqual(StrictVersion("1.4"), self.helper.version)
def test_install_helper_already_present(self):
"""Do nothing instead of re-installing."""
self.helper.install_helper()
self.assertEqual([], self.last_argv)
self.assertLogged(**self.ALREADY_INSTALLED)
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def test_install_helper_apt_get(self,
mock_makedirs,
mock_exists,
mock_isdir):
"""Test installation via 'apt-get'."""
mock_isdir.return_value = False
mock_exists.return_value = False
mock_makedirs.side_effect = OSError
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = True
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = False
Helper._apt_updated = False
self.fake_output = 'is not installed and no information is available'
self.system = 'Linux'
os.environ['PREFIX'] = '/usr/local'
if 'DESTDIR' in os.environ:
del os.environ['DESTDIR']
self.helper.install_helper()
self.assertEqual([
['dpkg', '-s', 'make'],
['sudo', 'apt-get', '-q', 'update'],
['sudo', 'apt-get', '-q', 'install', 'make'],
['dpkg', '-s', 'zlib1g-dev'],
['sudo', 'apt-get', '-q', 'install', 'zlib1g-dev'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/bin'],
['make', 'install', 'PREFIX=/usr/local'],
], self.last_argv)
self.assertTrue(Helper._apt_updated)
# Make sure we don't 'apt-get update/install' again unnecessarily
self.fake_output = 'install ok installed'
os.environ['PREFIX'] = '/opt/local'
os.environ['DESTDIR'] = '/home/cot'
self.last_argv = []
self.helper.install_helper()
self.assertEqual([
['dpkg', '-s', 'make'],
['dpkg', '-s', 'zlib1g-dev'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755',
'/home/cot/opt/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/home/cot/opt/local/bin'],
['make', 'install', 'PREFIX=/opt/local', 'DESTDIR=/home/cot'],
], self.last_argv)
def test_install_helper_port(self):
"""Test installation via 'port'."""
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['port'] = True
Helper._port_updated = False
self.helper.install_helper()
self.assertEqual([
['sudo', 'port', 'selfupdate'],
['sudo', 'port', 'install', 'vmdktool']
], self.last_argv)
self.assertTrue(Helper._port_updated)
# Make sure we don't 'port selfupdate' again unnecessarily
self.last_argv = []
self.helper.install_helper()
self.assertEqual([
['sudo', 'port', 'install', 'vmdktool']
], self.last_argv)
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def test_install_helper_yum(self,
mock_makedirs,
mock_exists,
mock_isdir):
"""Test installation via 'yum'."""
mock_isdir.return_value = False
mock_exists.return_value = False
mock_makedirs.side_effect = OSError
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = False
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = True
self.system = 'Linux'
os.environ['PREFIX'] = '/usr/local'
if 'DESTDIR' in os.environ:
del os.environ['DESTDIR']
self.helper.install_helper()
self.assertEqual([
['sudo', 'yum', '--quiet', 'install', 'make'],
['sudo', 'yum', '--quiet', 'install', 'zlib-devel'],
['make', 'CFLAGS="-D_GNU_SOURCE -g -O -pipe"'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/man/man8'],
['sudo', 'mkdir', '-p', '--mode=755', '/usr/local/bin'],
['make', 'install', 'PREFIX=/usr/local'],
], self.last_argv)
def test_install_helper_unsupported(self):
"""Unable to install without a package manager."""
Helper.find_executable = self.stub_find_executable
Helper.PACKAGE_MANAGERS['apt-get'] = False
Helper.PACKAGE_MANAGERS['port'] = False
Helper.PACKAGE_MANAGERS['yum'] = False
with self.assertRaises(NotImplementedError):
self.helper.install_helper()
def test_convert_unsupported(self):
"""Negative test - conversion to unsupported format/subformat."""
with self.assertRaises(NotImplementedError):
self.helper.convert_disk_image(self.blank_vmdk, self.temp_dir,
'qcow2')
with self.assertRaises(NotImplementedError):
self.helper.convert_disk_image(self.blank_vmdk, self.temp_dir,
'vmdk', 'monolithicSparse')
authors: ["glenn@e-dad.net"]
author_id: glenn@e-dad.net

blob_id: e99a04d4cf4a320eea6c46709c2ddc8dda6c5981
directory_id: b648a0ff402d23a6432643879b0b81ebe0bc9685
path: /scripts/json-equals.py
content_id: 7704bba587a813f68c8ef03a1c5c9db9af33cc46
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: jviotti/binary-json-size-benchmark
snapshot_id: 4712faca2724d47d23efef241983ce875dc71cee
revision_id: 165b577884ef366348bf48042fddf54aacfe647a
branch_name: refs/heads/main
visit_date: 2023-04-18T01:40:26.141995
revision_date: 2022-12-19T13:25:35
committer_date: 2022-12-19T13:25:35
github_id: 337,583,132
star_events_count: 21
fork_events_count: 1
gha_license_id: Apache-2.0
gha_event_created_at: 2022-12-17T21:53:56
gha_created_at: 2021-02-10T01:18:05
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 323
extension: py
content:
import sys
import json
with open(sys.argv[1], mode='r') as json_data:
data1 = json.loads(json_data.read())
with open(sys.argv[2], mode='r') as json_data:
data2 = json.loads(json_data.read())
if data1 == data2:
print("Files are equal!")
sys.exit(0)
else:
print("Files are NOT equal!")
sys.exit(1)
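
# Example invocation (file names are hypothetical):
#
#   python json-equals.py left.json right.json && echo "equal"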
authors: ["jv@jviotti.com"]
author_id: jv@jviotti.com

blob_id: 8018f217c6e18cdb2bb6f2517df37cde252a8c36
directory_id: 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
path: /pardus/tags/2007/programming/libs/exiv2/actions.py
content_id: 368c4462143cec10373c6bedb7c05cc40a320ad6
detected_licenses: []
license_type: no_license
repo_name: aligulle1/kuller
snapshot_id: bda0d59ce8400aa3c7ba9c7e19589f27313492f7
revision_id: 7f98de19be27d7a517fe19a37c814748f7e18ba6
branch_name: refs/heads/master
visit_date: 2021-01-20T02:22:09.451356
revision_date: 2013-07-23T17:57:58
committer_date: 2013-07-23T17:57:58
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 462
extension: py
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("README","doc/ChangeLog")
pisitools.dohtml("doc/html/*")
authors: ["yusuf.aydemir@istanbul.com"]
author_id: yusuf.aydemir@istanbul.com

blob_id: 90d887816136ef7ea406db5120f7ddfd8554e2c9
directory_id: f58a1dcae97115b566409704dcf1a46a5f86df47
path: /Bellevue University/Courses/DSC640/Matplotlib for python Developers/7900_Code/Chapter 03/7900_03_15.py
content_id: 20b070bc387075f7ddde931334627adb0d70a5ca
detected_licenses: []
license_type: no_license
repo_name: safarie1103/Safarie1103
snapshot_id: 318519ace23c33fcf6d36337392156e5381abd49
revision_id: a86172bfc47eff0af65285b641af0ad26e13fd12
branch_name: refs/heads/master
visit_date: 2023-06-13T01:43:35.761325
revision_date: 2023-06-07T16:01:16
committer_date: 2023-06-07T16:01:16
github_id: 205,732,823
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2022-11-28T15:55:13
gha_created_at: 2019-09-01T21:11:38
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 169
extension: py
content:
#!/usr/bin/python
import matplotlib.pyplot as plt
plt.figure(figsize=(3,3))
x = [45, 35, 20]
labels = ['Cats', 'Dogs', 'Fishes']
plt.pie(x, labels = labels)
plt.show()
authors: ["54446804+safarie1103@users.noreply.github.com"]
author_id: 54446804+safarie1103@users.noreply.github.com

blob_id: f5e1b29ce42118842f5d23bc27518c4a367946ef
directory_id: 8585e7b3bbb71218fcb4dcb8fb99b46f6973ed72
path: /healthack/construct_dirs.py
content_id: 9d57d47edf4a4cc7a436084333b5f4f8b8baa03f
detected_licenses: []
license_type: no_license
repo_name: koike-ya/health
snapshot_id: eeed56a8940d1c30333069a2ab339bb6d5937118
revision_id: 87bd1842d49e34abef8c66f666b6526d3fb18522
branch_name: refs/heads/master
visit_date: 2021-10-11T13:59:19.625847
revision_date: 2019-01-27T08:55:09
committer_date: 2019-01-27T08:55:09
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 806
extension: py
content:
from pathlib import Path
Path(Path.cwd() / "setup.py").touch(exist_ok=True)
Path(Path.cwd() / "config").mkdir(exist_ok=True)
Path(Path.cwd() / "config" / "const.py").touch(exist_ok=True)
Path(Path.cwd() / "notebooks").mkdir(exist_ok=True)
Path(Path.cwd() / "data" / "processed").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "models").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "data" / "raw").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "src" / "features").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "models").mkdir(exist_ok=True)
Path(Path.cwd() / "src" / "visualization").mkdir(exist_ok=True)
Path(Path.cwd() / "reports" / "figures").mkdir(exist_ok=True, parents=True)
Path(Path.cwd() / "reports" / "results").mkdir(exist_ok=True)
Path(Path.cwd() / "logs").mkdir(exist_ok=True)
authors: ["makeffort134@gmail.com"]
author_id: makeffort134@gmail.com

blob_id: e0c7b019890ee9b53c7d4fa7a809375a21ccfc2b
directory_id: 4864e58bb9ac93c34f2988f50bec143fbe7b5278
path: /blog/migrations/0019_auto_20210114_1845.py
content_id: db6d08a76a1bd9374d50ba620e0ae0600f5031ca
detected_licenses: []
license_type: no_license
repo_name: Subhrans/Blog_App
snapshot_id: 7e536b868645c5ffc0a35a4a63b206ddd5ab0965
revision_id: a81b4adeb8c0cb3bea5ffa85c6f1e2954c23e54a
branch_name: refs/heads/main
visit_date: 2023-04-21T05:25:09.113818
revision_date: 2021-05-08T18:23:38
committer_date: 2021-05-08T18:23:38
github_id: 327,252,859
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 808
extension: py
content:
# Generated by Django 3.0.1 on 2021-01-14 13:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20210114_0331'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='like',
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('like_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.UserProfile')),
('like_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
authors: ["subhransud525@gmail.com"]
author_id: subhransud525@gmail.com

blob_id: 590d8055aec2d9c89103005c1b857abc0f8fab1d
directory_id: 7d4a3504bb9daa2589e2580de21e0e15c334787c
path: /tst/select/select_suite.py
content_id: 1226f95721a4f90fecbad8c9e3c507bf770c10a1
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ericwu/pumbaa
snapshot_id: 4cfac76c29dc35223a3df1a89dbde2f6bbf44719
revision_id: e355c0a9ec28cfcfa3daabea9ba4c7cb55907efb
branch_name: refs/heads/master
visit_date: 2021-01-13T16:53:42.408105
revision_date: 2017-01-23T18:31:04
committer_date: 2017-01-23T18:31:04
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,832
extension: py
content:
#
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016, Erik Moqvist
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
import select
import board
from sync import Event, Queue
from drivers import Can, Uart
import harness
from harness import assert_raises
def test_help():
poll = select.poll()
help(select)
help(poll)
def test_register_unregister():
poll = select.poll()
queue = Queue()
event = Event()
can = Can(board.CAN_0)
uart = Uart(1)
poll.register(queue)
poll.register(event)
poll.register(can)
poll.register(uart)
poll.unregister(queue)
poll.unregister(event)
poll.unregister(can)
poll.unregister(uart)
with assert_raises(OSError):
poll.unregister(queue)
def test_poll():
poll = select.poll()
queue = Queue()
event = Event()
can = Can(board.CAN_0)
uart = Uart(1)
# Register both event channels.
poll.register(queue)
poll.register(event)
poll.register(can)
poll.register(uart)
# Timeout waiting for event.
assert poll.poll(0.01) == []
# Event write, poll and read.
event.write(0x1)
assert poll.poll() == [(event, select.POLLIN)]
assert event.read(0x1) == 0x1
# Queue write, poll and read.
queue.write(b'foo')
assert poll.poll() == [(queue, select.POLLIN)]
assert queue.read(3) == b'foo'
def test_bad_arguments():
poll = select.poll()
with assert_raises(TypeError, "channel object required"):
poll.register(None)
with assert_raises(OSError):
poll.unregister(None)
TESTCASES = [
(test_help, "test_help"),
(test_register_unregister, "test_register_unregister"),
(test_poll, "test_poll"),
(test_bad_arguments, "test_bad_arguments")
]
authors: ["erik.moqvist@gmail.com"]
author_id: erik.moqvist@gmail.com

blob_id: 194f0811f4c2ee6cea15e7c6797edd4496899d26
directory_id: d282fe910d95b3f23254e5e0d5309c082de81419
path: /Ent/E4/demo_skylineviewer.py
content_id: 2bf167edf196ad83e52e2d6df59c9ed989dffc8f
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Mi7ai/Algoritmia2
snapshot_id: 54fff6d3925ddc8067303d2e507ccde8ba9a025a
revision_id: 2f1c7b3990e4971f4f977fd0ea4d308004ab3db5
branch_name: refs/heads/master
visit_date: 2021-06-21T18:45:57.875364
revision_date: 2021-03-22T12:54:47
committer_date: 2021-03-22T12:54:47
github_id: 207,576,996
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2021-03-22T12:54:47
gha_created_at: 2019-09-10T14:08:46
gha_language: Assembly
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 346
extension: py
content:
from Utils.skylineviewer import SkylineViewer
buildings = [(1, 10, 3), (2, 5, 5), (3, 6, 3), (4, 7, 5), (10, 10, 3), (9, 4, 6), (20, 8, 4), (22, 6, 6), (25, 10, 2)]
skyline = [1, 10, 4, 7, 9, 4, 10, 10, 13, 4, 15, 0, 20, 8, 24, 6, 25, 10, 27, 6, 28]
viewer = SkylineViewer(skyline)
for b in buildings:
viewer.add_building(b)
viewer.run()
authors: ["hottmayer@gmail.com"]
author_id: hottmayer@gmail.com

blob_id: 323b5f80a3a048ee37471233c5e6663d18ed90b6
directory_id: c7e765a9bed33d3bfb21774e3995bf4a09e04add
path: /adminmgr/media/code/A3/task3/BD_198_225_960_HjAczew.py
content_id: 153f36b21dacb14ce5be9b6efaf652ff426bd9c2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: IamMayankThakur/test-bigdata
snapshot_id: 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237
revision_id: 7f507918c7bec31c92eedcd94491a83486623049
branch_name: refs/heads/master
visit_date: 2022-05-03T00:59:44.127494
revision_date: 2022-02-10T19:50:16
committer_date: 2022-02-10T19:50:16
github_id: 201,585,028
star_events_count: 10
fork_events_count: 4
gha_license_id: Apache-2.0
gha_event_created_at: 2022-04-22T23:39:45
gha_created_at: 2019-08-10T05:34:09
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,380
extension: py
content:
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
#from pyspark.sql import Row,SQLContext
import sys
import requests
import re
from operator import add
def process_rdd(time, rdd):
# print("----------=========- %s -=========----------" % str(time))
row_rdd = rdd.map(lambda w:(w[0],w[1]))
maximum = row_rdd.take(6)
hashh=""
i=0
while i<len(maximum):
if(maximum[i][0]!=''):
if i==(len(maximum)-1):
hashh=hashh+str(maximum[i][0])
else:
hashh=hashh+str(maximum[i][0])+","
i=i+1
print("%s"%(hashh))
if len(sys.argv) != 3:
print("Should enter file, Window Size, Batch Duration", file=sys.stderr)
sys.exit(-1)
wind_size=int(sys.argv[1])
batch_duration=int(sys.argv[2])
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,batch_duration)
ssc.checkpoint("/home/hduser/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
tweet=dataStream.map(lambda w:(w.split(';')[7]))
hashtag=tweet.flatMap(lambda w:(w.split(',')))
hasht=hashtag.map(lambda w:(w,1))
counts=hasht.filter(lambda x:x!=None)
totalcount=counts.reduceByKeyAndWindow(lambda a,b: a+b, wind_size, batch_duration).transform(lambda rdd: rdd.sortBy(lambda y: (-y[1],y[0])))
totalcount.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
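
# Example submission (values are assumptions; the script expects a text
# stream on localhost:9009, plus window size and batch duration in seconds):
#
#   spark-submit BD_198_225_960_HjAczew.py 30 10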
authors: ["ubuntu@ip-172-31-18-251.ap-south-1.compute.internal"]
author_id: ubuntu@ip-172-31-18-251.ap-south-1.compute.internal

blob_id: 97bcf5441c08a2e89ecd3c1db61840c55422e13d
directory_id: 0b0d3246d39974cb8faff7d269da2d539415afab
path: /problem_python/p283.py
content_id: 3a6704dc2d9bb763a3c83885a6be6906ca384262
detected_licenses: []
license_type: no_license
repo_name: xionghhcs/leetcode
snapshot_id: 972e7ae4ca56b7100223630b294b5a97ba5dd7e8
revision_id: 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b
branch_name: refs/heads/master
visit_date: 2020-03-07T17:18:08.465559
revision_date: 2019-09-29T11:11:26
committer_date: 2019-09-29T11:11:26
github_id: 127,607,564
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 534
extension: py
content:
class Solution:
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
def swap(a, i, j):
tmp = a[i]
a[i] = a[j]
a[j] = tmp
for i in range(len(nums)):
if nums[i] == 0:
j = i + 1
while j < len(nums) and nums[j] == 0:
j += 1
if j != len(nums):
swap(nums, i, j)
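
# A small usage sketch:
#
#   nums = [0, 1, 0, 3, 12]
#   Solution().moveZeroes(nums)
#   print(nums)  # [1, 3, 12, 0, 0]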
authors: ["xionghhcs@163.com"]
author_id: xionghhcs@163.com

blob_id: 4c81f1a6460a9bb7ccdc7063e49e475861567b6c
directory_id: 164ffe077dde59373ad9fadcfd727f279a1cfe93
path: /jni_build/jni/include/tensorflow/python/ops/numerics.py
content_id: bd96d9a72cc653be506d2fa812d7b341a44bdafe
detected_licenses: []
license_type: no_license
repo_name: Basofe/Community_Based_Repository_Traffic_Signs
snapshot_id: 524a4cfc77dc6ed3b279556e4201ba63ee8cf6bd
revision_id: a20da440a21ed5160baae4d283c5880b8ba8e83c
branch_name: refs/heads/master
visit_date: 2021-01-22T21:17:37.392145
revision_date: 2017-09-28T21:35:58
committer_date: 2017-09-28T21:35:58
github_id: 85,407,197
star_events_count: 0
fork_events_count: 2
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,826
extension: py
content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all half, float and double tensors to CheckNumericsOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
Returns:
Same tensor as `t`.
"""
with ops.op_scope([t], name, "VerifyFinite") as name:
t = ops.convert_to_tensor(t, name="t")
with ops.colocate_with(t):
verify_input = array_ops.check_numerics(t, message=msg)
out = control_flow_ops.with_dependencies([verify_input], t)
return out
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Returns:
A `group` op depending on all `check_numerics` ops added.
"""
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
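
# A usage sketch for TF1-style graph code (train_op and session setup are
# assumed):
#
#   check_op = add_check_numerics_ops()
#   with tf.Session() as sess:
#       sess.run([train_op, check_op])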
authors: ["helder_m_p_novais@hotmail.com"]
author_id: helder_m_p_novais@hotmail.com

blob_id: 37490c2e36c27fde373437a4c3e932557b84fc75
directory_id: 4d360320e06339a4f7d2a2723cddf02ff02a306e
path: /0x06-python-classes/3-square.py
content_id: f4c53a022a14d9265165fae9db6fb90f9362d80f
detected_licenses: []
license_type: no_license
repo_name: AmineNeifer/holbertonschool-higher_level_programming
snapshot_id: fd6ccdb1b5f0dc85e10750e9f2c7824290697e85
revision_id: f5c42bff003b85a7c19702e0233997645fce2fb1
branch_name: refs/heads/master
visit_date: 2020-09-29T02:56:52.286548
revision_date: 2020-05-15T00:12:50
committer_date: 2020-05-15T00:12:50
github_id: 226,933,206
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 485
extension: py
content:
#!/usr/bin/python3
class Square:
def __init__(self, size=0):
"""Args:
size: size of the Square.
"""
if type(size) is not int:
raise TypeError("size must be an integer")
elif size < 0:
raise ValueError("size must be >= 0")
else:
self.__size = size
def area(self):
"""Returns:
the area of the square (size)
"""
return self.__size * self.__size
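
# Usage sketch:
#
#   s = Square(3)
#   print(s.area())  # 9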
authors: ["amineneifer2000@gmail.com"]
author_id: amineneifer2000@gmail.com

blob_id: a95c0d6cbc8db379f298e52cebd758fbec611534
directory_id: a46d135ba8fd7bd40f0b7d7a96c72be446025719
path: /packages/python/plotly/plotly/validators/layout/uniformtext/_mode.py
content_id: a3fdc332612749ea2308c67e4091aac3c79eb00a
detected_licenses: ["MIT"]
license_type: permissive
repo_name: hugovk/plotly.py
snapshot_id: 5e763fe96f225d964c4fcd1dea79dbefa50b4692
revision_id: cfad7862594b35965c0e000813bd7805e8494a5b
branch_name: refs/heads/master
visit_date: 2022-05-10T12:17:38.797994
revision_date: 2021-12-21T03:49:19
committer_date: 2021-12-21T03:49:19
github_id: 234,146,634
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2020-01-15T18:33:43
gha_created_at: 2020-01-15T18:33:41
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 468
extension: py
content:
import _plotly_utils.basevalidators
class ModeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="mode", parent_name="layout.uniformtext", **kwargs):
super(ModeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", [False, "hide", "show"]),
**kwargs
)
authors: ["noreply@github.com"]
author_id: hugovk.noreply@github.com

blob_id: 33f989d69d8aef0c49fbf0a3dbee6ff0647c9d01
directory_id: 253f3a81b582ee53b86451dc5a06d6dc8923b0dd
path: /src/commands/commandslist.py
content_id: 946bf2c4b052a3394de39102411e47b230dc7f67
detected_licenses: []
license_type: no_license
repo_name: bdubyapee/akriosmud
snapshot_id: c02ff2c9e3916efedc4837b19e02caf6255045f9
revision_id: d6c234e22fc56422315553217639bcb3e4c49984
branch_name: refs/heads/master
visit_date: 2020-04-16T04:53:02.163852
revision_date: 2020-02-17T01:09:10
committer_date: 2020-02-17T01:09:10
github_id: 165,284,647
star_events_count: 3
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,270
extension: py
content:
# Project: Akrios
# Filename: commands/commandlist.py
#
# Capability: player
#
# Command Description: Listing of currently available commands filtered by capabilities.
#
# By: Jubelo
from commands import *
name = "commandslist"
version = 1
requirements = {'capability': ['player'],
'generic_fail': "See {WHelp commandlist{x for help with this command.",
'truth_checks': [],
'false_checks': []}
@Command(**requirements)
def commandslist(caller, args, **kwargs):
header = f"{{rCommands Available{{x"
caller.write(f"{header:^80}")
caller.write("")
sub_header = f"{{BPlease see {{Whelp <command>{{B for additional information{{x"
caller.write(f"{sub_header:^80}")
caller.write("")
cmd_list = [cmd for cmd in Command.commandhash
if set(Command.commandcapability[cmd]) & set(caller.capability)]
cmd_list.sort()
numcols = 4
while (len(cmd_list) % numcols) > 0:
cmd_list.append(' ')
for i in range(0, len(cmd_list), numcols):
output = ''
for l in range(0, numcols):
output = f"{output}{cmd_list[i+l]:20}"
caller.write(output)
caller.write("")
caller.write("\n\r{WUsage{x: <command> <optional arguments>")
authors: ["phippsb@gmail.com"]
author_id: phippsb@gmail.com

blob_id: ebe543088903155d46d06e03f07284f75a632e35
directory_id: 09996c147d498e61352683c5e7df0f3cd517ea27
path: /test/oldcrab/whelicity_DataDoubleEl_SE_GH_cfg.py
content_id: 485f08167cd51bc90018ac079a2e540233e35f86
detected_licenses: []
license_type: no_license
repo_name: shchenarani/whelicityAnalyzer
snapshot_id: 3e3320a6d03eab21de6d51dad60f057b6a2f3d47
revision_id: 8b4586f7210c6a166b949470c22310b25683da4f
branch_name: refs/heads/master
visit_date: 2021-09-10T12:22:52.088849
revision_date: 2018-03-26T07:52:54
committer_date: 2018-03-26T07:52:54
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,739
extension: py
content:
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
## configure process options
process.options = cms.untracked.PSet(
allowUnscheduled = cms.untracked.bool(True),
wantSummary = cms.untracked.bool(True)
)
## configure geometry & conditions
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.load("Configuration.StandardSequences.MagneticField_cff")
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v6'
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
process.source = cms.Source("PoolSource", fileNames = readFiles)
readFiles.extend( [
'root://xrootd-cms.infn.it//store/data/Run2016E/DoubleEG/MINIAOD/03Feb2017-v1/110000/EA7C2D56-A1EA-E611-86B2-0CC47A13CC7A.root']);
#
# Set up electron ID (VID framework)
#
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
# turn on VID producer, indicate data format to be
# DataFormat.AOD or DataFormat.MiniAOD, as appropriate
useAOD = False
if useAOD == True :
dataFormat = DataFormat.AOD
else :
dataFormat = DataFormat.MiniAOD
switchOnVIDElectronIdProducer(process, dataFormat)
# define which IDs we want to produce
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Summer16_80X_V1_cff']
#add them to the VID producer
for idmod in my_id_modules:
setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
process.load("whelicity1.MiniAnalyzer.whelicity_cff")
process.Whelicity.isData = cms.bool(True)
process.Whelicity.isPythia = cms.bool(False)
process.Whelicity.isSingleElectron = cms.bool(True)
process.Whelicity.DiEl = cms.bool(True)
process.Whelicity.muonISOSF = cms.string("ISOEfficienciesAndSF_GH.root")
process.Whelicity.muonIDSF = cms.string("IDEfficienciesAndSF_GH.root")
process.Whelicity.outFileName = cms.string("tree.root")
process.TFileService = cms.Service("TFileService",
fileName = cms.string("histos.root")
)
# Make sure to add the ID sequence upstream from the user analysis module
process.p = cms.Path(process.egmGsfElectronIDSequence * process.Whelicity)
authors: ["hesam.kaveh@gmail.com"]
author_id: hesam.kaveh@gmail.com

blob_id: 29a1f916d57515291923131169a4a24024185702
directory_id: e10a6d844a286db26ef56469e31dc8488a8c6f0e
path: /aav/util/model_utils.py
content_id: 70f397164e6eed14c9701edf79d67e33b353a1f5
detected_licenses: ["Apache-2.0", "CC-BY-4.0"]
license_type: permissive
repo_name: Jimmy-INL/google-research
snapshot_id: 54ad5551f97977f01297abddbfc8a99a7900b791
revision_id: 5573d9c5822f4e866b6692769963ae819cb3f10d
branch_name: refs/heads/master
visit_date: 2023-04-07T19:43:54.483068
revision_date: 2023-03-24T16:27:28
committer_date: 2023-03-24T16:32:17
github_id: 282,682,170
star_events_count: 1
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2020-07-26T15:50:32
gha_created_at: 2020-07-26T15:50:31
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,577
extension: py
content:
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities for extracting information from training checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas
import tensorflow as tf
def get_best_checkpoint_path(
model_dir, metric='loss', eval_subdir='eval_one_pass'):
"""Gets the path of the best checkpoint by given metric.
Args:
model_dir: (str) Path to tf.Estimator model.
metric: (str) Model evaluation metric over which to optimize.
eval_subdir: (str) Subdir path within model_dir to search for evaluation
events.
Returns:
(str) The path to the model best checkpoint.
Raises:
ValueError: If the given metric is not supported.
"""
events = tf.event_accumulator.EventAccumulator(
os.path.join(model_dir, eval_subdir))
events.Reload() # Actually read the event files into memory.
step = None
if metric == 'precision':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'loss':
step = _get_best_checkpoint_step(events, metric, higher_is_better=False)
elif metric == 'accuracy':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'recall':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
else:
raise ValueError('Unknown metric "%s" is not supported' % metric)
return os.path.join(model_dir, 'model.ckpt-%d' % step)
def _get_best_checkpoint_step(
events, metric_key='precision', higher_is_better=True):
"""Gets the global step number of the best checkpoint by given metric.
Args:
events: (tf.Events) The summary events for a model evaluation.
metric_key: (str) The model evaluation metric key to optimize over.
higher_is_better: (bool) Is a higher value of the metric better?
Returns:
(int) The global step number of the best checkpoint.
"""
summary_df = pandas.DataFrame([
{'step': entry.step, metric_key: entry.value}
for entry in events.Scalars(metric_key)
])
metric = summary_df[metric_key]
best_index = None
if higher_is_better:
best_index = metric.idxmax()
else:
best_index = metric.idxmin()
best_checkpoint = summary_df.iloc[best_index]
return best_checkpoint.step
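
# A usage sketch (the model directory is hypothetical):
#
#   best = get_best_checkpoint_path('/tmp/aav_model', metric='accuracy')
#   estimator.evaluate(input_fn, checkpoint_path=best)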
authors: ["copybara-worker@google.com"]
author_id: copybara-worker@google.com

blob_id: e7d70888cafcdf8d8f016d284cb11462549acf2c
directory_id: 53fab060fa262e5d5026e0807d93c75fb81e67b9
path: /backup/user_189/ch30_2019_08_26_19_06_32_624802.py
content_id: 0bf8b64bd817ff2689f3209d5285dc5d58edda50
detected_licenses: []
license_type: no_license
repo_name: gabriellaec/desoft-analise-exercicios
snapshot_id: b77c6999424c5ce7e44086a12589a0ad43d6adca
revision_id: 01940ab0897aa6005764fc220b900e4d6161d36b
branch_name: refs/heads/main
visit_date: 2023-01-31T17:19:42.050628
revision_date: 2020-12-16T05:21:31
committer_date: 2020-12-16T05:21:31
github_id: 306,735,108
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 239
extension: py
content:
import math


def distancia(ang, vel):
    dist = ((vel**2) * math.sin(2 * ang)) / 9.8
    return dist


# ang and vel are never defined in the original file, so the checks below
# would raise NameError; reading them from input is an assumed fix.
ang = float(input("Launch angle (radians): "))
vel = float(input("Launch velocity (m/s): "))

if 98 <= distancia(ang, vel) <= 102:
    print("Hit!")
elif distancia(ang, vel) > 102:
    print("Too far")
else:
    print("Too close")
authors: ["you@example.com"]
author_id: you@example.com

blob_id: 0294350b106cf605e7bc42c9069605f5e39f7c89
directory_id: 22bf2740e893b5020088b0d47f7b57eb2f9a2b5f
path: /version3/source/insert.py
content_id: 435e9f58028892f9d0b4a196e61675f5c9c0353e
detected_licenses: []
license_type: no_license
repo_name: letianccc/latin_database
snapshot_id: f2c1f9c58f398d322f722a3b1dade2296a2da19a
revision_id: 1aa6c7eed57f6ea72d6e82e0a19b7a9614fb34c8
branch_name: refs/heads/master
visit_date: 2021-04-15T09:26:48.132616
revision_date: 2018-04-12T23:40:17
committer_date: 2018-04-12T23:40:17
github_id: 126,196,425
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 354
extension: py
content:
from version3.source.catalog import Catalog
class Insert:
def __init__(self, table_name, tuple_, tran_id=None):
self.table_name = table_name
self.tuple = tuple_
self.tran_id = tran_id
def execute(self):
hf = Catalog.name_to_file(self.table_name)
hf.insert_tuple(self.tuple, self.tran_id, 'X')
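
# Usage sketch (table name and tuple are hypothetical):
#
#   Insert('latin_words', ('lumen', 'light')).execute()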
authors: ["704364447@qq.com"]
author_id: 704364447@qq.com

blob_id: 07e22436ef683aac6402b919ca8971015eb64d89
directory_id: 938a496fe78d5538af94017c78a11615a8498682
path: /algorithms/401-500/434.number-of-segments-in-a-string.py
content_id: 30057d9c241f8500cb524ddb181e94c26947fdca
detected_licenses: []
license_type: no_license
repo_name: huilizhou/Leetcode-pyhton
snapshot_id: 261280044d15d0baeb227248ade675177efdb297
revision_id: 6ae85bf79c5a21735e3c245c0c256f29c1c60926
branch_name: refs/heads/master
visit_date: 2020-03-28T15:57:52.762162
revision_date: 2019-11-26T06:14:13
committer_date: 2019-11-26T06:14:13
github_id: 148,644,059
star_events_count: 8
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 501
extension: py
content:
# Number of segments (words) in a string
class Solution(object):
def countSegments(self, s):
"""
:type s: str
:rtype: int
"""
        # Someone else's solution
s = s.strip()
if len(s) == 0:
return 0
else:
sum = 0
s = s.split(' ')
for v in s:
if v != '':
sum += 1
return sum
# return len(s.split())
print(Solution().countSegments("Hello, my name is John"))
authors: ["2540278344@qq.com"]
author_id: 2540278344@qq.com

blob_id: 8a76e10c251ffe4ca5c2f5adaf0fea29dc57a5b3
directory_id: 29ec9a3ba90f12da111d3e25cf75bc7c3db5d8ac
path: /tests/test_core.py
content_id: 27223608f8d79526ceb4cdbed94134a7a6f2049f
detected_licenses: []
license_type: no_license
repo_name: frnsys/drip
snapshot_id: cf16d1d917dc7433bb2b279e6dcea18d0394a2ae
revision_id: 9b9733900c6ca799650e665f228c525dfa143476
branch_name: refs/heads/master
visit_date: 2021-01-10T11:06:57.133358
revision_date: 2016-02-28T04:00:31
committer_date: 2016-02-28T04:00:31
github_id: 52,692,220
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,581
extension: py
content:
import json
from datetime import datetime
from drip.datastore import db
from drip.cluster import cluster
from drip.nlp import title, multisummarize
from drip.models import Event, Story, Article, Feed, Source, Keyword
from tests import TestCase
class CoreTest(TestCase):
def setUp(self):
self.events = json.load(open('tests/data/events.json', 'r'))
self.source = Source('test source')
self.feed = Feed('http://nytimes.com', self.source)
db.session.add(self.source)
db.session.add(self.feed)
db.session.commit()
def article_factory(self, **kwargs):
defaults = {
'url': 'http://nytimes.com/sup',
'text': 'sup',
'html': '<h1>sup</h1>',
'title': 'Sup',
'image': 'http:://nytimes.com/sup.jpg',
'published': datetime(day=1, month=1, year=2015),
'authors': ['Yo Go'],
'keywords': ['sup', 'yo'],
'feed': self.feed
}
defaults.update(kwargs)
return Article(**defaults)
def test_title(self):
expected = [
'Jeremy Thorpe, former Liberal party leader, dies aged 85',
'Woman Arrested in U.S. Teacher\'s Stabbing Death in Abu Dhabi',
'Faces keyboardist Ian McLagan dies',
'China to stop using executed prisoners as source of organs for transplant',
'James Bond movie to be called Spectre'
]
for e, expected in zip(self.events, expected):
articles = [self.article_factory(title=a['title'], text=a['text']) for a in e]
t = title(articles)
self.assertEqual(t, expected)
def test_cluster(self):
articles = []
true_events = []
for e in self.events:
arts = [self.article_factory(title=a['title'], text=a['text']) for a in e]
true_events.append(arts)
articles += arts
clusters = cluster(articles, [])
# Clusters might not be in the same order as the true events
for clus in clusters:
for evs in true_events:
if set(clus.articles) == set(evs):
break
else:
self.fail('Cluster:\n\t{}\ndid not match any expected cluster'.format(
[a.title for a in clus.articles]
))
def test_summarize(self):
articles = []
for e in self.events:
articles = [self.article_factory(title=a['title'], text=a['text']) for a in e]
summary = multisummarize(articles)
# This is more of a placeholder test atm
self.assertTrue(isinstance(summary, list))
def test_keywords(self):
data = [
('This is a title: Spectre', 'The story is about Spectre'),
('A really cool title', 'Spectre is the new film'),
('Yet another title', 'The new title is Spectre')
]
events = []
articles = []
for _ in range(2):
arts = [self.article_factory(title=title, text=text, keywords=['spectre']) for title, text in data]
event = Event(arts[0])
for a in arts[1:]:
event.add(a)
event.update()
articles += arts
events.append(event)
db.session.add(event)
story = Story(events[0])
story.add(events[1])
story.update()
db.session.add(story)
db.session.commit()
keyword = Keyword.query.filter_by(name='spectre').first()
self.assertEqual(set(keyword.subjects.all()), set(articles + events + [story]))
def test_story_candidates(self):
data = [
('This is a title: Spectre', 'The story is about Spectre'),
('A really cool title', 'Spectre is the new film'),
('Yet another title', 'The new title is Spectre')
]
events = []
articles = []
for _ in range(3):
arts = [self.article_factory(title=title, text=text, keywords=['spectre']) for title, text in data]
event = Event(arts[0])
for a in arts[1:]:
event.add(a)
event.update()
articles += arts
events.append(event)
db.session.add(event)
story = Story(events[0])
story.add(events[1])
story.update()
db.session.add(story)
db.session.commit()
event = events[-1]
candidates = Story.candidates(event)
self.assertEqual(candidates[0][0], story)
authors: ["f+accounts@frnsys.com"]
author_id: f+accounts@frnsys.com

blob_id: cb9110d27d0004f8563f7d1a8891ee2eb95d49ef
directory_id: 4bfc3c184e736bb68dccbb6d5657f11c950df002
path: /tests/common/test_run/atan2_run.py
content_id: 93b020c539675af05a758ffe81d5f4fcf035e136
detected_licenses: ["Apache-2.0", "Zlib", "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "BSD-2-Clause"]
license_type: permissive
repo_name: laekov/akg
snapshot_id: 159aa64ef6135222b5af784c408731275dfa9bdb
revision_id: 5316b8cb2340bbf71bdc724dc9d81513a67b3104
branch_name: refs/heads/master
visit_date: 2022-12-01T04:09:03.548063
revision_date: 2020-08-19T08:38:57
committer_date: 2020-08-19T08:41:28
github_id: 288,678,192
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2020-08-19T08:41:30
gha_created_at: 2020-08-19T08:36:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,719
extension: py
content:
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""run function for arctangent2"""
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import atan2
from gen_random import random_gaussian
from base import get_rtol_atol
def atan2_run(shape1, dtype1, shape2, dtype2, attrs):
"""run function for arctangent2"""
mod = utils.op_build_test(atan2.atan2, [shape1, shape2], [dtype1, dtype2],
kernel_name="atan2", attrs=attrs)
expect, inputs, out_buf = gen_data(shape1, dtype1, shape2, dtype2)
output = utils.mod_launch(mod, (*inputs, out_buf), expect=expect)
rtol, atol = get_rtol_atol("atan2", dtype1)
cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
return inputs, output, expect, cmp_res
def gen_data(shape1, dtype1, shape2, dtype2):
"""generate valid data for arctangent2"""
input1 = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
input2 = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
expect = np.arctan2(input1, input2)
out_buf = np.full(shape1, np.nan, dtype1)
return expect, (input1, input2), out_buf
|
[
"ckey.chengbin@huawei.com"
] |
ckey.chengbin@huawei.com
|
b45b2b0b78cb932e1697bb6f7b744db3618136d2
|
855e455b7113d32ad7bebca8e64cece441308b70
|
/adafruit_matrixportal/wifi.py
|
e9411a9fe01ef6b5288209b6bb65a68689dbccaa
|
[
"MIT"
] |
permissive
|
dsstewa/Adafruit_CircuitPython_MatrixPortal
|
f7687077d9d8ac83980f5ec75a52e1ca3942a1e3
|
885fb7edfbda0b763dbddbf9865d3fa62528e4c9
|
refs/heads/master
| 2023-02-09T23:03:50.751414
| 2021-01-05T18:22:43
| 2021-01-05T18:22:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_matrixportal.wifi`
================================================================================
Helper library for the MatrixPortal M4 or Adafruit RGB Matrix Shield + Metro M4 Airlift Lite.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit MatrixPortal M4 <https://www.adafruit.com/product/4745>`_
* `Adafruit Metro M4 Express AirLift <https://www.adafruit.com/product/4000>`_
* `Adafruit RGB Matrix Shield <https://www.adafruit.com/product/2601>`_
* `64x32 RGB LED Matrix <https://www.adafruit.com/product/2278>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import adafruit_requests as requests
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MatrixPortal.git"
class WiFi:
"""Class representing the ESP.
:param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the on-board
NeoPixel. Defaults to ``None``, to not use the status LED.
:param esp: A passed ESP32 object; can be used in cases where the ESP32 chip needs to be used
before calling the pyportal class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
"""
def __init__(self, *, status_neopixel=None, esp=None, external_spi=None):
if status_neopixel:
self.neopix = neopixel.NeoPixel(status_neopixel, 1, brightness=0.2)
else:
self.neopix = None
self.neo_status(0)
self.requests = None
if esp: # If there was a passed ESP Object
self.esp = esp
if external_spi: # If SPI Object Passed
spi = external_spi
else: # Else: Make ESP32 connection
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
else:
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_gpio0 = DigitalInOut(board.ESP_GPIO0)
esp32_reset = DigitalInOut(board.ESP_RESET)
esp32_cs = DigitalInOut(board.ESP_CS)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
self.esp = adafruit_esp32spi.ESP_SPIcontrol(
spi, esp32_cs, esp32_ready, esp32_reset, esp32_gpio0
)
requests.set_socket(socket, self.esp)
self._manager = None
gc.collect()
def connect(self, ssid, password):
"""
Connect to WiFi using the given ssid and password.
"""
self.esp.connect({"ssid": ssid, "password": password})
self.requests = requests
def neo_status(self, value):
"""The status NeoPixel.
:param value: The color to change the NeoPixel.
"""
if self.neopix:
self.neopix.fill(value)
def manager(self, secrets):
"""Initialize the WiFi Manager if it hasn't been cached and return it"""
if self._manager is None:
self._manager = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(
self.esp, secrets, None
)
return self._manager
@property
def is_connected(self):
"""Return whether we are connected."""
return self.esp.is_connected
@property
def enabled(self):
"""Not currently disablable on the ESP32 Coprocessor"""
return True
|
[
"melissa@adafruit.com"
] |
melissa@adafruit.com
|
72461308ceaf06759f6556c4bf62da939683c9d0
|
1575d5acc07eb67cb4e3cd523a24bb1d39efcb84
|
/nn-pima/cv2MLP.py
|
3518ebea717713ebf5dd3b51f3e480d47cf3ed97
|
[] |
no_license
|
ChenLiangbo/DeepLearning
|
4bd80ddb2a41b883ef70947a8b1fdb3b19656df0
|
3464c27116dc00bd597d2b9c25313964e1d89797
|
refs/heads/master
| 2020-12-24T12:39:27.666215
| 2017-05-09T13:49:44
| 2017-05-09T13:49:44
| 72,974,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: Python 2 script built against the OpenCV 2.x ANN_MLP API.
import numpy as np
from bayesClassifier import BayesClassifier
import cv2
dataset = np.load('pima-indians.npy')
columns = np.hsplit(dataset,9)
xsample = np.hstack(columns[0:8])
ysample = columns[8]
shape = xsample.shape
xsample = np.float32(xsample)
ysample = np.float32(ysample)
print "xsample = ",xsample.shape
print "ysample = ",ysample.shape
# indexList = np.random.permutation(shape[0])
indexList = range(shape[0])
x_train = xsample[indexList[0:538]]
y_train = ysample[indexList[0:538]]
print "x_train.shape = ",x_train.shape
print "y_train.shape = ",y_train.shape
x_test = xsample[indexList[538:]]
y_test = ysample[indexList[538:]]
print "x_test.shape = ",x_test.shape
print "y_test.shape = ",y_test.shape
myBayes = BayesClassifier()
layers = np.array([8,15,1])
model = cv2.ANN_MLP()
model.create(layers)
params = dict( term_crit = (cv2.TERM_CRITERIA_COUNT, 3000, 0.01),
train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
bp_dw_scale = 0.001,
bp_moment_scale = 0.0 )
model.train(x_train,y_train,None,params = params)
ret,resp = model.predict(x_test)
y_predict = resp.argmax(-1)
print "y_predict = ",(y_predict.shape,np.mean(y_predict == y_test))
print y_predict[0:10]
result = myBayes.f_measure(y_predict,y_test)
print "result = ",result
|
[
"chenlb@polarwin.cn"
] |
chenlb@polarwin.cn
|
0c6469587fb87fb9776a74d943a5d7a7ee89bd7e
|
51e6234f683ed70207f53d6ee3f537c715082517
|
/test/setup.py
|
d98509b0fedc1c07bf389086c75fd98e825bbd80
|
[
"BSD-2-Clause"
] |
permissive
|
ninjaaron/fast-entry_points
|
d13d36ee7c4d73a425fba1d0f167aba1e8970127
|
a3a26f320c7ae2191fde71b79d4f4bf325d162f3
|
refs/heads/master
| 2021-10-09T16:16:14.618890
| 2021-10-07T11:55:03
| 2021-10-07T11:55:03
| 64,887,433
| 131
| 23
|
BSD-2-Clause
| 2021-06-21T19:49:44
| 2016-08-03T23:57:37
|
Python
|
UTF-8
|
Python
| false
| false
| 236
|
py
|
from setuptools import setup
import fastentrypoints
setup(
name='dummypkg',
version='0.0.0',
py_modules=['dummy'],
description='dummy package for the test',
entry_points={'console_scripts': ['hello=dummy:main']},
)
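The dummy module the entry point refers to is not included in this file; a minimal stand-in (hypothetical, matching the 'hello=dummy:main' spec above) would be:

# dummy.py -- backs the 'hello' console script declared above
def main():
    print("hello from dummy")

if __name__ == "__main__":
    main()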
|
[
"ninjaaron@gmail.com"
] |
ninjaaron@gmail.com
|
371e51252c8dd93d6a036b76d919eb54d33bbba8
|
237162607427106ae9564670d47427a62356861f
|
/core/migrations/0153_divisionlocation.py
|
87a4cdf31d325f60c95cbffe34744975f8ef282d
|
[] |
no_license
|
pitipund/basecore
|
8648c1f4fa37b6e6075fd710ca422fe159ba930e
|
a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b
|
refs/heads/master
| 2020-09-13T20:16:02.622903
| 2019-11-20T09:07:15
| 2019-11-20T09:07:15
| 221,885,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-01-14 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0029_auto_20190114_1040'),
('core', '0152_language'),
]
operations = [
migrations.CreateModel(
name='DivisionLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('division', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Division')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Location')),
],
options={
'verbose_name': 'สถานที่ตั้งหน่วยงาน',
'verbose_name_plural': 'สถานที่ตั้งหน่วยงาน',
},
),
]
|
[
"longman_694@hotmail.com"
] |
longman_694@hotmail.com
|
15551af86e94ba02d1ac2f0711c56efd691bcc5b
|
3ee04e8eef626c3d65f7b4ff218fbb01ba7dcff4
|
/main/migrations/0010_auto_20180331_1204.py
|
a80cf707c589b92029eb2d2c4ac58bd6c57d808f
|
[] |
no_license
|
David-OConnor/books
|
44499fba804394187103567b021252ecff9b906c
|
1c03b8c026de08eb1989e99171af01e7e8a7bbc9
|
refs/heads/master
| 2021-01-10T18:07:40.559040
| 2019-10-18T18:57:06
| 2019-10-18T18:57:06
| 43,619,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# Generated by Django 2.0.3 on 2018-03-31 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20180331_1117'),
]
operations = [
migrations.CreateModel(
name='AdelaideWork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('author_first', models.CharField(max_length=50)),
('author_last', models.CharField(max_length=50)),
('translator', models.CharField(blank=True, max_length=100, null=True)),
('url', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.AlterUniqueTogether(
name='adelaidework',
unique_together={('author_last', 'title')},
),
]
|
[
"david.alan.oconnor@gmail.com"
] |
david.alan.oconnor@gmail.com
|
3b0dbff77c453d04d05c68a3fe87fc404a795510
|
dc76018904675c64b6eb728d253a162802a584be
|
/urls.py
|
d4c2bf2fa81994fffdc6df56a2b8599172bf517e
|
[] |
no_license
|
edb-gjengen/mbftns
|
ee36b0e28b5d8f0200b1407bb3940f220a75e553
|
0434c7ec16743467602481615ef1b87bf53df565
|
refs/heads/master
| 2021-01-20T20:53:17.284180
| 2012-09-05T12:18:29
| 2012-09-05T12:18:29
| 65,237,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'main.views.index', name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('main.urls')),
url(r'^accounts/login', 'django.contrib.auth.views.login'),
url(r'^accounts/logout', 'django.contrib.auth.views.logout'),
)
# for dev
#from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#urlpatterns += staticfiles_urlpatterns()
|
[
"nikolaik@gmail.com"
] |
nikolaik@gmail.com
|
838eda0275474ae5c472ea23d89a3ed57c710874
|
1a29735113eeb8061527c9e785fb3e16abe10449
|
/lib/pymod/pymod/test/command/refresh.py
|
1ba750637b0f0e168538bde48a54c2f2fd2ceed5
|
[] |
no_license
|
tjfulle/Modulecmd.py
|
db3fb96db63e42666056e8086f433a779f5bfc86
|
42e3d34b76a53f4ff557e96ba2af3cb83b963ad2
|
refs/heads/master
| 2023-02-21T10:16:49.408099
| 2021-11-18T06:29:59
| 2021-11-18T06:29:59
| 141,306,544
| 0
| 0
| null | 2019-05-09T04:51:09
| 2018-07-17T15:09:16
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
import pytest
import pymod.mc
from pymod.main import PymodCommand
@pytest.fixture()
def modules_path(tmpdir, namespace, modulecmds):
m = modulecmds
one = tmpdir.mkdir("1")
one.join("a.py").write(m.setenv("a"))
one.join("b.py").write(m.setenv("b"))
one.join("c.py").write(m.setenv("c"))
one.join("d.py").write(m.setenv("d"))
ns = namespace()
ns.one = one.strpath
return ns
@pytest.mark.unit
def test_command_refresh(modules_path, mock_modulepath):
load = PymodCommand("load")
refresh = PymodCommand("refresh")
mock_modulepath(modules_path.one)
load("a", "b", "c", "d")
refresh()
loaded = "".join(_.fullname for _ in pymod.mc.get_loaded_modules())
assert loaded == "abcd"
|
[
"tjfulle@sandia.gov"
] |
tjfulle@sandia.gov
|
cf7bf48f6a2df6d3cec9391c5bb31ea49634341b
|
c2fcc0709ed113037201c707fcebe298966e5694
|
/tests/test_base.py
|
5eec23115c64d8b527e308c49cb795e72b51b47e
|
[] |
no_license
|
HyperSuprime-Cam/astshim
|
da1364f301847d8a2cdb6fad63e96aeb4780f694
|
a72da6bbfa9d1fec5e5d87d1aa560234f2b95958
|
refs/heads/master
| 2022-04-03T16:35:25.002206
| 2020-01-23T20:12:51
| 2020-01-23T20:12:51
| 109,919,007
| 0
| 0
| null | 2017-11-08T02:46:38
| 2017-11-08T02:46:38
| null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import unittest
import numpy as np
from numpy.testing import assert_equal
import astshim as ast
from astshim.test import ObjectTestCase
class TestBase(ObjectTestCase):
def test_arrayFromVector(self):
nAxes = 3
nValues = 5
np.random.seed(1)
dataVec = np.random.rand(nAxes * nValues)
desiredDataArr = dataVec.copy()
desiredDataArr.shape = (nAxes, nValues)
dataArr = ast.arrayFromVector(vec=dataVec, nAxes=nAxes)
assert_equal(dataArr, desiredDataArr)
dataArr2 = ast.arrayFromVector(vec=list(dataVec), nAxes=nAxes)
assert_equal(dataArr2, desiredDataArr)
# make sure dataArr is a deep copy; changing dataVec should
# not change dataArr
dataVec[0] += 10
assert_equal(dataArr, desiredDataArr)
for delta in (-1, 1):
badDataVec = np.random.rand(nAxes * nValues + delta)
with self.assertRaises(RuntimeError):
ast.arrayFromVector(vec=badDataVec, nAxes=nAxes)
if __name__ == "__main__":
unittest.main()
|
[
"rowen@uw.edu"
] |
rowen@uw.edu
|
9d2ef5da9fc5d97e998e8ead3784d778354cd46f
|
b06978b6020ce3240912ba5c131c4f38a86d7996
|
/Pycharm_files/Dictionaries/Chapter_5_reading.py
|
5200f5c68a50cfdff5f9eff732463af79a87c2c7
|
[] |
no_license
|
mn4774jm/PycharmProjects
|
95dc8ee6b89a85ba02d4134aa5b5bce11004647b
|
886bcf2400abc9a1f797fe98d09241f99fa16322
|
refs/heads/master
| 2021-08-09T10:20:27.907847
| 2020-09-04T15:21:21
| 2020-09-04T15:21:21
| 219,878,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
# # Birthdays.py
#
# birthdays = {'Alice':'Apr 1', 'Bob':'Dec 12', 'Carol':'Mar 4'}
#
# while True:
# print('Enter a name: (blank to quit)')
# name = input()
# if name =='':
# break
#
# if name in birthdays:
# print(birthdays[name] + ' is the birthday of '+ name)
# else:
# print('I do not have birthday information for '+name)
# print('What is their birthday?')
# bday = input()
# birthdays[name] = bday
# print('Birthday database updated.')
############################################
#using Data types in loops; .values(), .keys(), .items()
# spam = {'color': 'red', 'age': 42}
# #dict_keys
# # for k in spam.keys():
# # print(k)
#
# #dict_values
# # for v in spam.values():
# # print(v)
#
# #dict_items
# for i in spam.items():
# print(i)
#############################################
# #multiple assignment trick
# spam = {'color': 'red', 'age': 42}
# for k, v in spam.items():
# print('Key: ' + k + 'Value: ' + str(v))
#############################################
# #The get method; .get()
# #Because the value of cups in the dictionary is 2 it will be cups will print 2
# picnicItems = {'apples': 5, 'cups': 2}
# cups = 'I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.'
# print(cups)
#
# #because there is no key called eggs in the dictionary 0 will be printed
# eggs = 'I am bringing ' + str(picnicItems.get('eggs', 0)) + ' eggs.'
# print(eggs)
#############################################
#The setdefault() method
#used for setting value for a dictionary key whos value does not already exist
# spam = {'name': 'Pooka', 'age': 5}
# if 'color' not in spam:
# spam['color'] = 'black'
# print(spam)
# print(spam.keys())
#############################################
# #characterCount.py / prettyPrinting.py
# import pprint
# message = 'It was a bright cold day in April, and the clocks were striking thirteen'
# count = {}
#
# for character in message:
# count.setdefault(character,0)
# count[character] = count[character] +1
#
# print(pprint.pformat(count))
#############################################
#ticTacToe.py
# theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',
# 'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',
# 'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
#
# def printBoard(board):
# print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])
# print('-+-+-')
# print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])
# print('-+-+-')
# print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])
#
# turn = 'X'
# for i in range(9):
# printBoard(theBoard)
# print('Turn for '+turn+'. Move on which space?')
# move = input()
# theBoard[move] = turn
# if turn == 'X':
# turn = 'O'
# else:
# turn = 'X'
#
# printBoard(theBoard)
##############################################
#totalBought example; nested dictionary
# allGuests = {'Alice': {'apples': 5, 'pretzels': 12},
# 'Bob': {'ham sandwiches': 3, 'apples': 2},
# 'Carol': {'cups': 3, 'apple pies': 1}}
#
# #Inside the loop, the string of the guest's names is assigned to k,
# #and the dictionary of picnic items is assigned to v.
# def totalBrought(guests, item):
# numBrought = 0
# for k, v in guests.items():
# # if item is not present its value will default to 0
# numBrought = numBrought + v.get(item, 0)
# return numBrought
#
# print('Number of things being brought:')
# print(' - Apples ' + str(totalBrought(allGuests, 'apples')))
# print(' - Cups ' + str(totalBrought(allGuests, 'cups')))
# print(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))
# print(' - Ham Sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
# print(' - Apple Pies ' + str(totalBrought(allGuests, 'apple pies')))
|
[
"mn4774jm@go.minneapolis.edu"
] |
mn4774jm@go.minneapolis.edu
|
7642e99717c9f80209dd05eb0cc3bd3525ee7d19
|
92a1d7c2bb2119c67d9e33f1e48a1a02335772ce
|
/book/p8/8_10.py
|
3519916dc5e72c9ccce016fab3c0190fe9f2d6f8
|
[] |
no_license
|
zephyr123/blibli
|
b615fb2ee2f1f98deaf5709640e18d3be1a656ac
|
d45ba1bcce66dc1df185a475abe01f744c128c1b
|
refs/heads/master
| 2021-07-07T09:23:18.347107
| 2020-03-08T13:40:29
| 2020-03-08T13:40:29
| 244,829,368
| 0
| 0
| null | 2021-06-10T22:38:02
| 2020-03-04T06:55:15
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
def show_magicians(names):
for name in names:
print(name)
def make_great(names):
    # consumes `names`, appending each "the Great ..." name to the
    # module-level `mod_names` list defined below
    while names:
        curr_names = "the Great " + names.pop()
        mod_names.append(curr_names)
magic_names = ['liuqian','zhuxun','dongqing']
mod_names = []
make_great(magic_names)
show_magicians(mod_names)
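For comparison, a side-effect-free take on the same exercise (a rewrite for illustration, not part of the original solution) keeps the input list intact and returns a new one:

def make_great_pure(names):
    # build the new list instead of popping from the input
    return ["the Great " + name for name in names]

magic_names = ['liuqian', 'zhuxun', 'dongqing']
show_magicians(make_great_pure(magic_names))  # assumes show_magicians above
print(magic_names)  # unchanged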
|
[
"huys19@qq.com"
] |
huys19@qq.com
|
e8cb02f30831c8e4ad17d9e2d6f87fb1386d7d12
|
daee54824cb107f9b5749e3c12e7f09f544bac0e
|
/modules/vtk_basic/vtkJPEGWriter.py
|
cd7a99ec769de84b521d124207929c1fb3f9fdcc
|
[] |
no_license
|
JoonVan/devide
|
8fa556d2b42c5ad70c3595303253f2a171de0312
|
586225d68b079e2a96007bd33784113b3a19a538
|
refs/heads/master
| 2020-12-26T06:25:01.744966
| 2017-01-22T19:47:50
| 2017-01-22T19:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkJPEGWriter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkJPEGWriter(), 'Writing vtkJPEG.',
('vtkJPEG',), (),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
[
"cpbotha@users.noreply.github.com"
] |
cpbotha@users.noreply.github.com
|
b17ffa7de9e79f3f88860099d16d5ecd324368a4
|
70f1c694bea6178c98b134b9c44952ef6693be9f
|
/Manuscript/figure/Figure_MITE_auto_promoter/Auto_target/scripts/make_activeTE-pep-msa-one.py
|
5f10b1bc98f0a04409b2db8ab671d463341e6a2d
|
[] |
no_license
|
JinfengChen/Rice_pop
|
5c19c5837805e51ddb3b2ffba4baffdc59c9bfd3
|
ef272bf4825b29610c94de55eb53f231fb5febc6
|
refs/heads/master
| 2020-04-07T04:55:36.606594
| 2018-03-02T16:52:53
| 2018-03-02T16:52:53
| 33,501,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
#!/usr/bin/env python
import sys
import os
import fnmatch
import os.path
import subprocess as subp
import fastaIO
args = sys.argv[1:]
def usage():
print """
Usage: make_activeTE-pep-msa.py <pep-cluster_MSA_folder> <match_pattern> <run_name> <found_superfamily_list>
"""
sys.exit(-1)
if (len(args) != 3 and len(args) != 4) or sys.argv[1] == '-h' or sys.argv[1] == '-help' or sys.argv[1] == '-H' or sys.argv[1] == '-Help' or sys.argv[1] == '--h' or sys.argv[1] == '--help':
usage()
top = '''#!/bin/bash
#PBS -l nodes=1:ppn=1,mem=8gb,walltime=08:00:00 -j oe
module load stajichlab
module load perl/5.16.3
module load fasta
module load trimal
cd $PBS_O_WORKDIR
'''
middle = '''perl /rhome/cjinfeng/software/tools/mTEA/scripts/activeTE_msa.pl -p -a -f 26 '''
files = os.listdir(sys.argv[1])
out_handle = open("aTE-pep_" + sys.argv[3] + ".sh", "w")
print >>out_handle, top
for i in files:
if fnmatch.fnmatch(i, sys.argv[2]):
fpath = os.path.join(sys.argv[1], i)
if len(args) == 4:
full = middle + fpath + " " + sys.argv[4]
else:
full = middle + fpath
#out_handle = open("aTE-pep_" + sys.argv[3] + "_" + i + ".sh", "w")
print>>out_handle, full
print >>out_handle, '\n\necho "Done"'
out_handle.close()
|
[
"jinfeng7chen@gmail.com"
] |
jinfeng7chen@gmail.com
|
b02f08e27f8000cd103dda67c861f67cd6103769
|
77e303d8353170f4181ab9ff66ac77cb57d46caf
|
/src/508A.py
|
4df01f6a789a6ea263d31dfc23439685b3bc3af6
|
[
"MIT"
] |
permissive
|
viing937/codeforces
|
14f689f2e3360939912e927fb830c69f7116b35c
|
5bd8c2bec0e48cb2b4830c26849ea7fda447267c
|
refs/heads/master
| 2022-09-25T19:51:03.891702
| 2022-08-15T15:32:54
| 2022-08-15T15:32:54
| 32,905,529
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# coding: utf-8
n, m, k = [int(i) for i in input().split()]
mark = [[0 for i in range(m+2)] for j in range(n+2)]
for c in range(k):
i, j = [int(i) for i in input().split()]
mark[i][j] = 1
if ( mark[i-1][j-1]==1 and mark[i][j-1]==1 and mark[i-1][j]==1 ) \
or ( mark[i][j-1]==1 and mark[i+1][j]==1 and mark[i+1][j-1]==1 ) \
or ( mark[i][j+1]==1 and mark[i-1][j]==1 and mark[i-1][j+1]==1 ) \
or ( mark[i][j+1]==1 and mark[i+1][j]==1 and mark[i+1][j+1]==1 ):
print(c+1)
break
else:
print(0)
|
[
"viing937@gmail.com"
] |
viing937@gmail.com
|
09529835a8153d35821c70fe6e90354fc9ab7438
|
edcd74f8f65119bdbe737360c2ca33b4a6da160a
|
/python/problem-math/sum_of_square_numbers.py
|
0b0dfde875e4a0b998dba553d721bfd19bf60025
|
[] |
no_license
|
hyunjun/practice
|
72e83de6a1d5e04ddcd16526f16110ea2dd00373
|
5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67
|
refs/heads/master
| 2023-08-31T07:00:37.320351
| 2023-08-17T07:29:24
| 2023-08-17T07:29:24
| 2,704,126
| 3
| 2
| null | 2022-12-14T20:25:07
| 2011-11-03T18:28:44
|
Python
|
UTF-8
|
Python
| false
| false
| 807
|
py
|
# https://leetcode.com/problems/sum-of-square-numbers
# https://leetcode.com/problems/sum-of-square-numbers/solution
import math
class Solution:
# 90.88%
def judgeSquareSum(self, c):
if c < 0:
return False
if 0 == c:
return True
smaller, larger = 1, int(math.sqrt(c))
while smaller <= larger:
smaller = math.sqrt(c - larger ** 2)
if int(smaller) == smaller:
return True
larger -= 1
return False
s = Solution()
data = [(5, True),
(4, True),
(3, False),
(125, True),
(129, False),
]
for c, expected in data:
real = s.judgeSquareSum(c)
print('{}, expected {}, real {}, result {}'.format(c, expected, real, expected == real))
|
[
"agapelover4u@yahoo.co.kr"
] |
agapelover4u@yahoo.co.kr
|
adbb25263a000d69f883646bd0fbdb9d76a046b6
|
cb10a56ab0515703bf65c5d9ab6e9c75b2e53031
|
/src/images/training_images/try.py
|
d447233f1299786ae5526dc3aeb5b1f9382ae69a
|
[
"MIT"
] |
permissive
|
JuanMorenoS/Captcha-With-Neuronal-Network
|
2d09bc6e5ac308559aa7d8a0aa590b3c847e6022
|
3c4a119cb4df999011760caaa8f8271027de4897
|
refs/heads/master
| 2020-03-11T00:47:36.133301
| 2018-05-05T23:51:25
| 2018-05-05T23:51:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from PIL import Image
import pytesseract
for i in range(4746,10001):
print(i,pytesseract.image_to_string(Image.open(str(i)+".jpg") ,config='-c tessedit_char_whitelist=abcdef0123456789') )
|
[
"="
] |
=
|
f23f7227996acd01fd328809befc31c62faa0a5b
|
ea178f0977127189c7559dfa9ca2faadceef5ff8
|
/python/jittor/test/test_new_fused_op.py
|
32b9c488039e6b263c6ec0c9b443abf92f54ef20
|
[
"Apache-2.0"
] |
permissive
|
AbbasMZ/jittor
|
a0bb5b2cbceeffb40c61405b863e7e4b91567756
|
fcec57f70422b52d6b8d0235e29f91fd2212f559
|
refs/heads/master
| 2023-06-20T07:07:22.952846
| 2021-07-15T14:40:54
| 2021-07-15T14:40:54
| 386,115,280
| 0
| 0
|
Apache-2.0
| 2021-07-15T00:42:22
| 2021-07-15T00:39:53
| null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Guowei Yang <471184555@qq.com>
# Dun Liang <randonlang@gmail.com>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import sys
import os
import jittor as jt
import unittest
import time
import numpy as np
from .test_log import find_log_with_re
class TestNewFuse(unittest.TestCase):
@classmethod
def setUpClass(self):
return
def check(self, h, w, cs, rs, pa, rtp, dim):
a = jt.random([h,w])
a.sync()
with jt.log_capture_scope(
log_v=0, log_vprefix="tuner_manager=100",
# this value is used for force compile
compile_options={"test_new_fused_op":1}
) as logs:
amean=jt.mean(a, dims=[dim], keepdims=1)
a2mean=jt.mean(a*a, dims=[dim], keepdims=1)
norm_aa=(a-amean.broadcast_var(a))/(jt.sqrt(a2mean-amean*amean).broadcast_var(a))
norm_aa.sync()
logs = find_log_with_re(logs,
"Run tuner reduce: confidence\\((.*)\\) candidates\\((.*)\\)$")
assert len(logs) == 3, logs
def test_new_fuse(self):
self.check(8192,8192, 0, 0, 0, 5, 0)
if __name__ == "__main__":
unittest.main()
|
[
"randonlang@gmail.com"
] |
randonlang@gmail.com
|
cb05cede811123fa5e3a317f94586695225fe6ed
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Generators/MadGraphModels/python/models/scalar_singlet_750_UFO/coupling_orders.py
|
fe4f223b9c8dbcde3bda5f37220247ddec40d888
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
# This file was automatically created by FeynRules 2.3.7
# Mathematica version: 10.2.0 for Linux x86 (64-bit) (July 28, 2015)
# Date: Thu 26 Nov 2015 09:52:28
from object_library import all_orders, CouplingOrder
QCD = CouplingOrder(name = 'QCD',
expansion_order = 99,
hierarchy = 1)
QED = CouplingOrder(name = 'QED',
expansion_order = 99,
hierarchy = 2)
NP = CouplingOrder(name = 'NP',
expansion_order = 99,
hierarchy = 1)
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
8dbc5c3a580ccb580ec81f61fc49b76815891337
|
e7b312b4cc3355f4ca98313ef2ac9f3b0d81f245
|
/kickstart/2018/round_c/a/make_large_input.py
|
04519b5c74a2e0bdcab79f66b28555502a3fd222
|
[] |
no_license
|
minus9d/programming_contest_archive
|
75466ab820e45ee0fcd829e6fac8ebc2accbbcff
|
0cb9e709f40460305635ae4d46c8ddec1e86455e
|
refs/heads/master
| 2023-02-16T18:08:42.579335
| 2023-02-11T14:10:49
| 2023-02-11T14:10:49
| 21,788,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
T = 100
print(T)
for t in range(T):
N = 1000
print(N)
used = set()
for n in range(2, N+1):
m = random.randint(1, n-1)
print("{} {}".format(n, m))
used.add((n, m))
used.add((m, n))
while True:
i = random.randint(1, N)
j = random.randint(1, N)
if i == j or (i, j) in used:
continue
else:
print("{} {}".format(i, j))
break
|
[
"minus9d@gmail.com"
] |
minus9d@gmail.com
|
66ac2692352f0a5f791832e0e94b339c0114130d
|
578bdcf2720805c1075ba348764983d99031911f
|
/Udacity/Hackerrank/Python Generators/prime_number.py
|
0cd0ae59c44eb3425187659a67b59dfde8620d90
|
[] |
no_license
|
mrudula-pb/Python_Code
|
994de4720289ded0a55017407d27b1d0f0b08c65
|
0dcdc6589d3c614bd1e6a03aa5c2b55664b9e6b2
|
refs/heads/master
| 2023-03-25T16:52:27.420925
| 2021-03-22T21:40:37
| 2021-03-22T21:40:37
| 350,476,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
def check_prime(number):
    if number < 2:  # 0 and 1 are not prime
        return False
    for divisor in range(2, int(number ** 0.5) + 1):
        if number % divisor == 0:
            return False
    return True
value = check_prime(2)
print(value)
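A quick check over small inputs (illustrative; reuses check_prime from above) shows the guard and the trial-division loop in action:

for n in [0, 1, 2, 3, 4, 9, 17, 25]:
    print(n, check_prime(n))
# 0, 1, 4, 9, 25 -> False; 2, 3, 17 -> True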
|
[
"mrudulapolavarapu@gmail.com"
] |
mrudulapolavarapu@gmail.com
|
f8357c5fbc9d1cc8439c6f4dcde1207b8d795b57
|
29f6b4804f06b8aabccd56fd122b54e4d556c59a
|
/CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/astroid/tests/resources.py
|
03d45623fecda846c17a7b4088d054413c44c475
|
[
"Apache-2.0"
] |
permissive
|
obahy/Susereum
|
6ef6ae331c7c8f91d64177db97e0c344f62783fa
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
refs/heads/master
| 2020-03-27T11:52:28.424277
| 2018-12-12T02:53:47
| 2018-12-12T02:53:47
| 146,511,286
| 3
| 2
|
Apache-2.0
| 2018-12-05T01:34:17
| 2018-08-28T21:57:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# Copyright 2014 Google, Inc. All rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import pkg_resources
from astroid import builder
from astroid import MANAGER
from astroid.bases import BUILTINS
DATA_DIR = 'testdata/python{}/'.format(sys.version_info[0])
def find(name):
return pkg_resources.resource_filename(
'astroid.tests', os.path.normpath(os.path.join(DATA_DIR, name)))
def build_file(path, modname=None):
return builder.AstroidBuilder().file_build(find(path), modname)
class SysPathSetup(object):
def setUp(self):
sys.path.insert(0, find(''))
def tearDown(self):
del sys.path[0]
datadir = find('')
for key in list(sys.path_importer_cache):
if key.startswith(datadir):
del sys.path_importer_cache[key]
class AstroidCacheSetupMixin(object):
"""Mixin for handling the astroid cache problems.
When clearing the astroid cache, some tests fails due to
cache inconsistencies, where some objects had a different
builtins object referenced.
This saves the builtins module and makes sure to add it
back to the astroid_cache after the tests finishes.
The builtins module is special, since some of the
transforms for a couple of its objects (str, bytes etc)
are executed only once, so astroid_bootstrapping will be
useless for retrieving the original builtins module.
"""
@classmethod
def setUpClass(cls):
cls._builtins = MANAGER.astroid_cache.get(BUILTINS)
@classmethod
def tearDownClass(cls):
if cls._builtins:
MANAGER.astroid_cache[BUILTINS] = cls._builtins
|
[
"abelgomezr45@gmail.com"
] |
abelgomezr45@gmail.com
|
2b8be2aeed918c270e1676da965fe5fdcb587c62
|
368c66467b78adf62da04cb0b8cedd2ef37bb127
|
/BOJ/Python/5430_AC.py
|
99d3c6a86c437339ec939eb8199c2b0eed1a0f4d
|
[] |
no_license
|
DJHyun/Algorithm
|
c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5
|
fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a
|
refs/heads/master
| 2020-07-30T16:32:49.344329
| 2020-02-25T07:59:34
| 2020-02-25T07:59:34
| 210,289,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
# baekjoon source = "https://www.acmicpc.net/problem/5430"
import sys
T = int(sys.stdin.readline())
for test_case in range(T):
meto = sys.stdin.readline().strip()
count = int(sys.stdin.readline())
len_meto = len(meto)
first = -1
c = sys.stdin.readline()
len_c = len(c)
c = c[1:len_c - 2].split(',')
rear = count - 1
if count == 0:
if 'D' in meto:
print('error')
else:
print('[]')
else:
flag = True
for i in range(len_meto):
if meto[i] == 'D':
if first == rear:
print('error')
break
if flag:
first += 1
c[first] = 0
else:
c[rear] = 0
rear -= 1
            else:
                flag = not flag  # 'R' reverses the direction flag
else:
print('[', end='')
if flag:
for j in range(first + 1, rear + 1):
if j != rear:
print(c[j] + ',', end='')
else:
print(c[j], end='')
print(']')
else:
for j in range(rear, first, -1):
if j != first+1:
print(c[j] + ',', end='')
else:
print(c[j], end='')
print(']')
|
[
"djestiny4444@naver.com"
] |
djestiny4444@naver.com
|
30e1c976566bb28599db9f3287b764540219faef
|
30a37ab89a4a8101fb53308301628e8a7458d1fe
|
/test/functional/mempool_limit.py
|
cec38f48e5de886c44c4177d801a91ffd1a9f984
|
[
"MIT"
] |
permissive
|
BFBCOIN/bfbcoin-core
|
1001e55f54a073ac645443c40fd5c7e6d117c07c
|
7c3b6dcc8e63f8041331846e0d8230c8db059e23
|
refs/heads/master
| 2020-04-03T15:31:05.749852
| 2018-10-30T11:36:00
| 2018-10-30T11:36:00
| 155,365,385
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The bfb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import bfbTestFramework
from test_framework.util import *
class MempoolLimitTest(bfbTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1e7107daf91ac3e6cc7dc2170334bdc94649cb89
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5706278382862336_0/Python/AlonH/2014C1A.py
|
c5e1e86cdaf0c09e44104a4a883bdd4b1e7750b8
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
import math
f = open("A-small-attempt0.in","r")
o = open("A-small-answers.txt","w")
T = int(f.readline())
for t in range(1,T+1):
inp = [float(a) for a in f.readline().split("/")]
p = inp[0]
q = inp[1]
print(p,"/",q)
b = int(math.log(q,2))
print(b)
p = p/(q/(2**b))
print(p)
a = int(math.log(p,2))
if p%1 != 0:
o.write("Case #"+str(t)+": impossible"+"\n")
else:
o.write("Case #"+str(t)+": "+str(b-a)+"\n")
o.close()
#o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
#A-small-attempt0.in
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
916921a156a5df7facf6056f5dbcab774da038e9
|
0e0cf67455424e68705c428fc2a5cd71e74879f6
|
/practico_03/ejercicio_04.py
|
378e079e9a55c528232d49537c3db72bfe3d1fce
|
[] |
no_license
|
JoacoDiPerna/frro-soporte-2019-12
|
cf43a3a6f722350891051816aac9d7e50a91add4
|
f918c094346ba350c0672596fe316c60ae8fdc7c
|
refs/heads/master
| 2020-04-29T00:29:57.757014
| 2019-08-09T19:39:30
| 2019-08-09T19:39:30
| 175,695,790
| 1
| 0
| null | 2019-08-09T19:39:32
| 2019-03-14T20:38:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# Implement the buscar_persona function, which returns a person's record based on their id.
# The return value is a tuple containing the fields: id, nombre, nacimiento, dni and altura.
# If no record is found, it returns False.
import datetime
from practico_03.ejercicio_01 import create_connection
from practico_03.ejercicio_01 import reset_tabla
from practico_03.ejercicio_02 import agregar_persona
from getpass import getuser
def buscar_persona(id_persona):
conn = create_connection(
'C:\\Users\\' + getuser() + '\\Desktop\\tps_python.db')
sql = "SELECT * FROM personas WHERE id_persona=? ORDER BY id_persona ASC"
cur = conn.cursor()
cur.execute(sql, (id_persona,))
rows = cur.fetchall()
cur.close()
conn.commit()
conn.close()
return False if not rows else rows[0]
@reset_tabla
def pruebas():
juan = buscar_persona(agregar_persona(
'juan perez', datetime.datetime(1988, 5, 15), 32165498, 180))
assert juan == (1, 'juan perez', '1988-05-15', 32165498, 180)
assert buscar_persona(12345) is False
if __name__ == '__main__':
pruebas()
|
[
"franmrivera@gmail.com"
] |
franmrivera@gmail.com
|
b67611e1022940616dfa02c1031329d904b7f33e
|
426742533fc90d9240d01f0d0347a1648cc91430
|
/freeways/migrations/0002_auto_20150621_0106.py
|
ddf17e24c9e4270ee812a4c5a5b93968f01785a1
|
[] |
no_license
|
kdechant/freeways
|
5093a67ad917cf02fab4706ea21b81b05d41e84a
|
8a9b73304db06c2423b2729bbe8f72aaa1bff53d
|
refs/heads/master
| 2021-09-29T05:39:48.687864
| 2020-01-08T04:32:42
| 2020-01-08T04:33:46
| 37,783,501
| 0
| 0
| null | 2021-06-10T17:35:47
| 2015-06-20T19:45:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('freeways', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='routesegment',
name='ring',
),
migrations.AddField(
model_name='routesegment',
name='distance_from_origin',
field=models.DecimalField(editable=False, default=0, max_digits=6, decimal_places=3),
),
migrations.AlterField(
model_name='routesegment',
name='geojson',
field=models.TextField(editable=False, null=True, blank=True),
),
migrations.AlterField(
model_name='routesegment',
name='lane_miles',
field=models.DecimalField(editable=False, default=0, max_digits=5, decimal_places=2),
),
migrations.AlterField(
model_name='routesegment',
name='length',
field=models.DecimalField(editable=False, default=0, max_digits=5, decimal_places=2),
),
]
|
[
"keith.dechant@gmail.com"
] |
keith.dechant@gmail.com
|
b12fd17fff74d25b03c124c311932fb6787afc78
|
b34808a8571340dcb3d70bd29d59930a6a3e4463
|
/catalogueapp/views.py
|
eb6bf54a399a469eece4d25c8cec86c633a415a4
|
[] |
permissive
|
OpenDataServices/iCAN-Scot-Catalogue
|
110ee5030b258555a45e9061feb97a5ce031cc48
|
205cf3e6e8ef984f3f4e3d89537c21bdafb805a1
|
refs/heads/master
| 2021-08-04T08:31:15.411708
| 2020-06-25T09:24:32
| 2020-06-25T09:34:27
| 191,113,427
| 1
| 1
|
BSD-3-Clause
| 2020-06-05T21:15:14
| 2019-06-10T06:52:06
|
Python
|
UTF-8
|
Python
| false
| false
| 6,626
|
py
|
from django.shortcuts import render, redirect
from django.http import Http404, JsonResponse
from django.contrib.auth.decorators import permission_required, login_required
from catalogueapp.forms import AddForm, EditOrganisationForm
from catalogueapp.tools import ALISS_URL, ALISS_Importer
from catalogueapp.models import Service, Organisation
def index(request):
context = {
'search': request.GET.get('search', ''),
}
if context['search']:
context['organisations'] = Organisation.objects.raw(
"""
SELECT * FROM (
SELECT catalogueapp_organisation.*,
to_tsvector(
catalogueapp_organisation.name || ' ' ||
catalogueapp_organisation.description || ' ' ||
catalogueapp_organisation.our_description_markdown || ' ' ||
array_agg(catalogueapp_service.name)::text || ' ' ||
array_agg(catalogueapp_service.description)::text
) AS search_vector
FROM catalogueapp_organisation
JOIN catalogueapp_service ON catalogueapp_service.organisation_id = catalogueapp_organisation.id
WHERE catalogueapp_service.active = '1'
GROUP BY catalogueapp_organisation.id
ORDER BY catalogueapp_organisation.name ASC
) AS data
WHERE search_vector @@ to_tsquery(%s)
""",
[context['search']]
)
else:
context['organisations'] = Organisation.objects.raw(
"""SELECT catalogueapp_organisation.* FROM catalogueapp_organisation
JOIN catalogueapp_service ON catalogueapp_service.organisation_id = catalogueapp_organisation.id
WHERE catalogueapp_service.active = '1'
GROUP BY catalogueapp_organisation.id
ORDER BY catalogueapp_organisation.name ASC """,
)
return render(request, 'catalogueapp/index.html', context)
def organisation_index(request, aliss_id):
context = {
'organisation': Organisation.objects.get(aliss_id=aliss_id),
}
context['services'] = Service.objects.filter(organisation=context['organisation'], active=True)
if not context['services']:
raise Http404
return render(request, 'catalogueapp/organisation/index.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def adminindex(request):
context = {}
return render(request, 'catalogueapp/admin/index.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_add(request):
context = {}
if request.method == 'POST':
context['form'] = AddForm(request.POST)
if context['form'].is_valid():
url = ALISS_URL(context['form'].cleaned_data['url'])
if url.is_service():
importer = ALISS_Importer()
service = importer.import_from_service_URL(url)
importer.update_organisation(service.organisation)
return redirect('admin_service_index', aliss_id=service.aliss_id)
else:
context['form'].add_error('url', "That does not look like a service URL?")
else:
context['form'] = AddForm()
return render(request, 'catalogueapp/admin/add.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_service_list(request):
context = {
'services': Service.objects.all(),
}
return render(request, 'catalogueapp/admin/services.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_service_index(request, aliss_id):
context = {
'service': Service.objects.get(aliss_id=aliss_id),
}
if request.method == 'POST':
if request.POST['action'] == 'update':
importer = ALISS_Importer()
importer.update_service(context['service'])
elif request.POST['action'] == 'inactive':
context['service'].active = False
context['service'].save()
elif request.POST['action'] == 'active':
context['service'].active = True
context['service'].save()
return render(request, 'catalogueapp/admin/service/index.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_organisation_list(request):
context = {
'organisations': Organisation.objects.all(),
}
return render(request, 'catalogueapp/admin/organisations.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_organisation_index(request, aliss_id):
context = {
'organisation': Organisation.objects.get(aliss_id=aliss_id),
}
context['services'] = Service.objects.filter(organisation=context['organisation'])
if request.method == 'POST' and request.POST['action'] == 'update':
importer = ALISS_Importer()
importer.update_organisation(context['organisation'])
return render(request, 'catalogueapp/admin/organisation/index.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_organisation_edit(request, aliss_id):
context = {
'organisation': Organisation.objects.get(aliss_id=aliss_id),
}
if request.method == 'POST':
context['form'] = EditOrganisationForm(request.POST, instance=context['organisation'])
if context['form'].is_valid():
context['organisation'].our_description_markdown = context['form'].cleaned_data['our_description_markdown']
context['organisation'].save()
return redirect('admin_organisation_index', aliss_id=context['organisation'].aliss_id)
else:
context['form'] = EditOrganisationForm(instance=context['organisation'])
return render(request, 'catalogueapp/admin/organisation/edit.html', context)
@permission_required('catalogueapp.catalogueadmin', login_url='/accounts/login/')
def admin_organisation_edit_preview(request, aliss_id):
context = {
'organisation': Organisation.objects.get(aliss_id=aliss_id),
}
context['organisation'].our_description_markdown = request.POST.get('description_markdown', '')
return JsonResponse(
{'description_markdown_html': context['organisation'].get_our_description_markdown_html()}
)
@login_required()
def user_profile(request):
context = {}
return render(request, 'registration/profile.html', context)
|
[
"james.baster@opendataservices.coop"
] |
james.baster@opendataservices.coop
|
b7e62fcc6d1d6be43665ce941a5acff73bb88b22
|
92207eb2c2d8014da01831c3273efc581929f5c7
|
/step1/app.py
|
95d48d2908cb9ca1917bf5984e015f3806c89131
|
[] |
no_license
|
garetroy/createyourownserver
|
8de61511a96f65330056f06c23a5a5d880193248
|
768c587fb14a047ba838caca28f8ff519f10cb4b
|
refs/heads/master
| 2020-03-26T04:18:21.643119
| 2018-08-12T20:37:55
| 2018-08-12T20:37:55
| 144,496,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
import os
import flask
app = flask.Flask(__name__)
@app.route('/')
def home():
return flask.render_template('home.html')
@app.route('/page2')
def page2():
return flask.render_template('secondpage.html')
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(debug=True, host='0.0.0.0', port=port)
|
[
"mygarett@gmail.com"
] |
mygarett@gmail.com
|
564417eb8bcf4a0b26bad12b6b03cb0d0390b708
|
ce6538b5b7da162c1c690a346e7ec9ae0a6291f3
|
/glass_mine.py
|
6247053e40d8deebcb40be09594181b1019c11c7
|
[] |
no_license
|
DaniTodorowa/Softuni
|
391f13dd61a6d16cd48ee06e9b35b2fd931375df
|
f7c875fda4e13ec63152671509aaa6eca29d7f50
|
refs/heads/master
| 2022-11-25T23:34:49.744315
| 2020-08-02T08:23:44
| 2020-08-02T08:23:44
| 278,938,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
class Glass:
capacity = 250
def __init__(self):
self.content = 0
def fill(self, ml):
if (self.capacity - self.content) >= ml:
self.content += ml
return f"Glass filled with {ml} ml"
return f"Cannot add {ml} ml"
def empty(self):
self.content = 0
return "Glass is now empty"
def info(self):
return f"{Glass.capacity - self.content} ml left"
|
[
"danitodorova2106@gmail.com"
] |
danitodorova2106@gmail.com
|
072e921e8d2f60228a301e318a11571a82146dd8
|
c2e49d32b2613d702dd06067bd0ec7846a319fd5
|
/arelle/DialogArcroleGroup.py
|
cc9e7e8f45295a1a9ffd5633177bd3217e776755
|
[
"Apache-2.0"
] |
permissive
|
hamscher/Arelle
|
c9a020a5955a313c14a4db3a4e7122ec9599714c
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
refs/heads/master
| 2023-08-24T14:12:49.055954
| 2021-10-17T16:55:56
| 2021-10-17T16:55:56
| 284,703,106
| 0
| 0
|
Apache-2.0
| 2020-08-10T15:48:15
| 2020-08-03T13:08:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,702
|
py
|
'''
Created on Jun 15, 2012
@author: Mark V Systems Limited
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
from tkinter import Toplevel, N, S, E, W, PhotoImage
try:
from tkinter.ttk import Frame, Button
except ImportError:
from ttk import Frame, Button
import os, re
from arelle.UiUtil import gridHdr, gridCell, gridCombobox, label, checkbox
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
'''
caller checks accepted, if True, caller retrieves url
'''
def getArcroleGroup(mainWin, modelXbrl):
dialog = DialogArcroleGroup(mainWin, modelXbrl)
return dialog.selectedGroup
class DialogArcroleGroup(Toplevel):
def __init__(self, mainWin, modelXbrl):
parent = mainWin.parent
super(DialogArcroleGroup, self).__init__(parent)
self.mainWin = mainWin
self.parent = parent
self.modelXbrl = modelXbrl
        parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.selectedGroup = None
self.transient(self.parent)
self.title(_("Select Arcrole Group"))
frame = Frame(self)
'''
dialogFrame = Frame(frame, width=500)
dialogFrame.columnconfigure(0, weight=1)
dialogFrame.rowconfigure(0, weight=1)
dialogFrame.grid(row=0, column=0, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
'''
# mainWin.showStatus(_("loading formula options and parameters"))
# load grid
groupLabel = label(frame, 1, 0, _("Group:"))
self.arcroleGroups = mainWin.config.get("arcroleGroups", {})
arcroleGroupSelected = self.mainWin.config.get("arcroleGroupSelected")
if arcroleGroupSelected in self.arcroleGroups:
arcroleGroup = self.arcroleGroups[arcroleGroupSelected]
else:
arcroleGroup = []
arcroleGroupSelected = None
self.groupName = gridCombobox(frame, 2, 0,
value=arcroleGroupSelected,
values=sorted(self.arcroleGroups.keys()),
comboboxselected=self.comboBoxSelected)
groupToolTipMessage = _("Select an existing arcrole group, or enter a name for a new arcrole group. "
"If selecting an existing group, it can be edited, and changes will be saved in the config file. "
"If nothing is changed for an existing group, the saved setting is not disturbed. "
"Arcroles with checkboxes below are shown only for arcroles that have relationships in the loaded DTS, "
"but if an existing group is selected with more arcroles (that were not in the current DTS) then "
"the prior setting with not-present arcroles is preserved. ")
ToolTip(self.groupName, text=groupToolTipMessage, wraplength=360)
ToolTip(groupLabel, text=groupToolTipMessage, wraplength=360)
clearImage = PhotoImage(file=os.path.join(mainWin.imagesDir, "toolbarDelete.gif"))
clearGroupNameButton = Button(frame, image=clearImage, width=12, command=self.clearGroupName)
clearGroupNameButton.grid(row=0, column=3, sticky=W)
ToolTip(clearGroupNameButton, text=_("Remove the currently selected arcrole group from the config file. "
"After removing, you may select another arcrole, but must select 'OK' for the "
"removal to be saved. "),
wraplength=240)
arcrolesLabel = label(frame, 1, 1, _("Arcroles:"))
ToolTip(arcrolesLabel, text=_("Shows all the arcroles that are present in this DTS. "),
wraplength=240)
from arelle.ModelRelationshipSet import baseSetArcroles
self.options = {}
self.checkboxes = []
y = 1
for name, arcrole in baseSetArcroles(self.modelXbrl):
if arcrole.startswith("http://"):
self.options[arcrole] = arcrole in arcroleGroup
self.checkboxes.append(
checkbox(frame, 2, y,
name[1:],
arcrole,
columnspan=2)
)
y += 1
mainWin.showStatus(None)
self.options[XbrlConst.arcroleGroupDetect] = XbrlConst.arcroleGroupDetect in arcroleGroup
self.autoOpen = checkbox(frame, 1, y, _("detect"), XbrlConst.arcroleGroupDetect)
self.autoOpen.grid(sticky=W, columnspan=2)
self.checkboxes.append(self.autoOpen)
ToolTip(self.autoOpen, text=_("If checked, this arcrole group will be detected if any arcrole of the group is present in a DTS, for example to open a treeview pane. "),
wraplength=240)
okButton = Button(frame, text=_("OK"), width=8, command=self.ok)
cancelButton = Button(frame, text=_("Cancel"), width=8, command=self.close)
cancelButton.grid(row=y, column=1, sticky=E, columnspan=3, pady=3, padx=3)
okButton.grid(row=y, column=1, sticky=E, columnspan=3, pady=3, padx=64)
ToolTip(okButton, text=_("Open a treeview with named arcrole group and selected arcroles. "
"If any changes were made to checkboxes or name, save in the config. "),
wraplength=240)
ToolTip(cancelButton, text=_("Close this dialog, without saving arcrole group changes or opening a view pane. "),
wraplength=240)
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(1, weight=3)
frame.columnconfigure(2, weight=1)
frame.columnconfigure(3, weight=3)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
#self.bind("<Return>", self.ok)
#self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def ok(self, event=None):
groupName = self.groupName.value
arcrolesSelected = [checkbox.attr for checkbox in self.checkboxes if checkbox.value]
if groupName:
self.mainWin.config["arcroleGroupSelected"] = groupName
if groupName not in self.arcroleGroups or any(checkbox.isChanged for checkbox in self.checkboxes):
self.arcroleGroups[groupName] = arcrolesSelected
self.mainWin.config["arcroleGroups"] = self.arcroleGroups
self.mainWin.saveConfig()
self.selectedGroup = (groupName, arcrolesSelected)
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def comboBoxSelected(self, *args):
arcroles = self.arcroleGroups.get(self.groupName.value, [])
for checkbox in self.checkboxes:
checkbox.valueVar.set( checkbox.attr in arcroles )
checkbox.isChanged = False
def clearGroupName(self):
groupName = self.groupName.value
if groupName and groupName in self.arcroleGroups:
del self.arcroleGroups[groupName]
self.groupName.valueVar.set('')
self.groupName["values"] = sorted(self.arcroleGroups.keys())
for checkbox in self.checkboxes:
checkbox.valueVar.set( False )
checkbox.isChanged = False
|
[
"fischer@markv.com"
] |
fischer@markv.com
|
8ddae95edc4c8a92a22c923d4aa6fc4611593209
|
23f59ad45ac6b889f40b029a506fcc310c06aadb
|
/web/tests/test_request_context.py
|
99450ac6e3553a206fe50cc8847514944f406b5b
|
[] |
no_license
|
ForeverDreamer/im
|
bac08b9b28fbdd7cc0ced9c4c1f152f318ecb670
|
962c512226422b00d12dbb31de3d448eca4cbbdc
|
refs/heads/main
| 2023-07-17T17:19:34.400825
| 2021-08-12T10:07:11
| 2021-08-12T10:07:11
| 369,503,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from flask import request
from web.app import app
def test_request_context():
with app.test_request_context('/?name=Peter'):
assert request.path == '/'
assert request.args['name'] == 'Peter'
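The same helper can fake other parts of the request; a POST variant (hypothetical, following the same pattern) would look like:

def test_request_context_post():
    with app.test_request_context('/submit', method='POST', data={'name': 'Peter'}):
        assert request.method == 'POST'
        assert request.form['name'] == 'Peter'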
|
[
"499361328@qq.com"
] |
499361328@qq.com
|
e44f26b7a18c8d4a5582ff84ed15f48867d9be49
|
2eb779146daa0ba6b71344ecfeaeaec56200e890
|
/python/oneflow/test/modules/test_adaptive_pool.py
|
d8b2c08d0e1713a48de811b2e58505d8ab5d9f1a
|
[
"Apache-2.0"
] |
permissive
|
hxfxjun/oneflow
|
ee226676cb86f3d36710c79cb66c2b049c46589b
|
2427c20f05543543026ac9a4020e479b9ec0aeb8
|
refs/heads/master
| 2023-08-17T19:30:59.791766
| 2021-10-09T06:58:33
| 2021-10-09T06:58:33
| 414,906,649
| 0
| 0
|
Apache-2.0
| 2021-10-09T06:15:30
| 2021-10-08T08:29:45
|
C++
|
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.nn.common_types import _size_1_t
from packaging import version
import torch as torch_original
from typing import Union, Tuple
from oneflow.test_utils.automated_test_util import *
NoneType = type(None)
# Not the same as those in PyTorch because 'output_size' cannot be NoneType (even in 'torch.nn.AdaptiveAvgPoolXd')
_size_2_opt_t_not_none = Union[int, Tuple[Union[int, NoneType], Union[int, NoneType]]]
_size_3_opt_t_not_none = Union[
int, Tuple[Union[int, NoneType], Union[int, NoneType], Union[int, NoneType]]
]
@flow.unittest.skip_unless_1n1d()
class TestAdaptiveAvgPool(flow.unittest.TestCase):
@autotest()
def test_adaptive_avgpool1d(test_case):
m = torch.nn.AdaptiveAvgPool1d(output_size=random().to(_size_1_t))
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=3).to(device)
y = m(x)
return y
@autotest()
def test_adaptive_avgpool2d(test_case):
m = torch.nn.AdaptiveAvgPool2d(output_size=random().to(_size_2_opt_t_not_none))
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=4).to(device)
y = m(x)
return y
@unittest.skipIf(
version.parse(torch_original.__version__) < version.parse("1.10.0"),
"GPU version 'nn.AdaptiveAvgPool3d' has a bug in PyTorch before '1.10.0'",
)
@autotest()
def test_adaptive_avgpool3d(test_case):
m = torch.nn.AdaptiveAvgPool3d(output_size=random().to(_size_3_opt_t_not_none))
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=5).to(device)
y = m(x)
return y
@flow.unittest.skip_unless_1n1d()
class TestAdaptiveAvgPoolFunctional(flow.unittest.TestCase):
@autotest()
def test_adaptive_avgpool1d_functional(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=3).to(device)
return torch.nn.functional.adaptive_avg_pool1d(x, output_size=random().to(int))
@autotest()
def test_adaptive_avgpool2d_functional(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=4).to(device)
return torch.nn.functional.adaptive_avg_pool2d(x, output_size=random().to(int))
@unittest.skipIf(
version.parse(torch_original.__version__) < version.parse("1.10.0"),
"GPU version 'nn.AdaptiveAvgPool3d' has a bug in PyTorch before '1.10.0'",
)
@autotest()
def test_adaptive_avgpool3d_functional(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=5).to(device)
        return torch.nn.functional.adaptive_avg_pool3d(x, output_size=random().to(int))
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
hxfxjun.noreply@github.com
|
79d739837ad0c7eb49a3da4aa3773672688ce62f
|
4b3c4878b48b618608d42de1a7b476a37b46b9b8
|
/atcoder/abc100c.py
|
065fe6541fd2bf67959f14a90eec4756d9e3005f
|
[] |
no_license
|
zfhrp6/competitive-programming
|
3efd2a35717974c5ed9af364181a81144b6c8f19
|
459e7106ac4ba281217ce80cdc014023ce794bc3
|
refs/heads/master
| 2021-01-10T01:42:20.496027
| 2019-08-03T12:10:43
| 2019-08-03T12:10:43
| 51,929,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
N = int(input())
a = list(map(int, input().split()))
def div2count(num):
ret = 0
while num % 2 == 0:
ret += 1
num = num // 2
return ret
print(sum(map(div2count, a)))
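# Sanity check (illustrative, not in the original): for a = [8, 12, 5],
# div2count gives 3 + 2 + 0, so the program would print 5.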
|
[
"coricozizi@gmail.com"
] |
coricozizi@gmail.com
|
870a6ee86f3d3892ed79a10bc1bd26231e441502
|
fb8792f0a62f3b3658197a7aabd6aeecf8e311c9
|
/news/views.py
|
6ea24217bedd4ceae946a571f306a01414e25d96
|
[] |
no_license
|
munisisazade/matrix_tutorial
|
5ddb3af6ba958e663465356de24ae1e3112f5559
|
22718e2b937bc8856ac024059b9ba2780af38f7e
|
refs/heads/master
| 2020-03-22T06:11:36.399412
| 2018-07-11T17:59:43
| 2018-07-11T18:00:14
| 139,616,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from news.models import Article
from news.forms import ArticleForm
from django.contrib import messages
from django.core.paginator import Paginator
# Create your views here.
def index(request):
if request.method == 'GET':
obj = {}
obj["form"] = ArticleForm()
news = Article.objects.all()
news_list = Paginator(news, 2)
page = request.GET.get('page')
if page:
obj["news_list"] = news_list.page(page)
else:
obj["news_list"] = news_list.page(1)
return render(request, "index.html", obj)
else:
context = {}
form = ArticleForm(request.POST)
if form.is_valid():
# Article.objects.create(
# title=form.cleaned_data['title'],
# description=form.cleaned_data['description']
# )
article = form.save()
context["news_list"] = Article.objects.all()
context["form"] = form
        messages.success(request, "Form submitted successfully")
return render(request, "index.html", context)
def detail(request, name):
obj = {}
obj["object"] = Article.objects.get(id=name)
return render(request, "detail.html", obj)
|
[
"munisisazade@gmail.com"
] |
munisisazade@gmail.com
|
8ef6aae5ce529d7db5c1522ac34dfb38391949bd
|
7bc0075367290ff06565991e19033b13f0604f96
|
/Mundo 3/aula16/desafio073.py
|
74b06bdc97ec15c23b8957a536d52baf2183d0f8
|
[] |
no_license
|
iamtheluiz/curso_em_video_python
|
298acd90e36473fbf797ba7bf85d729d0ca28407
|
aa4247b7d206771f9c9b08ad5d8585c3813ddaff
|
refs/heads/master
| 2020-04-12T16:17:51.672662
| 2019-01-22T00:10:41
| 2019-01-22T00:10:41
| 162,608,169
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
# imports
print("""
|******************|
| Desafio073 |
|******************|
""")
print("Tabela do Brasileirão!")
# Variáveis
tabela = ('Palmeiras', 'Flamengo', 'Internacional', 'Grêmio', 'São Paulo', 'Atlético-MG', 'Athletico-PR', 'Cruzeiro', 'Botafogo', 'Santos', 'Bahia', 'Fluminense', 'Corinthians', 'Chapecoense', 'Ceará', 'Vasco', 'Sport', 'América-MG', 'Vitória', 'Paraná')
print("==== 5 Primeiros ====")
for pos, time in enumerate(tabela[:5]):
print(f'{pos + 1}º => {time}')
print("==== 4 Últimos ====")
for pos in range(len(tabela) - 4, len(tabela)):
print(f'{pos + 1}º => {tabela[pos]}')
print("==== Times em Ordem Alfabética ====")
for time in sorted(tabela):
print(time)
print("==== Onde tá a Chapecoense?? ====")
print(f'Ela está em {tabela.index("Chapecoense") + 1}º lugar!')
|
[
"iamtheluiz.dev@gmail.com"
] |
iamtheluiz.dev@gmail.com
|
dff03e0fa17d53d82ad062bc2d6e9dd9b6101a86
|
24a13b07623ce41e57ea4df1fcce0befb14d3406
|
/model/densenet201/model4_val5.py
|
55e955e927b2209bb720ad20719f51cf1d10ecd0
|
[
"MIT"
] |
permissive
|
shorxp/jd-fashion
|
5f37e6c2235200944869e9a0da4d741c89d63b9e
|
817f693672f418745e3a4c89a0417a3165b08130
|
refs/heads/master
| 2021-09-22T18:40:13.030601
| 2018-09-13T13:50:05
| 2018-09-13T13:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,032
|
py
|
"""
以model1为原型,新增real crop
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[16, 16, 16],
val_batch_size=256,
predict_batch_size=256,
initial_epoch=2,
epoch=[1, 4, 10],
lr=[0.0005, 0.00005, 0.000005],
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
base_model = keras.applications.DenseNet201(include_top=False, weights=weights,
input_shape=model_config.image_shape, pooling="avg")
x = base_model.output
x = Dense(256, use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
predictions = Dense(units=output_dim, activation='sigmoid')(x)
model = keras.Model(inputs=base_model.input, outputs=predictions)
if freeze_layers == -1:
print("freeze all basic layers, lr=%f" % lr)
for layer in base_model.layers:
layer.trainable = False
else:
if freeze_layers < 1:
freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
for layer in range(freeze_layers):
            base_model.layers[layer].trainable = False
print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
model.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.Adam(lr=lr))
# model.summary()
print("basic model have %d layers" % len(base_model.layers))
return model
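# Note on freeze_layers (illustrative): -1 freezes every base layer, a float
# such as 0.6 freezes the first 60% of the base layers, and an int freezes
# that many layers from the bottom of the network.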
def train():
evaluate_queue = queue.Queue()
evaluate_task = keras_util.EvaluateTask(evaluate_queue)
evaluate_task.setDaemon(True)
evaluate_task.start()
checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
start = time.time()
model_config.save_log("####### start train model")
init_stage = model_config.get_init_stage()
model_config.save_log("####### init stage is %d" % init_stage)
for i in range(init_stage, len(model_config.epoch)):
model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
step_size=model_config.get_steps_per_epoch(i) / 2)
train_flow = data_loader.KerasGenerator(model_config=model_config,
featurewise_center=True,
featurewise_std_normalization=True,
width_shift_range=0.15,
height_shift_range=0.1,
horizontal_flip=True,
real_transform=True,
rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
target_size=model_config.image_size,
batch_size=
model_config.train_batch_size[i],
shuffle=True,
label_position=model_config.label_position)
if i == 0:
model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
output_dim=len(model_config.label_position))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
lr=model_config.lr[i], weights=None)
if i == init_stage:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
model_config.initial_epoch, model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.initial_epoch,
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
model_config.save_log(
"####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.epoch[i - 1],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
model_config.save_log("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
|
[
"aa531811820@gmail.com"
] |
aa531811820@gmail.com
|
dd8785c0296e0a16b4aa8fe1670176cd02aefe7c
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_8_390c.py
|
35f9f192188dc8a48a12c80b619a504e65823cbe
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
#
#
# Euler 278
#
#
from time import time
from Functions import primes,RetFact
from collections import defaultdict
from math import sqrt
def FactorSieve(n):
n += 1
f = defaultdict(list)
for p in xrange(2, n):
if p not in f:
for i in xrange(p + p, n, p):
j, k = i, 1
while j % p == 0:
j //= p
k *= p
f[i].append(p)
if f[p]==[]:f[p]=[p]
return f
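# e.g. (illustrative, not in the original): FactorSieve(12)[12] == [2, 3],
# the distinct prime factors of 12.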
st=time()
# sqrt(1+b^2), sqrt(1+c^2) and sqrt(b^2+c^2)
#F=FactorSieve(1250002)
F=FactorSieve(1250)
def T(b,c):
a2 = b*b +1
b2 = c*c +1
#c2 = b*b + c*c
#return (((4*a2*b2) - (a2 + b2 - c2)**2)**.5)/4.
#A4=(((4*a2*b2) - 4)**.5)
A42 = (a2*b2) - 1
#A = pow(A42,.5)/2
#A = (A42**.5)/2
A = sqrt(A42)/2
return A
n=10**8
summ = 0
for i in xrange(2,22872,2):
for j in xrange(i,50000000,2):
A = T(i,j)
if int(A)==A and A<=n:
summ += int(A)
# if j in F:
#print i,j,int(A) #,F[i],F[j]
# else:
# print "! ",i,j,int(A) #,F[i],RetFact(j) #,RetFact(A)
if A>n:
#print "overflow at", i,j
break
print "sum of funky triangles is ",summ
print "time elapsed ", time()-st
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
6c3bcd0588462a4e5f7f60dd06b5f197d341edf8
|
19049316bb6d769bffcb61d2dfb2b241b65fdb65
|
/server/app/ai/mmfashion/mmfashion/models/registry.py
|
8cbc143707cb4ee919a9e5b735f1957dcb75e9fe
|
[
"Apache-2.0"
] |
permissive
|
alveloper/fashion-scanner-v1
|
7e43b515ad329d19982e5dd5fe92dfbab0bad948
|
067d73cbe3417c2ef337e64ca251c4f883713974
|
refs/heads/master
| 2023-08-12T03:21:32.169558
| 2021-09-07T15:38:58
| 2021-09-07T15:38:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from ..utils import Registry
BACKBONES = Registry('backbone')
GLOBALPOOLING = Registry('global_pool') # global pooling
ATTRPREDICTOR = Registry('attr_predictor') # predict attributes
CATEPREDICTOR = Registry('cate_predictor') # predict category
LOSSES = Registry('loss') # loss function
PREDICTOR = Registry('predictor')
GEOMETRICMATCHING = Registry('geometric_matching')
|
[
"bo373@naver.com"
] |
bo373@naver.com
|
14c48c22b1e687c04b46200c8bfa69ee603b2a11
|
e47bc9571c59b1c6e8aeb4231a286ab8577802d4
|
/easy/888-fair-candy-swap.py
|
8aee86dc55f4d02630c6b484960f1a8054bca360
|
[
"MIT"
] |
permissive
|
changmeng72/leecode_python3
|
d0176502dfaf3c8b455ec491c72979dd25b66b3e
|
8384f52f0dd74b06b1b6aefa277dde6a228ff5f3
|
refs/heads/main
| 2023-05-27T10:35:43.465283
| 2021-06-09T00:20:59
| 2021-06-09T00:20:59
| 375,127,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
class Solution:
def fairCandySwap(self, aliceSizes: List[int], bobSizes: List[int]) -> List[int]:
aliceSum = sum(aliceSizes)
bobSum = sum(bobSizes)
diff = (aliceSum - bobSum)/2
bob_dict = {}
for i in range(len(bobSizes)):
bob_dict[bobSizes[i]] = i
for i in range(len(aliceSizes)):
k = bob_dict.get(aliceSizes[i]-diff,-1)
if k!=-1:
return [aliceSizes[i],bobSizes[k]]
return [0,0]
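# Worked example (illustrative): aliceSizes=[1, 2], bobSizes=[2, 3] gives
# diff = -1, so Alice's 1 matches Bob's 2 and the method returns [1, 2];
# after the swap both sides total 4.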
|
[
"noreply@github.com"
] |
changmeng72.noreply@github.com
|
99393122c191269914bababfb52fe894d63c4585
|
f889bc01147869459c0a516382e7b95221295a7b
|
/swagger_client/models/bundle_data_bundle_option_extension_interface.py
|
186314a8a23389437f249b99836d42fcf7756f20
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,468
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BundleDataBundleOptionExtensionInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
BundleDataBundleOptionExtensionInterface - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BundleDataBundleOptionExtensionInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
0a3864ab0e1a31b27abb83ceb06746239bded135
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03600/s387875234.py
|
e5d97020c3d1d82fca4cb72b6c976aa6b076e6c5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
import heapq
from collections import deque
N = int(input())
A = [[int(a) for a in input().split()] for _ in range(N)]
def dijkstra_heap(s, edge, n):
    # shortest distance from source s to each vertex
d = [10**9+1] * n
    used = [True] * n  # True: not yet finalized
d[s] = 0
used[s] = False
edgelist = []
for a,b in edge[s]:
heapq.heappush(edgelist,a*(10**6)+b)
while len(edgelist):
minedge = heapq.heappop(edgelist)
        # among the not-yet-finalized vertices, pick the one with the smallest distance
if not used[minedge%(10**6)]:
continue
v = minedge%(10**6)
d[v] = minedge//(10**6)
used[v] = False
for e in edge[v]:
if used[e[1]]:
heapq.heappush(edgelist,(e[0]+d[v])*(10**6)+e[1])
return d
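# Note (illustrative): each heap entry packs (distance, vertex) into a single
# int as distance * 10**6 + vertex, so e.g. distance 42 to vertex 7 becomes
# 42000007; the heap then orders entries by distance first.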
Road = [[] for _ in range(N)]
h = []
for i in range(N):
for j in range(i+1, N):
heapq.heappush(h, (A[i][j], i, j))
m = h[0][0]
D = [[10**9+1]*N for _ in range(N)]
ans = 0
while h:
t = heapq.heappop(h)
cost = t[0]
i = t[1]
j = t[2]
if cost < 2*m:
Road[i].append((cost, j))
Road[j].append((cost, i))
D[i][j] = cost
D[j][i] = cost
elif D[i][j] > cost:
D[i] = dijkstra_heap(i, Road, N)
if D[i][j] > cost:
Road[i].append((cost, j))
Road[j].append((cost, i))
D[i][j] = cost
D[j][i] = cost
if D[i][j] < cost:
ans = -1
break
if ans == 0:
for i in range(N):
for t in Road[i]:
ans += t[0]
ans //= 2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6a7f5db06b119930e8b4142194d3392943046c09
|
3d50cdc1fd01717dbcc43007d96a390631725ab4
|
/models/api/backtest/analysis.py
|
588e064d6eec6a3962b2eb0095390f9a30caab1e
|
[] |
no_license
|
danielwangxh/golden_eye
|
22540fc9b60e710263d348de2ecce13928b795fc
|
1f6c9dc6b5cb806398024e5b678f150c074a689a
|
refs/heads/master
| 2021-06-12T06:02:15.028910
| 2017-02-05T04:31:16
| 2017-02-05T04:31:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,485
|
py
|
# -*- coding: utf8 -*-
import os
from simplejson import loads
import pandas as pd
from models.api.backtest.calculator import FittingDataCalculator
from sell_signals.base import SellSignal
from consts import ema_file_dir, init_point, init_time, init_offset
from libs.utils import ifcode_day_map
def read_df(path):
df_list = loads(open(path).read())
return pd.DataFrame(df_list, columns=['time_index', 'price', 'volume',
'ema_short', 'ema_long'])
class DataAnalyzer(object):
@classmethod
def ema(cls, date, ifcode, period_short, period_long):
_file = '%s/%s_%s_%s_%s' % (ema_file_dir, date, ifcode, period_short, period_long)
if os.path.isfile(_file):
df = read_df(_file)
else:
df = FittingDataCalculator.ema_df(date, ifcode, period_short, period_long)
if df.empty:
return []
sig_infos = SellSignal.compare_ema(df, limit_period=60)
profit_infos = SellSignal.profit_infos(sig_infos)
return profit_infos
#**************************************************************
#macd
@classmethod
def macd(cls, date, ifcode, period_short=12, period_long=26, period_dif=9, \
pre_point=init_point, pre_time=init_time, offset=init_offset):
#date : 'yyyy-mm-dd'
df = FittingDataCalculator.macd_df(date, ifcode, period_short, period_long, \
period_dif, pre_point, pre_time)
if df.empty:
return []
sig_infos = SellSignal.compare_macd(df, 3, offset)
profit_infos = SellSignal.profit_infos(sig_infos)
flags = SellSignal.out_flags(sig_infos)
return profit_infos
@classmethod
def macd_chart(cls, date, ifcode, period_short=12, period_long=26, period_dif=9, \
pre_point=init_point, pre_time=init_time, offset=init_offset):
df = FittingDataCalculator.macd_df(date, ifcode, period_short, period_long, \
period_dif, pre_point, pre_time)
price = df[['time_index', 'price']].values.tolist()
macd_dif = df[['time_index', 'macd_dif']].values.tolist()
macd_dem = df[['time_index', 'macd_dem']].values.tolist()
# flag
sig_infos = SellSignal.compare_macd(df, 3, offset)
flags = SellSignal.out_flags(sig_infos)
return [price, macd_dif, macd_dem, flags]
#*********************************************************
#analysis
#*********************************************************
@classmethod
def macd_analysis(cls, date, ifcode, period_short, period_long, \
period_dif, pre_point, pre_time, pre_offset):
#date : 'yyyy-mm-dd'
df = FittingDataCalculator.macd_df(date, ifcode, period_short, period_long, \
period_dif, pre_point, pre_time)
if df.empty:
return []
sig_infos = SellSignal.compare_macd(df, 3, pre_offset)
profit_infos = SellSignal.profit_infos(sig_infos)
return profit_infos
@classmethod
def macd_if_analysis(cls, ifcode, pre_point, pre_time, pre_offset, \
period_short=12, period_long=26, period_dif=9, trans_amount=1):
rs = []
total = 0
pos_num = 0
nag_num = 0
trans_total_num = 0
date_list = ifcode_day_map(ifcode)
for day in date_list:
profit_infos = cls.macd_analysis(day, ifcode, period_short, period_long, \
period_dif, pre_point, pre_time, pre_offset)
profit_all = 0
trans_num = (len(profit_infos) - 1) / 2
trans_total_num += trans_num
for item in profit_infos:
if item['gain'] != '-':
profit_all += int(item['gain']) * trans_amount
rs.append({'date': day, 'profit': profit_all, 'trans_num': trans_num})
total += profit_all
if profit_all >= 0:
pos_num += 1
elif profit_all < 0 :
nag_num += 1
if nag_num == 0:
profit_rate = pos_num
else:
profit_rate = pos_num*1.0/nag_num
fees = trans_total_num * 2300
real_profit = total - fees
return {'profit': total, 'real_profit': real_profit,
'profit_rate': profit_rate, 'trans_total_num': trans_total_num,
'fees': fees, 'trans_amount': trans_amount}
#*************************************************************
#boll
@classmethod
def boll_chart(cls, date, ifcode, period_short=50, period_long=80, pre_point=10):
df = FittingDataCalculator.boll_df(date, ifcode, period_short, period_long, pre_point)
price = df[['time_index', 'price']].values.tolist()
boll_up = df[['time_index', 'boll_up']].values.tolist()
boll_dn = df[['time_index', 'boll_dn']].values.tolist()
boll_mb = df[['time_index', 'boll_mb']].values.tolist()
return price, boll_up, boll_dn, boll_mb
@classmethod
def boll(cls, date, ifcode, period_short=50, period_long=80, pre_point=10):
df = FittingDataCalculator.boll_df(date, ifcode, period_short, period_long, pre_point)
sig_infos = SellSignal.compare_boll_b_percent(df)
profit_infos = SellSignal.profit_infos(sig_infos)
return profit_infos
|
[
"onestar1967@gmail.com"
] |
onestar1967@gmail.com
|
62ff16334955a6f7413d9d5e053b966753d69de4
|
e47bd43968732b55907aa1b8d3530b9ec12bc0e1
|
/pipeline/pipeline_stack.py
|
91dd67f89f5ee450816f3521282190c65eac6b36
|
[
"Apache-2.0"
] |
permissive
|
damshenas/enechobot
|
523763c8bb3d54e86fb6586c4883ca3008704014
|
9e346a87433abc1d95c60c3b9ee9401becf7a88f
|
refs/heads/main
| 2023-06-28T16:21:08.637064
| 2021-08-09T10:19:08
| 2021-08-09T10:19:08
| 394,225,747
| 0
| 0
|
Apache-2.0
| 2021-08-09T10:19:09
| 2021-08-09T09:15:21
| null |
UTF-8
|
Python
| false
| false
| 4,368
|
py
|
import json
from aws_cdk import (core, aws_codebuild as codebuild,
aws_codecommit as codecommit,
aws_codepipeline as codepipeline,
aws_codepipeline_actions as codepipeline_actions,
aws_s3 as s3,
aws_lambda as lambda_)
class PipelineStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, *, repo_name: str=None,
application_code: lambda_.CfnParametersCode=None, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# In this stack we create the pipeline using CDK
# We have one pipeline, 2 builds and 1 deployment
# The pipeline has multiple stages (in this example we have 3 stages: Source, Build, and Deploy)
        # one build creates the artifact used by the Lambda function
        # the other build creates the CloudFormation template and the whole infra
        # read the buildspecs JSON from a file; it could also be written directly in Python
with open('./pipeline/buildspecs.json') as f:
buildspecs = json.load(f)
### S3 bucket
# for build output
build_output_S3_bucket = s3.Bucket(self, "BUILD_OUTCOME")
# Important Note. It is better not to create the repo in the stack as destroying the stack can delete the repo!!
code = codecommit.Repository.from_repository_name(self, "ImportedRepo", repo_name)
        # buildspec phase name: build. Possible phases: build, install, post_build, pre_build
cdk_build_spec = codebuild.BuildSpec.from_object(buildspecs["cdk_build_spec"])
telegram_build_spec = codebuild.BuildSpec.from_object(buildspecs["telegram_build_spec"])
cdk_build = codebuild.PipelineProject(self, "CdkBuild", build_spec=cdk_build_spec)
telegram_build = codebuild.PipelineProject(self, 'telegram', build_spec=telegram_build_spec)
source_output = codepipeline.Artifact()
cdk_build_output = codepipeline.Artifact("CdkBuildOutput")
telegram_build_output = codepipeline.Artifact("TelegramBuildOutput")
telegram_lambda_location = telegram_build_output.s3_location
pipeline_source_stage = codepipeline.StageProps(stage_name="Source",
actions=[
codepipeline_actions.CodeCommitSourceAction(
action_name="CodeCommit_Source",
repository=code,
branch="develop",
output=source_output)])
pipeline_build_stage = codepipeline.StageProps(stage_name="Build",
actions=[
codepipeline_actions.CodeBuildAction(
action_name="telegram_build",
project=telegram_build,
input=source_output,
outputs=[telegram_build_output]),
codepipeline_actions.CodeBuildAction(
action_name="CDK_Build",
project=cdk_build,
input=source_output,
outputs=[cdk_build_output])
])
pipeline_deploy_stage_action1 = codepipeline_actions.CloudFormationCreateUpdateStackAction(
action_name="Lambda_CFN_Deploy",
template_path=cdk_build_output.at_path("EnEchoBot.template.json"),
stack_name="TelegramDeploymentStack",
admin_permissions=True,
parameter_overrides=dict(
application_code.assign(
bucket_name=telegram_lambda_location.bucket_name,
object_key=telegram_lambda_location.object_key,
object_version=telegram_lambda_location.object_version)),
extra_inputs=[telegram_build_output])
pipeline_deploy_stage = codepipeline.StageProps(stage_name="Deploy", actions=[pipeline_deploy_stage_action1])
codepipeline.Pipeline(self, "Pipeline", stages=[pipeline_source_stage, pipeline_build_stage, pipeline_deploy_stage], artifact_bucket=build_output_S3_bucket)
|
[
"you@example.com"
] |
you@example.com
|
e96ceb65864385e40bf500f14c613be2609612c7
|
ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c
|
/venv/Lib/site-packages/qiskit/chemistry/algorithms/ground_state_solvers/minimum_eigensolver_factories/vqe_uvccsd_factory.py
|
b6420e3857c9e732031dfb81131ff2bd2a339632
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
shivam675/Quantum-CERN
|
b60c697a3a7ad836b3653ee9ce3875a6eafae3ba
|
ce02d9198d9f5a1aa828482fea9b213a725b56bb
|
refs/heads/main
| 2023-01-06T20:07:15.994294
| 2020-11-13T10:01:38
| 2020-11-13T10:01:38
| 330,435,191
| 1
| 0
|
MIT
| 2021-01-17T16:29:26
| 2021-01-17T16:29:25
| null |
UTF-8
|
Python
| false
| false
| 6,073
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The minimum eigensolver factory for ground state calculation algorithms."""
from typing import Optional
import numpy as np
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import MinimumEigensolver, VQE
from qiskit.aqua.operators import ExpectationBase
from qiskit.aqua.components.optimizers import Optimizer
from qiskit.chemistry.components.initial_states import VSCF
from qiskit.chemistry.components.variational_forms import UVCC
from qiskit.chemistry.transformations import BosonicTransformation
from .minimum_eigensolver_factory import MinimumEigensolverFactory
class VQEUVCCSDFactory(MinimumEigensolverFactory):
"""A factory to construct a VQE minimum eigensolver with UVCCSD ansatz wavefunction."""
def __init__(self,
quantum_instance: QuantumInstance,
optimizer: Optional[Optimizer] = None,
initial_point: Optional[np.ndarray] = None,
expectation: Optional[ExpectationBase] = None,
include_custom: bool = False) -> None:
"""
Args:
quantum_instance: The quantum instance used in the minimum eigensolver.
optimizer: A classical optimizer.
initial_point: An optional initial point (i.e. initial parameter values)
for the optimizer. If ``None`` then VQE will look to the variational form for a
preferred point and if not will simply compute a random one.
expectation: The Expectation converter for taking the average value of the
Observable over the var_form state function. When ``None`` (the default) an
:class:`~qiskit.aqua.operators.expectations.ExpectationFactory` is used to select
an appropriate expectation based on the operator and backend. When using Aer
qasm_simulator backend, with paulis, it is however much faster to leverage custom
Aer function for the computation but, although VQE performs much faster
with it, the outcome is ideal, with no shot noise, like using a state vector
simulator. If you are just looking for the quickest performance when choosing Aer
qasm_simulator and the lack of shot noise is not an issue then set `include_custom`
parameter here to ``True`` (defaults to ``False``).
include_custom: When `expectation` parameter here is None setting this to ``True`` will
allow the factory to include the custom Aer pauli expectation.
"""
self._quantum_instance = quantum_instance
self._optimizer = optimizer
self._initial_point = initial_point
self._expectation = expectation
self._include_custom = include_custom
@property
def quantum_instance(self) -> QuantumInstance:
"""Getter of the quantum instance."""
return self._quantum_instance
@quantum_instance.setter
def quantum_instance(self, q_instance: QuantumInstance) -> None:
"""Setter of the quantum instance."""
self._quantum_instance = q_instance
@property
def optimizer(self) -> Optimizer:
"""Getter of the optimizer."""
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer: Optimizer) -> None:
"""Setter of the optimizer."""
self._optimizer = optimizer
@property
def initial_point(self) -> np.ndarray:
"""Getter of the initial point."""
return self._initial_point
@initial_point.setter
def initial_point(self, initial_point: np.ndarray) -> None:
"""Setter of the initial point."""
self._initial_point = initial_point
@property
def expectation(self) -> ExpectationBase:
"""Getter of the expectation."""
return self._expectation
@expectation.setter
def expectation(self, expectation: ExpectationBase) -> None:
"""Setter of the expectation."""
self._expectation = expectation
@property
def include_custom(self) -> bool:
"""Getter of the ``include_custom`` setting for the ``expectation`` setting."""
return self._include_custom
@include_custom.setter
def include_custom(self, include_custom: bool) -> None:
"""Setter of the ``include_custom`` setting for the ``expectation`` setting."""
self._include_custom = include_custom
def get_solver(self, transformation: BosonicTransformation) -> MinimumEigensolver:
"""Returns a VQE with a UVCCSD wavefunction ansatz, based on ``transformation``.
This works only with a ``BosonicTransformation``.
Args:
transformation: a bosonic qubit operator transformation.
Returns:
A VQE suitable to compute the ground state of the molecule transformed
by ``transformation``.
"""
basis = transformation.basis
num_modes = transformation.num_modes
if isinstance(basis, int):
basis = [basis] * num_modes
num_qubits = sum(basis)
initial_state = VSCF(basis)
var_form = UVCC(num_qubits, basis, [0, 1], initial_state=initial_state)
vqe = VQE(var_form=var_form,
quantum_instance=self._quantum_instance,
optimizer=self._optimizer,
initial_point=self._initial_point,
expectation=self._expectation,
include_custom=self._include_custom)
return vqe
def supports_aux_operators(self):
return VQE.supports_aux_operators()
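# Usage sketch (illustrative; `backend` and `bosonic_transformation` are
# assumed placeholders, not part of this file):
# factory = VQEUVCCSDFactory(QuantumInstance(backend), include_custom=True)
# solver = factory.get_solver(bosonic_transformation)  # -> configured VQE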
|
[
"vinfinitysailor@gmail.com"
] |
vinfinitysailor@gmail.com
|
f69e4d4f4288a29c8508e0ac794a2eaa0995a2b8
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week02/1/proxy/proxy/spiders/maoyan_20200705155728.py
|
f4c680398e9df42ea33eb12d51a8bd6097744c5a
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
import scrapy
from proxy.items import ProxyItem
import lxml.etree
class MaoyanSpider(scrapy.Spider):
name = 'maoyan'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/']
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593315374931.1593349407197.45; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; mojo-session-id={"id":"afe2ef89c10d6e1c8fc94e26d831b20e","time":1593349078441}; mojo-trace-id=4; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593349407; _lxsdk_s=172fb017d51-4c4-303-783%7C%7C8',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
# def parse(self, response):
# pass
def start_requests(self):
        url = 'https://maoyan.com/board/4'
yield scrapy.Request(url=url,headers=self.header,callback=self.parse)
    def parse(self, response):
        selector = lxml.etree.HTML(response.text)
        for i in range(1, 11):  # XPath dd[] indices are 1-based
            item = ProxyItem()
            base = '//*[@id="app"]/div/div/div[1]/dl/dd[' + str(i) + ']/div/div/div[1]/'
            link = selector.xpath(base + 'p[1]/a')[0].get('href')
            name = selector.xpath(base + 'p[1]/a')[0].get('title')
            time = selector.xpath(base + 'p[3]')[0].text
            item['films_name'] = name
            item['release_time'] = time
            print(link)
            yield scrapy.Request(url=response.urljoin(link), headers=self.header, meta={'item': item}, callback=self.parse1)
def parse1(self, response):
item = response.meta['item']
selector = lxml.etree.HTML(response.text)
        type = selector.xpath('/html/body/div[3]/div/div[2]/div[1]/ul/li[1]')[0].text.replace('\n', ' ')
print(type)
item['films_type'] = type
print(item)
yield item
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
fe29ddbba5045df30a1934eb9bcd7574cdc67eb7
|
71c9bf59320d5a67d1395c02ee7d68805f820db7
|
/solutions/A_h_djikshtra's_algorithm.py
|
28868dd91a4a652195450a6db4e23dc99f6283b9
|
[] |
no_license
|
AnjalBam/iw-assignment-python-iii
|
34609eef05f4b57a3cc17166603c121c1635b2a9
|
c1a05c1a0091de3b718fcb476bbc906817492294
|
refs/heads/master
| 2022-11-13T20:23:39.642580
| 2020-07-13T09:45:00
| 2020-07-13T09:45:00
| 279,208,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
"""
A. Make pythonic solutions for each of the following data structure
and algorithm problems.
e) Dijkstra's Algorithm
"""
from collections import deque, namedtuple
# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')
def make_edge(start, end, cost=1):
return Edge(start, end, cost)
class Graph:
def __init__(self, edges):
# check if the data is right
wrong_edges = [i for i in edges if len(i) not in [2, 3]]
if wrong_edges:
raise ValueError('Wrong edges data: {}'.format(wrong_edges))
self.edges = [make_edge(*edge) for edge in edges]
@property
def vertices(self):
return set(
sum(
([edge.start, edge.end] for edge in self.edges), []
)
)
def get_node_pairs(self, n1, n2, both_ends=True):
if both_ends:
node_pairs = [[n1, n2], [n2, n1]]
else:
node_pairs = [[n1, n2]]
return node_pairs
def remove_edge(self, n1, n2, both_ends=True):
node_pairs = self.get_node_pairs(n1, n2, both_ends)
edges = self.edges[:]
for edge in edges:
if [edge.start, edge.end] in node_pairs:
self.edges.remove(edge)
def add_edge(self, n1, n2, cost=1, both_ends=True):
node_pairs = self.get_node_pairs(n1, n2, both_ends)
for edge in self.edges:
if [edge.start, edge.end] in node_pairs:
return ValueError('Edge {} {} already exists'.format(n1, n2))
self.edges.append(Edge(start=n1, end=n2, cost=cost))
if both_ends:
self.edges.append(Edge(start=n2, end=n1, cost=cost))
@property
def neighbours(self):
neighbours = {vertex: set() for vertex in self.vertices}
for edge in self.edges:
neighbours[edge.start].add((edge.end, edge.cost))
return neighbours
def dijkstra(self, source, dest):
assert source in self.vertices, 'Such source node doesn\'t exist'
distances = {vertex: inf for vertex in self.vertices}
previous_vertices = {
vertex: None for vertex in self.vertices
}
distances[source] = 0
vertices = self.vertices.copy()
while vertices:
current_vertex = min(
vertices, key=lambda vertex: distances[vertex])
vertices.remove(current_vertex)
if distances[current_vertex] == inf:
break
for neighbour, cost in self.neighbours[current_vertex]:
alternative_route = distances[current_vertex] + cost
if alternative_route < distances[neighbour]:
distances[neighbour] = alternative_route
previous_vertices[neighbour] = current_vertex
path, current_vertex = deque(), dest
while previous_vertices[current_vertex] is not None:
path.appendleft(current_vertex)
current_vertex = previous_vertices[current_vertex]
if path:
path.appendleft(current_vertex)
return path
graph = Graph([
("a", "b", 7), ("a", "c", 9), ("a", "f", 14), ("b", "c", 10),
("b", "d", 15), ("c", "d", 11), ("c", "f", 2), ("d", "e", 6),
("e", "f", 9)])
print(graph.dijkstra("a", "e"))
|
[
"anjalbam81@gmail.com"
] |
anjalbam81@gmail.com
|
9299880d2374060c24d9a6fd117a920d11784c44
|
ea2cf796332879d86561f80882da93b672966448
|
/configs/rotated_retinanet/ssdd/rotated_retinanet_hbb_r50_adamw_fpn_6x_ssdd_oc.py
|
6b6e85da78c1deeca585141b1fa61f5de0d0f2e7
|
[
"Apache-2.0"
] |
permissive
|
yangxue0827/h2rbox-mmrotate
|
968c34adf22eca073ab147b670226884ea80ac61
|
cfd7f1fef6ae4d4e17cb891d1ec144ece8b5d7f5
|
refs/heads/main
| 2023-05-23T10:02:58.344148
| 2023-02-14T05:28:38
| 2023-02-14T05:28:38
| 501,580,810
| 68
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
_base_ = ['./rotated_retinanet_hbb_r50_fpn_6x_ssdd_oc.py']
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
|
[
"yangxue0827@126.com"
] |
yangxue0827@126.com
|
bfa0210d76e2501a11b68119104d1f8ddab47ced
|
e76ea38dbe5774fccaf14e1a0090d9275cdaee08
|
/src/xwalk/tools/build/win/FILES.cfg
|
446675943eaebc95282133c60583a38067c11deb
|
[
"BSD-3-Clause"
] |
permissive
|
eurogiciel-oss/Tizen_Crosswalk
|
efc424807a5434df1d5c9e8ed51364974643707d
|
a68aed6e29bd157c95564e7af2e3a26191813e51
|
refs/heads/master
| 2021-01-18T19:19:04.527505
| 2014-02-06T13:43:21
| 2014-02-06T13:43:21
| 16,070,101
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
cfg
|
# -*- python -*-
# ex: set syntax=python:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is a buildbot configuration file containing a tagged list of files
# processed by the stage/archive scripts. The known tags are:
#
# filename: Name of the file in the build output directory.
# arch: List of CPU architectures for which this file should be processed
# Leave this unspecified to process for all architectures.
# Acceptable values are 64bit, 32bit and arm.
# buildtype: List of build types for which this file should be processed.
# archive: The name of the archive file to store filename in. If not specified,
# filename is added to the default archive (e.g. platform.zip). If
# archive == filename, filename is archived directly, not zipped.
# direct_archive: Force a file to be archived as-is, bypassing zip creation.
# NOTE: This flag will not apply if more than one file has the
# same 'archive' name, which will create a zip of all the
# files instead.
# filegroup: List of named groups to which this file belongs.
# default: Legacy "default archive". TODO(mmoss): These should
# be updated to specify an 'archive' name and then this
# filegroup and the related archive_utils.ParseLegacyList()
# should go away.
# symsrc: Files to upload to the symbol server.
# optional: List of buildtypes for which the file might not exist, and it's not
# considered an error.
FILES = [
{
'filename': 'xwalk.exe',
'buildtype': ['dev', 'official'],
},
{
'filename': 'xwalk.pak',
'buildtype': ['dev', 'official'],
},
{
'filename': 'ffmpegsumo.dll',
'buildtype': ['dev'],
},
{
'filename': 'icudt.dll',
'buildtype': ['dev', 'official'],
},
{
'filename': 'D3DCompiler_46.dll',
'buildtype': ['dev'],
},
{
'filename': 'libEGL.dll',
'buildtype': ['dev', 'official'],
},
{
'filename': 'libGLESv2.dll',
'buildtype': ['dev', 'official'],
},
# installer creation scripts
{
'filename': 'create_windows_installer.bat',
'buildtype': ['dev', 'official'],
},
{
'filename': 'app.wxs.templ',
'buildtype': ['dev', 'official'],
},
{
'filename': 'guid.vbs',
'buildtype': ['dev', 'official'],
},
# syms files
{
'filename': 'xwalk.exe.pdb',
'buildtype': ['dev', 'official'],
'archive': 'xwalk-win32-syms.zip',
},
{
'filename': 'ffmpegsumo.dll.pdb',
'buildtype': ['dev'],
'archive': 'xwalk-win32-syms.zip',
},
{
'filename': 'libEGL.dll.pdb',
'buildtype': ['dev', 'official'],
'archive': 'xwalk-win32-syms.zip',
},
{
'filename': 'libGLESv2.dll.pdb',
'buildtype': ['dev', 'official'],
'archive': 'xwalk-win32-syms.zip',
},
# XPK package generator
{
'filename': 'tools/make_xpk.py',
'buildtype': ['dev', 'official'],
},
]
|
[
"ronan@fridu.net"
] |
ronan@fridu.net
|
7ff073702298c100dd9cd88ef17679ad6ee1850e
|
412b699e0f497ac03d6618fe349f4469646c6f2d
|
/env/lib/python3.8/site-packages/Crypto/Util/Padding.py
|
da69e55987227357a55f8e1b57fae5f7eb8cac74
|
[
"MIT"
] |
permissive
|
EtienneBrJ/Portfolio
|
7c70573f02a5779f9070d6d9df58d460828176e3
|
6b8d8cf9622eadef47bd10690c1bf1e7fd892bfd
|
refs/heads/main
| 2023-09-03T15:03:43.698518
| 2021-11-04T01:02:33
| 2021-11-04T01:02:33
| 411,076,325
| 1
| 0
|
MIT
| 2021-10-31T13:43:09
| 2021-09-27T23:48:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,313
|
py
|
#
# Util/Padding.py : Functions to manage padding
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
__all__ = [ 'pad', 'unpad' ]
from Crypto.Util.py3compat import *
def pad(data_to_pad, block_size, style='pkcs7'):
"""Apply standard padding.
Args:
data_to_pad (byte string):
The data that needs to be padded.
block_size (integer):
The block boundary to use for padding. The output length is guaranteed
to be a multiple of :data:`block_size`.
style (string):
Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.
Return:
byte string : the original data with the appropriate padding added at the end.
"""
padding_len = block_size-len(data_to_pad)%block_size
if style == 'pkcs7':
padding = bchr(padding_len)*padding_len
elif style == 'x923':
padding = bchr(0)*(padding_len-1) + bchr(padding_len)
elif style == 'iso7816':
padding = bchr(128) + bchr(0)*(padding_len-1)
else:
raise ValueError("Unknown padding style")
return data_to_pad + padding
def unpad(padded_data, block_size, style='pkcs7'):
"""Remove standard padding.
Args:
padded_data (byte string):
A piece of data with padding that needs to be stripped.
block_size (integer):
The block boundary to use for padding. The input length
must be a multiple of :data:`block_size`.
style (string):
Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.
Return:
byte string : data without padding.
Raises:
ValueError: if the padding is incorrect.
"""
pdata_len = len(padded_data)
if pdata_len == 0:
raise ValueError("Zero-length input cannot be unpadded")
if pdata_len % block_size:
raise ValueError("Input data is not padded")
if style in ('pkcs7', 'x923'):
padding_len = bord(padded_data[-1])
if padding_len<1 or padding_len>min(block_size, pdata_len):
raise ValueError("Padding is incorrect.")
if style == 'pkcs7':
if padded_data[-padding_len:]!=bchr(padding_len)*padding_len:
raise ValueError("PKCS#7 padding is incorrect.")
else:
if padded_data[-padding_len:-1]!=bchr(0)*(padding_len-1):
raise ValueError("ANSI X.923 padding is incorrect.")
elif style == 'iso7816':
padding_len = pdata_len - padded_data.rfind(bchr(128))
if padding_len<1 or padding_len>min(block_size, pdata_len):
raise ValueError("Padding is incorrect.")
if padding_len>1 and padded_data[1-padding_len:]!=bchr(0)*(padding_len-1):
raise ValueError("ISO 7816-4 padding is incorrect.")
else:
raise ValueError("Unknown padding style")
return padded_data[:-padding_len]
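# Round-trip sketch (illustrative, not part of the original module):
# pad(b'hello', 16) == b'hello' + bchr(11) * 11   (PKCS#7)
# unpad(pad(b'hello', 16), 16) == b'hello'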
|
[
"etiennebrxv@gmail.com"
] |
etiennebrxv@gmail.com
|
fd7b4afd97e2ab6d8426692b6eb6f7be4be4d1e6
|
6b1fd67270b150ec3a4983945f6374c532e0c3b5
|
/pymesh/examples/md5/main.py
|
6ab99a96b6f2a2fccd3c3a7466c0a19d02bdf929
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
adamlwgriffiths/PyMesh
|
8078e2ff4cc93e9def368b4fbbbf52b9f8b3292e
|
dbed5c7a226b820fc3adb33e30f750ab1ffbd892
|
refs/heads/master
| 2016-09-05T09:42:43.068368
| 2013-02-03T14:52:11
| 2013-02-03T14:52:11
| 5,817,565
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
import os
from pymesh.md5 import MD5_Mesh, MD5_Anim
def load_mesh( filename ):
print 'Loading', filename
md5 = MD5_Mesh()
md5.load( filename )
# print the data out for verification
print 'version', md5.md5_version
print 'num_joints', md5.num_joints
print 'num_meshes', md5.num_meshes
print 'joints'
for joint in md5.joints:
print '\tname', joint.name
print '\tparent', joint.parent
print '\tposition', joint.position
print '\torientation', joint.orientation
print 'meshes'
for mesh in md5.meshes:
print '\tshader', mesh.shader
print '\tnumverts', mesh.num_verts
for vert in mesh.vertices:
print '\t\ttcs', vert.tcs
print '\t\tstart_weight', vert.start_weight
print '\t\tweight_count', vert.weight_count
print '\tnumtris', mesh.num_tris
for tri in mesh.tris:
print '\t\ttri', tri
print '\tnumweights', mesh.num_weights
for weight in mesh.weights:
print '\t\tjoint', weight.joint
print '\t\tbias', weight.bias
print '\t\tposition', weight.position
def load_anim( filename ):
print 'Loading', filename
md5 = MD5_Anim()
md5.load( filename )
print 'version', md5.md5_version
print 'frame_rate', md5.frame_rate
print 'hierarchy'
print 'num_joints', md5.hierarchy.num_joints
for joint in md5.hierarchy:
print '\tname', joint.name
print '\tparent', joint.parent
print '\tnum_components', joint.num_components
print '\tframe', joint.frame
print 'bounds'
print 'num_bounds', md5.bounds.num_bounds
for bounds in md5.bounds:
print '\tminimum', bounds[ 0 ]
print '\tmaximum', bounds[ 1 ]
print 'base frame'
print 'num_bones', md5.base_frame.num_bones
for bone in md5.base_frame:
print '\tposition', bone.position
print '\torientation', bone.orientation
print 'frames'
print 'num_frames', md5.num_frames
for frame in md5.frames:
print '\tjoints'
print '\tnum_joints', frame.num_joints
for joint in frame:
print '\t\tposition', joint.position
print '\t\torientation', joint.orientation
def main():
# load all md5 files in our data directory
# get the path relative to our examples file
path = os.path.join(
os.path.dirname( __file__ ),
'../data/md5'
)
# get the directory contents
contents = os.listdir(path)
# iterate through the contents and load
# each file that is a .md5mesh or .md5anim file
for filename in contents:
name, extension = os.path.splitext( filename )
# reattach our current directory
path = os.path.join(
os.path.dirname( __file__ ),
'../data/md5',
filename
)
if extension.lower() == '.md5mesh':
load_mesh( path )
if extension.lower() == '.md5anim':
load_anim( path )
if __name__ == '__main__':
main()
|
[
"adam.lw.griffiths@gmail.com"
] |
adam.lw.griffiths@gmail.com
|
516e94a8bd08fc5bf28a46f5f4ec6ff8a5dce10d
|
e7164d44058a06331c034cc17eefe1521d6c95a2
|
/tools/tieba/urate/python-wrapper/melt_predict.py
|
3805c92f412dc70ce42b65f942925d6170288160
|
[] |
no_license
|
chenghuige/gezi
|
fbc1e655396fbc365fffacc10409d35d20e3952c
|
4fc8f9a3c5837e8add720bf6954a4f52abfff8b5
|
refs/heads/master
| 2021-01-20T01:57:18.362413
| 2016-11-08T15:34:07
| 2016-11-08T15:34:07
| 101,304,774
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
import os
import sys
import glob
from pyplusplus import module_builder
root = '/home/users/chenghuige/rsc/'
name = 'melt_predict'
#define_symbols = ['GCCXML','PYTHON_WRAPPER','NO_BAIDU_DEP']
define_symbols = ['GCCXML','PYTHON_WRAPPER']
files = [
'./gezi.include.python/common_util.h',
'./include.python/MLCore/Predictor.h',
'./include.python/MLCore/TextPredictor.h',
'./gezi.include.python/Identifer.h',
'./include.python/MLCore/PredictorFactory.h',
'./gezi.include.python/Numeric/Vector/Vector.h',
'./gezi.include.python/Numeric/Vector/WeightVector.h',
'./gezi.include.python/Numeric/Vector/vector_util.h',
'./gezi.include.python/feature/FeatureVector.h',
'./gezi.include.python/feature/features_util.h',
'./gezi.include.python/feature/FeatureExtractor.h',
'./gezi.include.python/feature/FeaturesExtractorMgr.h',
]
paths = [
#'./gezi.include.python/Numeric/Vector/',
#'./include.python/MLCore/',
#'./include.python/Prediction/Instances/',
]
#import gezi
#for path in paths:
# files += [f for f in gezi.get_filepaths(path) if f.endswith('.h')]
include_paths=[
'third-64/glog',
'third-64/gflags',
'third-64/gtest',
'third-64/boost.1.53',
'lib2-64/bsl',
'lib2-64/postag',
'lib2-64/dict',
'lib2-64/libcrf',
'lib2-64/others-ex',
'lib2-64/ullib',
'lib2-64/ccode',
'public/odict/output',
'public/uconv/output',
'public/configure/output',
'app/search/sep/anti-spam/gezi/third/rabit',
]
include_paths_python = [
'app/search/sep/anti-spam/melt/python-wrapper',
]
include_paths_obsolute = [
'app/search/sep/anti-spam/melt/python-wrapper/gezi.include.python',
'lib2-64/wordseg',
'public/comlog-plugin',
'app/search/sep/anti-spam/gezi/third',
]
mb = module_builder.module_builder_t(
gccxml_path = '~/.jumbo/bin/gccxml',
define_symbols = define_symbols,
files = files,
include_paths = [root + f + '/include' for f in include_paths]
+ [root + f + '/include.python' for f in include_paths_python]
+ [root + f for f in include_paths_obsolute]
)
mb.build_code_creator( module_name='lib%s'%name )
mb.code_creator.user_defined_directories.append( os.path.abspath('.') )
mb.write_module( os.path.join( os.path.abspath('./'), '%s_py.cc'%name) )
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
483e5aadac9f2d40958e1167b76c220a451edcaf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02403/s798093890.py
|
f1e8cb17072b4b550e16ce6c68a1df20760dc0e6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import sys
x=y=1
while True:
x,y=map(int,input().split())
if x==0: break
for i in range (1,x+1):
for j in range (1,y+1):
sys.stdout.write('#')
print('')
print('')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0e29b7eea6f44cbedda24a54fc2477b33a4ff5fb
|
62166473c5b237bbe97c7abcafd2623a1bfc3a77
|
/linked_list_queue.py
|
e2b18155e138b3ebc3ba7bd14cca4538c8bf0f96
|
[] |
no_license
|
ananyajana/practice_problems
|
bc70bd55b9012fa7fafc2037ea606bc34bd63cad
|
0071a8a3753c8a9135c21fecf6b175ee3774c177
|
refs/heads/master
| 2023-05-15T01:23:35.146811
| 2021-06-11T21:27:40
| 2021-06-11T21:27:40
| 109,902,077
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
# problems from the GeeksforGeeks "must do" coding questions list
T = int(input())
st_list =[]
N_list = []
for t in range(T):
N_list.append(int(input()))
st_list.append(input())
class Node:
# fn to initialize the node object
def __init__(self, data):
self.data = data
self.next = None
class MyQueue:
def __init__(self):
self.front = None
# method to add an item to the queue
def push(self, item):
node = Node(item)
if self.front is None:
self.front = node
else:
temp = self.front
while temp.next is not None:
temp = temp.next
temp.next = node
# method to remove an item from the queue
def pop(self):
if self.front is None:
return -1
else:
temp = self.front
self.front = self.front.next
return temp.data
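# Usage sketch (illustrative, not in the original):
# q = MyQueue(); q.push(1); q.push(2)
# q.pop() -> 1, then 2, then -1 once the queue is empty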
for t in range(T):
s1 = st_list[t]
n = N_list[t]
|
[
"ananya.jana@gmail.com"
] |
ananya.jana@gmail.com
|
280d995579a3904e0a74306a09310df360636dd4
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2182/61406/303898.py
|
0fcb1dffc2380878a68d6135d58d8e3a3cb4ebba
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
T = int(input())
for a in range(0,T):
nk = input().split(' ')
n = int(nk[0])
k = int(nk[1])
circle = []
for b in range(0,n):
circle.append(1)
ptr = 0
count = 0
m=0
while count<n-1:
if ptr>n-1:
ptr = ptr-n
while circle[ptr]==-1:
ptr+=1
if ptr > n - 1:
ptr = ptr - n
if circle[ptr]==1:
m += 1
if m==k:
circle[ptr]=-1
count+=1
m=0
ptr+=1
print(circle.index(1)+1)
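# Sanity check (illustrative): n=5, k=2 leaves position 3 standing,
# matching the classic Josephus result J(5, 2) = 3.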
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
9eacff23f758535c87a7687002c988a9cad9bb7d
|
db575f3401a5e25494e30d98ec915158dd7e529b
|
/BIO_Stocks/RZLT.py
|
79c2560d453f46f7fd8351357550d6986f2de632
|
[] |
no_license
|
andisc/StockWebScraping
|
b10453295b4b16f065064db6a1e3bbcba0d62bad
|
41db75e941cfccaa7043a53b0e23ba6e5daa958a
|
refs/heads/main
| 2023-08-08T01:33:33.495541
| 2023-07-22T21:41:08
| 2023-07-22T21:41:08
| 355,332,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control):
try:
url = 'https://www.rezolutebio.com/news/press-releases'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
result = requests.get(url, headers=headers)
#print(result.content.decode())
html_content = result.content.decode()
soup = BeautifulSoup(html_content, 'html.parser')
#print(soup)
articles_panel = soup.find('ul', attrs={'class':'news'})
articles = articles_panel.findAll('li')
# get first article
FIRST_ARTICLE = articles[0]
article_date = FIRST_ARTICLE.find('time')
article_desc = FIRST_ARTICLE.find('a')
v_article_date = article_date.text.lstrip().rstrip()
        # if the process finds an article dated today
istoday, v_art_date = validateday(v_article_date)
if (istoday == True):
v_ticker = os.path.basename(__file__).replace(".py", "")
v_url = article_desc.get('href')
v_description = article_desc.text.lstrip().rstrip()
now = datetime.now()
print("URL: " + v_url)
print("DESCRIPTION: " + v_description)
print("ARTICLE_DATE: " + str(now))
# Insert articles
if "https://" in v_url:
InsertData(v_ticker, v_description, v_url, v_art_date)
else:
InsertData(v_ticker, v_description, url, v_art_date)
    except Exception:
        error_message = "Exception raised while processing " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
#InsertData()
if __name__ == "__main__":
    # main() requires a control id; 0 is a placeholder, since the original
    # call omitted the required argument.
    main(0)
|
[
"andisc_3@hotmail.com"
] |
andisc_3@hotmail.com
|
75ad1cf37bc5ac16b62c19cd737a317a998c3347
|
f47ac8d59fe1c0f807d699fe5b5991ed3662bfdb
|
/binary23.py
|
6f1107467849f636bfca3fa944aa05d18d2fb699
|
[] |
no_license
|
YanglanWang/jianzhi_offer
|
5561d8a29881d8504b23446353e9f969c01ed0c5
|
1c568f399ed6ac1017671c40c765e609c1b6d178
|
refs/heads/master
| 2020-06-16T10:41:44.979558
| 2019-08-03T09:07:37
| 2019-08-03T09:07:37
| 195,543,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
class Solution:
    def VerifySquenceOfBST(self, sequence):
        # Check whether `sequence` could be the post-order traversal of a
        # binary search tree: the last element is the root, every element
        # before the first one larger than the root forms the left subtree,
        # and all remaining elements must be larger than the root.
        if len(sequence) == 0:
            return False
        if len(sequence) == 1:
            return True
        root = sequence[-1]
        for i in range(len(sequence)):
            if sequence[i] > root:
                break
        k = i
        for j in range(i, len(sequence) - 1):
            if sequence[j] < root:
                return False
        left = True
        right = True
        if k > 0:
            left = self.VerifySquenceOfBST(sequence[:k])
        if k < len(sequence) - 1:
            right = self.VerifySquenceOfBST(sequence[k:-1])
        return left and right
a=Solution()
# b=a.VerifySquenceOfBST([4,6,7,5])
b=a.VerifySquenceOfBST([4,3,5,8,10,9,7])
print(b)
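# Expected output: True -- [4, 3, 5, 8, 10, 9, 7] is the post-order traversal
# of the BST with root 7, left subtree {3, 4, 5} and right subtree {8, 9, 10}.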
|
[
"yanglan-17@mails.tsinghua.edu.cn"
] |
yanglan-17@mails.tsinghua.edu.cn
|
f288784a0437bf94488c1a422eb9e0f0ca36f3e1
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/data/kernel_tests/repeat_test.py
|
88e83da5bc27a9aeaa9b63ef9e314e6f97cb074f
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 3,445
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.repeat()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RepeatTest(test_base.DatasetTestBase):
def testRepeatTensorDataset(self):
"""Test a dataset that repeats its input multiple times."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
# This placeholder can be fed when dataset-definition subgraph
# runs (i.e. `init_op` below) to configure the number of
# repetitions used in a particular iterator.
def do_test(count):
dataset = dataset_ops.Dataset.from_tensors(components).repeat(count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, [components] * count)
# Test a finite repetition.
do_test(3)
    # Test a different finite repetition.
do_test(7)
# Test an empty repetition.
do_test(0)
# Test an infinite repetition.
# NOTE(mrry): There's not a good way to test that the sequence
# actually is infinite.
dataset = dataset_ops.Dataset.from_tensors(components).repeat(-1)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(17):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
def testRepeatRepeatTensorDataset(self):
"""Test the composition of repeat datasets."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
inner_count, outer_count = 7, 14
dataset = dataset_ops.Dataset.from_tensors(components).repeat(
inner_count).repeat(outer_count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset,
[components] * (inner_count * outer_count))
def testRepeatEmptyDataset(self):
"""Test that repeating an empty dataset does not hang."""
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10).repeat(-1)
self.assertDatasetProduces(dataset, [])
if __name__ == "__main__":
test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
b3e56b32c4b8350754af374e43a7c8207d17fd73
|
f0f56524d54b924eda0bc1abcc386589ccf2a026
|
/dittodemo/settings/defaults.py
|
40f9289fd1b893ec30d6ee20799ba3c67e0154ac
|
[
"MIT"
] |
permissive
|
vanderwal/django-ditto-demo
|
052d116419b8f67db40cd7a71b793bd702672c0f
|
abb251d44de48191b32ef54768f638920d39d081
|
refs/heads/master
| 2022-02-03T21:07:14.860800
| 2017-02-09T13:50:36
| 2017-02-09T13:50:36
| 66,157,322
| 0
| 0
| null | 2016-08-20T16:43:14
| 2016-08-20T16:43:13
| null |
UTF-8
|
Python
| false
| false
| 5,235
|
py
|
"""
Django settings for dittodemo project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from os import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make this unique, and don't share it with anybody.
# http://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ADMINS = [
('Phil Gyford', 'phil@gyford.com'),
]
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Our Django app.
'demo',
# For Django Ditto.
'imagekit',
'sortedm2m',
'taggit',
'ditto.core',
'ditto.flickr',
'ditto.lastfm',
'ditto.pinboard',
'ditto.twitter',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dittodemo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ditto.core.context_processors.ditto',
],
},
},
]
WSGI_APPLICATION = 'dittodemo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': environ.get('DB_NAME'),
'USER': environ.get('DB_USERNAME'),
'PASSWORD': environ.get('DB_PASSWORD'),
'HOST': environ.get('DB_HOST'),
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_collected/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(',')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
        'TIMEOUT': 500,  # seconds (Django cache timeouts are in seconds, not milliseconds)
}
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# DJANGO DITTO SETTINGS
DITTO_FLICKR_DIR_BASE = 'flickr'
DITTO_FLICKR_DIR_PHOTOS_FORMAT = '%Y/%m/%d'
DITTO_FLICKR_USE_LOCAL_MEDIA = False
DITTO_TWITTER_DIR_BASE = 'twitter'
DITTO_TWITTER_USE_LOCAL_MEDIA = False
|
[
"phil@gyford.com"
] |
phil@gyford.com
|
916e183c70b1243b4a91f925cfb582f468642add
|
c8cd63041471e7a20bf3a15c3ca96b7573f5f727
|
/load_test_parser.py
|
2aa9ecfacbaac77a83ac5488be6c85d639cefd4b
|
[] |
no_license
|
ansonmiu0214/airbus_prototype
|
a992b9a04ba35da54088ff399975aac4efc7046a
|
0647ec3cee330e4c58a40e10a946e57478e316ad
|
refs/heads/master
| 2020-04-26T09:24:12.565650
| 2019-03-09T11:42:39
| 2019-03-09T11:42:39
| 173,453,998
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/env python3
if __name__ == "__main__":
with open('output_2.txt', 'r') as f:
lines = f.readlines()
relevant = [line.strip().split('Current latency')[1] for line in lines if line.startswith('Current latency')]
relevant = list(map(float, map(lambda x: x.strip(), relevant)))
max_latency = max(relevant)
min_latency = min(relevant)
avg_latency = sum(relevant) / len(relevant)
print("Max latency (ms):", max_latency)
print("Min latency (ms):", min_latency)
print("Avg latency (ms):", avg_latency)
|
[
"ansonmiu0214@gmail.com"
] |
ansonmiu0214@gmail.com
|
31e9cd4c46e9ca7dcf3f0bccfa1cf4e7f7aa7945
|
27aaadf435779c29012233cb1dacf27bd9dd0d0f
|
/imagesearch-20201214/setup.py
|
158893f64912b1bb6ea8f0b27a99576256e3835a
|
[
"Apache-2.0"
] |
permissive
|
aliyun/alibabacloud-python-sdk
|
afadedb09db5ba6c2bc6b046732b2a6dc215f004
|
e02f34e07a7f05e898a492c212598a348d903739
|
refs/heads/master
| 2023-08-22T20:26:44.695288
| 2023-08-22T12:27:39
| 2023-08-22T12:27:39
| 288,972,087
| 43
| 29
| null | 2022-09-26T09:21:19
| 2020-08-20T10:08:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_imagesearch20201214.
Created on 16/12/2022
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_imagesearch20201214"
NAME = "alibabacloud_imagesearch20201214" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud image search (20201214) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.8, <1.0.0",
"alibabacloud_oss_sdk>=0.1.0, <1.0.0",
"alibabacloud_openplatform20191219>=2.0.0, <3.0.0",
"alibabacloud_oss_util>=0.0.5, <1.0.0",
"alibabacloud_tea_fileform>=0.0.3, <1.0.0",
"alibabacloud_tea_openapi>=0.3.6, <1.0.0",
"alibabacloud_openapi_util>=0.2.0, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","imagesearch20201214"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
2cd72276ffa96c0292769e9e855768fde32ae8b2
|
f28b2ec517ac93526ac66b0b4a4fccc8614739d0
|
/HRC_arlac_monitor/Save/zinfo.py
|
7b806e9f132141ae3ab8e3b52d11d788f490533d
|
[] |
no_license
|
tisobe/HRC
|
8b8f7137e0804d7478da17d0796c0f25c19c04e6
|
aaba4e5512b14c92b8d413dd173e9e944041fa4d
|
refs/heads/master
| 2020-03-17T14:43:00.972966
| 2018-05-16T15:28:29
| 2018-05-16T15:28:29
| 133,683,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,944
|
py
|
#!/usr/bin/env /proj/sot/ska/bin/python
#############################################################################################################
# #
# hrc_gain_find_ar_lac.py: find new AR Lac observations and put them in a list #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Sep 24, 2014 #
# #
#############################################################################################################
import os
import sys
import re
import string
import random
import operator
import numpy
import unittest
from astropy.table import Table
from Ska.DBI import DBI
#
#--- reading directory list
#
path = '/data/mta/Script/HRC/Gain/house_keeping/dir_list_py'
f = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec "%s = %s" %(var, line)
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import convertTimeFormat as tcnv
import mta_common_functions as mcf
#
#--- SQL-related settings
#
db_user = 'browser'
db_server = 'ocatsqlsrv'
file = bdata_dir + '/.targpass'
db_passwd = mcf.get_val(file)
#---------------------------------------------------------------------------------------------------
#-- hrc_gain_find_ar_lac: find new AR Lac observations and put them in a list --
#---------------------------------------------------------------------------------------------------
def hrc_gain_find_ar_lac():
"""
    find new AR Lac observations and put them in a list
Input: none, but the data will be read from mp_reports and also hrc_obsid_list in <house_keeping>
Output: "./candidate_list" which lists obsids of new AR Lac observations
candidate_list it also returns the same list
"""
hrc_list = hrc_gain_find_hrc_obs()
candidate_list = hrc_gain_test_obs(hrc_list)
return candidate_list
#---------------------------------------------------------------------------------------------------
#-- hrc_gain_find_hrc_obs: select current hrc observations to be checked --
#---------------------------------------------------------------------------------------------------
def hrc_gain_find_hrc_obs():
"""
select out the current hrc observations and create test candidate list
Input: none, but the data will be read from mp_reports and also hrc_obsid_list in <house_keeping>
Output: new_obs --- recently observed HRC obsid list
"""
#
#--- read obsid list of AR Lac we already checked
#
file = house_keeping + '/hrc_obsid_list'
f = open(file, 'r')
obsid_list = [line.strip() for line in f.readlines()]
f.close()
#
#--- find HRC events from a recent mp_reports
#
page = '/data/mta_www/mp_reports/events/mta_events.html'
f = open(page, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
data_list = []
for ent in data:
m1 = re.search('HRC', ent)
m2 = re.search('Obsid', ent)
if (m1 is not None) and (m2 is not None):
atemp = re.split('\/', ent)
data_list.append(atemp[1])
#
#--- select out obsids which we have not checked before
#
new_obs = []
for ent in data_list:
chk = 0
for comp in obsid_list:
if ent == comp:
chk = 1
continue
if chk > 0:
continue
new_obs.append(ent)
return new_obs
#---------------------------------------------------------------------------------------------------
#-- hrc_gain_test_obs: find new AR Lac observations from a hrc obsid list ----
#---------------------------------------------------------------------------------------------------
def hrc_gain_test_obs(new_obs, test=''):
"""
    find new AR Lac observations from a hrc obsid list
Input: new_obs --- a list of hrc obsids
test --- a test indicator. if it is other than "", test will run
Output: "./candidate_list" which lists obsids of new AR Lac observations
candidate_list it also returns the same list
"""
if test == "":
f1 = open('./candidate_list', 'w')
        file = house_keeping + '/hrc_obsid_list'    # separator added for consistency with the read in hrc_gain_find_hrc_obs
        file2 = house_keeping + '/hrc_obsid_list~'
cmd = 'cp -f ' + file + ' ' + file2
os.system(cmd)
f2 = open(file, 'a')
candidate_list = []
for obsid in new_obs:
#
#--- open sql database and extract data we need
#
db = DBI(dbi='sybase', server=db_server, user=db_user, passwd=db_passwd, database='axafocat')
cmd = 'select obsid,targid,seq_nbr,targname,grating,instrument from target where obsid=' + obsid
query_results = db.fetchall(cmd)
if len(query_results):
query_results = Table(query_results)
line = query_results['targname'].data
targname = line[0]
#
#--- if the observation is AR Lac, write it down in candidate_list
#
m1 = re.search('arlac', targname.lower())
if m1 is not None:
line = obsid + '\n'
candidate_list.append(obsid)
if test == '':
f1.write(line)
f2.write(line)
if test == '':
f1.close()
f2.close()
return candidate_list
#-----------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#-----------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
"""
testing functions
"""
def test_hrc_gain_test_obs(self):
page = house_keeping + '/Test_prep/candidate'
f = open(page, 'r')
data_list = [line.strip() for line in f.readlines()]
f.close()
test_candidates = ['14313', '14314', '14315', '14316']
candidates = hrc_gain_test_obs(data_list, test='test')
        self.assertEqual(candidates, test_candidates)
#--------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
[
"isobe@head.cfa.harvard.edu"
] |
isobe@head.cfa.harvard.edu
|
a5df9c3956ce741cb2eb6a9c0155d07349363a1d
|
7cb626363bbce2f66c09e509e562ff3d371c10c6
|
/multimodel_inference/py3_v1/olds/sc3emlsm.py
|
dd484cd9338ae7f71742e4f7d8a632754eab4a2b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
z0on/AFS-analysis-with-moments
|
76bfd6b0361ab7e9173144dbd21b6fa2c7bf1795
|
eea4735b3b6fbe31c4e396da3d798387884a1500
|
refs/heads/master
| 2023-07-31T20:49:20.865161
| 2023-07-19T06:57:32
| 2023-07-19T06:57:32
| 96,915,117
| 4
| 5
| null | 2020-09-02T17:39:08
| 2017-07-11T16:38:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
#!/usr/bin/env python
# split, three epochs in each pop, migration at the same rate in both directions and in all epochs
# n(para): 11
import matplotlib
matplotlib.use('PDF')
import moments
import pylab
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
if len(sys.argv)==9:
params = np.loadtxt(sys.argv[8], delimiter=" ", unpack=False)
else:
params=[1,1,1,1,1,1,1,1,1,1,0.01]
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
# set Polarized=False below for folded AFS analysis
fs = moments.Spectrum.from_file(infile)
data=fs.project(projections)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split into unequal pop sizes with asymmetrical migration
def sc3ei(params , ns):
# p_misid: proportion of misidentified ancestral states
nu1_1, nu2_1, nu1_2,nu2_2,nu1_3,nu2_3,T1, T2, T3,m, p_misid = params
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
fs.integrate([nu1_1, nu2_1], T1, m = np.array([[0, m], [m, 0]]))
fs.integrate([nu1_2, nu2_2], T2, m = np.array([[0, m], [m, 0]]))
fs.integrate([nu1_3, nu2_3], T3, m = np.array([[0, m], [m, 0]]))
return (1-p_misid)*fs + p_misid*moments.Numerics.reverse_array(fs)
func=sc3ei
upper_bound = [100, 100, 100,100,100, 100, 100, 100,100, 200,0.25]
lower_bound = [1e-3,1e-3, 1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5]
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
lower_bound=lower_bound)
poptg = moments.Inference.optimize_log(params, data, func,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="sc3emlsm_"+ind+".png", pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
# printing best-fit parameters, log-likelihood and theta
# (no bootstrap is run in this script, so no SDs are reported here)
print("RESULT", "sc3emlsm", ind, len(params), ll_model, sys.argv[1], sys.argv[2], sys.argv[3], poptg, theta)
# plotting quad-panel figure with AFS, model, and residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=0.1, resid_range=3,
pop_ids =pop_ids)
plt.savefig("sc3emlsm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
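# Example invocation (argument order inferred from the sys.argv reads above;
# the file names are hypothetical):
#   python sc3emlsm.py 2pops.sfs pop1 pop2 16 16 0.02 0.005 [start_params.txt]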
|
[
"matz@utexas.edu"
] |
matz@utexas.edu
|
e8d46842c93e593d1abf393ca6aab62c76269e13
|
9f35bea3c50668a4205c04373da95195e20e5427
|
/third_party/blink/renderer/bindings/scripts/bind_gen/path_manager.py
|
84b01c38d33b1cf2e4c6f677ce0cd1442c1f52c8
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
foolcodemonkey/chromium
|
5958fb37df91f92235fa8cf2a6e4a834c88f44aa
|
c155654fdaeda578cebc218d47f036debd4d634f
|
refs/heads/master
| 2023-02-21T00:56:13.446660
| 2020-01-07T05:12:51
| 2020-01-07T05:12:51
| 232,250,603
| 1
| 0
|
BSD-3-Clause
| 2020-01-07T05:38:18
| 2020-01-07T05:38:18
| null |
UTF-8
|
Python
| false
| false
| 5,524
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import web_idl
from . import name_style
from .blink_v8_bridge import blink_class_name
class PathManager(object):
"""
Provides a variety of paths such as Blink headers and output files. Unless
explicitly specified, returned paths are relative to the project's root
directory or the root directory of generated files.
e.g. "third_party/blink/renderer/..."
About output files, there are two cases.
- cross-components case:
APIs are generated in 'core' and implementations are generated in
'modules'.
- single component case:
Everything is generated in a single component.
"""
_REQUIRE_INIT_MESSAGE = ("PathManager.init must be called in advance.")
_is_initialized = False
@classmethod
def init(cls, root_src_dir, root_gen_dir, component_reldirs):
"""
Args:
root_src_dir: Project's root directory, which corresponds to "//"
in GN.
root_gen_dir: Root directory of generated files, which corresponds
to "//out/Default/gen" in GN.
component_reldirs: Pairs of component and output directory relative
to |root_gen_dir|.
"""
assert not cls._is_initialized
assert isinstance(root_src_dir, str)
assert isinstance(root_gen_dir, str)
assert isinstance(component_reldirs, dict)
cls._blink_path_prefix = posixpath.sep + posixpath.join(
"third_party", "blink", "renderer", "")
cls._root_src_dir = posixpath.abspath(root_src_dir)
cls._root_gen_dir = posixpath.abspath(root_gen_dir)
cls._component_reldirs = {
component: posixpath.normpath(rel_dir)
            for component, rel_dir in component_reldirs.items()  # items() works under both Python 2 and 3
}
cls._is_initialized = True
@staticmethod
def gen_path_to(path):
"""
Returns the absolute path of |path| that must be relative to the root
directory of generated files.
"""
assert PathManager._is_initialized, PathManager._REQUIRE_INIT_MESSAGE
return posixpath.abspath(
posixpath.join(PathManager._root_gen_dir, path))
@classmethod
def relpath_to_project_root(cls, path):
index = path.find(cls._blink_path_prefix)
if index < 0:
assert path.startswith(cls._blink_path_prefix[1:])
return path
return path[index + 1:]
def __init__(self, idl_definition):
assert self._is_initialized, self._REQUIRE_INIT_MESSAGE
idl_path = PathManager.relpath_to_project_root(
posixpath.normpath(idl_definition.debug_info.location.filepath))
idl_basepath, _ = posixpath.splitext(idl_path)
self._idl_dir, self._idl_basename = posixpath.split(idl_basepath)
components = sorted(idl_definition.components) # "core" < "modules"
if len(components) == 1:
component = components[0]
self._is_cross_components = False
self._api_component = component
self._impl_component = component
elif len(components) == 2:
assert components[0] == "core"
assert components[1] == "modules"
self._is_cross_components = True
self._api_component = components[0]
self._impl_component = components[1]
else:
assert False
self._api_dir = self._component_reldirs[self._api_component]
self._impl_dir = self._component_reldirs[self._impl_component]
self._v8_bind_basename = name_style.file("v8",
idl_definition.identifier)
self._blink_dir = self._idl_dir
self._blink_basename = name_style.file(
blink_class_name(idl_definition))
@property
def idl_dir(self):
return self._idl_dir
def blink_path(self, filename=None, ext=None):
"""
Returns a path to a Blink implementation file relative to the project
root directory, e.g. "third_party/blink/renderer/..."
"""
return self._join(
dirpath=self._blink_dir,
filename=(filename or self._blink_basename),
ext=ext)
@property
def is_cross_components(self):
return self._is_cross_components
@property
def api_component(self):
return self._api_component
@property
def api_dir(self):
return self._api_dir
def api_path(self, filename=None, ext=None):
return self._join(
dirpath=self.api_dir,
filename=(filename or self._v8_bind_basename),
ext=ext)
@property
def impl_component(self):
return self._impl_component
@property
def impl_dir(self):
return self._impl_dir
def impl_path(self, filename=None, ext=None):
return self._join(
dirpath=self.impl_dir,
filename=(filename or self._v8_bind_basename),
ext=ext)
# TODO(crbug.com/1034398): Remove this API
def dict_path(self, filename=None, ext=None):
return self.blink_path(filename, ext)
@staticmethod
def _join(dirpath, filename, ext=None):
if ext is not None:
filename = posixpath.extsep.join([filename, ext])
return posixpath.join(dirpath, filename)
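# Minimal usage sketch (values are illustrative; the real callers live
# elsewhere in the bind_gen package). PathManager.init must run once first:
#
#   PathManager.init(
#       root_src_dir="/chromium/src",
#       root_gen_dir="/chromium/src/out/Default/gen",
#       component_reldirs={"core": "bindings/core", "modules": "bindings/modules"})
#   paths = PathManager(idl_definition)
#   header = paths.api_path(ext="h")   # roughly v8_<identifier>.h in the API component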
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
aadcbf0b94233b17dbea12ad397b0590f1241baf
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/helpers/edgedetectcolorcontroller.py
|
65c2877cf2f2b4814e0f3a4c532a57a905ba4be7
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,793
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/helpers/EdgeDetectColorController.py
import BigWorld
import Math
from PlayerEvents import g_playerEvents
from Account import PlayerAccount
from helpers import dependency
from skeletons.account_helpers.settings_core import ISettingsCore
_DEFAULT_OVERLAY_COLOR = Math.Vector4(1, 1, 1, 1)
_OVERLAY_SOLID_KEYS = ('overlay', 'destructible')
_OVERLAY_PATTERN_KEYS = ('overlayForeground', 'overlay', 'destructibleForeground', 'destructible')
_OVERLAY_TARGET_INDEXES = {'enemy': 1,
'friend': 2}
g_instance = None
class EdgeDetectColorController(object):
settingsCore = dependency.descriptor(ISettingsCore)
def __init__(self, dataSec):
self.__colors = {'common': dict(),
'colorBlind': dict()}
self.__readColors(self.__colors, 'common', dataSec)
self.__readColors(self.__colors, 'colorBlind', dataSec)
def updateColors(self):
self.__changeColor({'isColorBlind': self.settingsCore.getSetting('isColorBlind')})
def create(self):
self.settingsCore.onSettingsChanged += self.__changeColor
g_playerEvents.onAccountShowGUI += self.__onAccountShowGUI
def destroy(self):
self.settingsCore.onSettingsChanged -= self.__changeColor
g_playerEvents.onAccountShowGUI -= self.__onAccountShowGUI
def __readColors(self, colors, cType, section):
cName = '{}/'.format(cType)
out, common = colors[cType], colors['common']
out['self'] = section.readVector4(cName + 'self', common.get('self', Math.Vector4(0.2, 0.2, 0.2, 0.5)))
out['enemy'] = section.readVector4(cName + 'enemy', common.get('enemy', Math.Vector4(1, 0, 0, 0.5)))
out['friend'] = section.readVector4(cName + 'friend', common.get('friend', Math.Vector4(0, 1, 0, 0.5)))
out['flag'] = section.readVector4(cName + 'flag', common.get('flag', Math.Vector4(1, 1, 1, 1)))
out['hangar'] = section.readVector4(cName + 'hangar', common.get('hangar', Math.Vector4(1, 1, 0, 1)))
self.__readOverlayColors(out, common, cType, 'overlaySolidColors', _OVERLAY_SOLID_KEYS, section)
self.__readOverlayColors(out, common, cType, 'overlayPatternColors', _OVERLAY_PATTERN_KEYS, section)
def __readOverlayColors(self, out, common, cType, overlayType, keys, section):
targets = ['enemy', 'friend']
common, out[overlayType] = common.get(overlayType) or {}, {}
for target in targets:
commonTarget, out[overlayType][target] = common.get(target) or {}, {}
targetPath = '/'.join([cType, overlayType, target]) + '/'
for key in keys:
color = section.readVector4(targetPath + key, commonTarget.get(key, _DEFAULT_OVERLAY_COLOR))
out[overlayType][target][key] = color
out[overlayType][target]['packed'] = [ out[overlayType][target][key] for key in keys ]
def __onAccountShowGUI(self, ctx):
self.updateColors()
def __changeColor(self, diff):
if 'isColorBlind' not in diff:
return
isHangar = isinstance(BigWorld.player(), PlayerAccount)
cType = 'colorBlind' if diff['isColorBlind'] else 'common'
colors = self.__colors[cType]
colorsSet = (colors['hangar'] if isHangar else colors['self'],
colors['enemy'],
colors['friend'],
colors['flag'])
        for i, c in enumerate(colorsSet):
            BigWorld.wgSetEdgeDetectEdgeColor(i, c)
for target, idx in _OVERLAY_TARGET_INDEXES.iteritems():
BigWorld.wgSetEdgeDetectSolidColors(idx, *colors['overlaySolidColors'][target]['packed'])
BigWorld.wgSetEdgeDetectPatternColors(idx, *colors['overlayPatternColors'][target]['packed'])
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
f9ce519233fd2afe6e44f4421dd950aae83bb057
|
9c718b8964d476db4728fc0cf18e24292dd8cf60
|
/mxshop/MxShop/apps/goods/migrations/0003_auto_20180623_1326.py
|
8a898faf7e1b7893e241d7d7688d7cf50c63bdd5
|
[] |
no_license
|
1400720231/Django-Projects
|
960f9226e0f5c01628afd65b9a78e810fdeb1b83
|
72f96788163f7ffe76e7599966ddbfa1d2199926
|
refs/heads/master
| 2021-06-25T17:41:14.147011
| 2019-04-03T02:24:38
| 2019-04-03T02:24:38
| 114,955,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-06-23 13:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('goods', '0002_goodscategorybrand_category'),
]
operations = [
migrations.AlterModelOptions(
name='goodscategory',
options={'verbose_name': '商品类目', 'verbose_name_plural': '商品类目'},
),
]
|
[
"937886362@qq.com"
] |
937886362@qq.com
|
40903430884cb63c47a7d28f94fa27d31ac66245
|
92adce9ebf87fc374f6cc093f68d14b7dc7a697f
|
/read.py
|
2a08a459948865f3bdc2e5d27e913a2e6bdb6128
|
[
"MIT"
] |
permissive
|
nicolay-r/RuSentRel
|
1f4f5678fdcbe342bda82834d6192a70b5c8b94c
|
4fc0df1580d3da21f0be1e832e403652f73caed1
|
refs/heads/master
| 2021-08-27T16:49:34.342494
| 2021-08-03T06:48:39
| 2021-08-03T06:48:39
| 128,990,497
| 6
| 2
|
MIT
| 2021-08-03T06:48:39
| 2018-04-10T20:22:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
#!/usr/bin/python
from reader.common.bound import Bound
from reader.common.entities.collection import EntityCollection
from reader.common.lemmatization.mystem import MystemWrapper
from reader.entities.collection import RuSentRelDocumentEntityCollection
from reader.entities.entity import RuSentRelEntity
from reader.news import RuSentRelNews
from reader.opinions.collection import RuSentRelOpinionCollection
from reader.opinions.opinion import RuSentRelOpinion
from reader.sentence import RuSentRelSentence
from reader.synonyms import RuSentRelSynonymsCollection
import utils
# Initializing stemmer
stemmer = MystemWrapper()
# Reading synonyms collection.
synonyms = RuSentRelSynonymsCollection.from_file('synonyms.txt', stemmer=stemmer)
# Reading the 'test' subfolder of the collection.
test_root = 'test'
for news_id in utils.get_rusentrel_test_indices():
    print("NewsID: {}".format(news_id))
    # Init filepaths
    entities_filepath = utils.get_rusentrel_entity_filepath(news_id, root=test_root)
    news_filepath = utils.get_rusentrel_news_filepath(news_id, root=test_root)
    opinion_filepath = utils.get_rusentrel_format_sentiment_opin_filepath(news_id, root=test_root, is_etalon=True)
# Read collections
entities = RuSentRelDocumentEntityCollection.from_file(entities_filepath, stemmer=stemmer, synonyms=synonyms)
news = RuSentRelNews.from_file(news_filepath, entities)
    opinions = RuSentRelOpinionCollection.from_file(opinion_filepath, synonyms=synonyms)
#############
# Application
#############
# Example: Access to the read OPINIONS collection.
    for opinion in opinions:
assert(isinstance(opinion, RuSentRelOpinion))
print("\t{}->{} ({}) [synonym groups opinion: {}->{}]".format(
opinion.SourceValue,
opinion.TargetValue,
opinion.Sentiment.to_str(),
# Considering synonyms.
synonyms.get_synonym_group_index(opinion.SourceValue),
synonyms.get_synonym_group_index(opinion.TargetValue)))
# Example: Access to the read NEWS collection.
for sentence in news.iter_sentences():
assert(isinstance(sentence, RuSentRelSentence))
# Access to text.
print("\tSentence: '{}'".format(sentence.Text.strip()))
# Access to inner entities.
for entity, bound in sentence.iter_entity_with_local_bounds():
assert(isinstance(entity, RuSentRelEntity))
assert(isinstance(bound, Bound))
print("\tEntity: {}, text position: ({}-{}), IdInDocument: {}".format(
entity.Value,
bound.Position,
bound.Position + bound.Length,
entity.IdInDocument))
# Example: Access to the read ENTITIES collection.
example_entity = entities.get_entity_by_index(10)
entities_list = entities.try_get_entities(example_entity.Value,
group_key=EntityCollection.KeyType.BY_SYNONYMS)
print("\tText synonymous to: '{}'".format(example_entity.Value))
print("\t[{}]".format(", ".join([str((e.Value, str(e.IdInDocument))) for e in entities_list])))
|
[
"kolyarus@yandex.ru"
] |
kolyarus@yandex.ru
|
c7d4ab5c5775b06d53e956773414cb4210cd5023
|
c4a3eeabe660e5d6b42f704d0325a755331ab3c5
|
/hyperion/hyperion_sedcom.py
|
93f0c5a6dcaec34cae9ebd884d6942b366f84b70
|
[] |
no_license
|
yaolun/misc
|
dfcfde2ac4a6429201644e1354912d3a064f9524
|
049b68ce826ddf638cec9a3b995d9ee84bf6075a
|
refs/heads/master
| 2021-01-21T23:54:08.953071
| 2018-06-02T19:46:18
| 2018-06-02T19:46:18
| 26,666,071
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,970
|
py
|
def hyperion_sedcom(modellist, outdir, plotname, obs_data=None, labellist=None, lbol=False, legend=True, mag=1.5,\
obs_preset='sh', dstar=1, aper=[3.6, 4.5, 5.8, 8.0, 10, 20, 24, 70, 160, 250, 350, 500, 850]):
"""
obs_data: dictionary which obs_data['spec'] is spectrum and obs_data['phot'] is photometry
obs_data['label'] = (wave, Fv, err) in um and Jy by default
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import astropy.constants as const
from hyperion.model import ModelOutput
from scipy.interpolate import interp1d
from l_bol import l_bol
import seaborn as sb
# from seaborn import color_palette
# from seaborn_color import seaborn_color
# constant setup
c = const.c.cgs.value
pc = const.pc.cgs.value
    if labellist is None:
        if legend:
            print 'Model labels are not provided. Use their filenames instead.'
labellist = []
for i in range(0, len(modellist)):
labellist.append(r'$\mathrm{'+os.path.splitext(os.path.basename(modellist[i]))[0]+'}$')
# cm = seaborn_color('colorblind',len(modellist))
sb.set(style="white")
cm = sb.color_palette('husl', len(modellist))
# create figure object
fig = plt.figure(figsize=(8*mag,6*mag))
ax = fig.add_subplot(111)
# sb.set_style('ticks')
print 'plotting with aperture at ', aper, 'um'
# if the obs_data is provided than plot the observation first. In this way, models won't be blocked by data
    if obs_data is not None:
if 'spec' in obs_data.keys():
(wave, fv, err) = obs_data['spec']
vfv = c/(wave*1e-4)*fv*1e-23
l_bol_obs = l_bol(wave, fv, dstar)
if legend == True:
ax.text(0.75,0.9,r'$\mathrm{L_{bol}= %5.2f L_{\odot}}$' % l_bol_obs,fontsize=mag*16,transform=ax.transAxes)
# general plotting scheme
if obs_preset == None:
spec, = ax.plot(np.log10(wave),np.log10(vfv),'-',color='k',linewidth=1.5*mag, label=r'$\mathrm{observations}$')
# plot spitzer, Herschel pacs and spire in different colors
elif obs_preset == 'sh':
# spitzer
spitz, = ax.plot(np.log10(wave[wave < 50]),np.log10(vfv[wave < 50]),'-',color='b',linewidth=1*mag,\
label=r'$\mathrm{\it Spitzer}$')
# herschel
pacs, = ax.plot(np.log10(wave[(wave < 190.31) & (wave > 50)]),np.log10(vfv[(wave < 190.31) & (wave > 50)]),'-',\
color='Green',linewidth=1*mag, label=r'$\mathrm{{\it Herschel}-PACS}$')
spire, = ax.plot(np.log10(wave[wave >= 190.31]),np.log10(vfv[wave >= 190.31]),'-',color='k',linewidth=1*mag,\
label=r'$\mathrm{{\it Herschel}-SPIRE}$')
spec = [spitz, pacs, spire]
if 'phot' in obs_data.keys():
(wave_p, fv_p, err_p) = obs_data['phot']
vfv_p = c/(wave_p*1e-4)*fv_p*1e-23
vfv_p_err = c/(wave_p*1e-4)*err_p*1e-23
phot, = ax.plot(np.log10(wave_p),np.log10(vfv_p),'s',mfc='DimGray',mec='k',markersize=8)
ax.errorbar(np.log10(wave_p),np.log10(vfv_p),yerr=[np.log10(vfv_p)-np.log10(vfv_p-vfv_p_err), np.log10(vfv_p+vfv_p_err)-np.log10(vfv_p)],\
fmt='s',mfc='DimGray',mec='k',markersize=8)
modplot = dict()
for imod in range(0, len(modellist)):
m = ModelOutput(modellist[imod])
# if not specified, distance of the star will be taken as 1 pc.
        if aper is None:
            sed_dum = m.get_sed(group=0, inclination=0, aperture=-1, distance=dstar * pc)
            # 'ax_sed' was undefined in the original; the figure axis created above is 'ax'
            modplot['mod'+str(imod+1)], = ax.plot(np.log10(sed_dum.wav), np.log10(sed_dum.val), '-', color='GoldenRod', linewidth=1.5*mag)
else:
vfv_aper = np.empty_like(aper)
for i in range(0, len(aper)):
sed_dum = m.get_sed(group=i+1, inclination=0, aperture=-1, distance=dstar * pc)
f = interp1d(sed_dum.wav, sed_dum.val)
vfv_aper[i] = f(aper[i])
modplot['mod'+str(imod+1)], = ax.plot(np.log10(aper),np.log10(vfv_aper),'o',mfc='None',mec=cm[imod],markersize=12,\
markeredgewidth=3, label=labellist[imod], linestyle='-',color=cm[imod],linewidth=1.5*mag)
# plot fine tune
ax.set_xlabel(r'$\mathrm{log~\lambda~({\mu}m)}$',fontsize=mag*20)
ax.set_ylabel(r'$\mathrm{log~\nu S_{\nu}~(erg/cm^{2}/s)}$',fontsize=mag*20)
[ax.spines[axis].set_linewidth(1.5*mag) for axis in ['top','bottom','left','right']]
ax.minorticks_on()
ax.tick_params('both',labelsize=mag*18,width=1.5*mag,which='major',pad=15,length=5*mag)
ax.tick_params('both',labelsize=mag*18,width=1.5*mag,which='minor',pad=15,length=2.5*mag)
if obs_preset == 'sh':
ax.set_ylim([-14,-7])
ax.set_xlim([0,3])
if legend == True:
lg = ax.legend(loc='best',fontsize=14*mag,numpoints=1,framealpha=0.3)
# Write out the plot
fig.savefig(outdir+plotname+'.pdf',format='pdf',dpi=300,bbox_inches='tight')
fig.clf()
# import numpy as np
# from get_bhr71_obs import get_bhr71_obs
# obs_data = get_bhr71_obs('/Users/yaolun/bhr71/obs_for_radmc/')
# mod_num = [32,56]
# modellist = []
# modir = '/Users/yaolun/test/model'
# for mod in mod_num:
# modellist.append(modir+str(mod)+'/model'+str(mod)+'.rtout')
# outdir = '/Users/yaolun/test/'
# hyperion_sedcom(modellist, outdir, 'test', obs_data=obs_data, lbol=True, dstar=178)
|
[
"allenya@gmail.com"
] |
allenya@gmail.com
|
6eed42b73bb347a25e6a01089fb43ea62d05b786
|
282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19
|
/Malware1/venv/Lib/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py
|
9fd1780c8ab77d3e11c59fa5cff81d29267a95f1
|
[] |
no_license
|
sameerakhtar/CyberSecurity
|
9cfe58df98495eac6e4e2708e34e70b7e4c055d3
|
594973df27b4e1a43f8faba0140ce7d6c6618f93
|
refs/heads/master
| 2022-12-11T11:53:40.875462
| 2020-09-07T23:13:22
| 2020-09-07T23:13:22
| 293,598,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fa4d666d9e29f8945376652f7fb7084c3be064a66d0592c045742bd872cd8934
size 22242
|
[
"46763165+sameerakhtar@users.noreply.github.com"
] |
46763165+sameerakhtar@users.noreply.github.com
|
5744415b18511a257ef945bcfa2df6f5ae1b04d1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02954/s798759253.py
|
36a83144ddfc3b7f136a3c3568e3fead3056d7a7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
S = input() + 'R'
numbers = [0] * (len(S) - 1)
offset = 0
length = 0
right = 0
left = 0
for index, (s0, s1) in enumerate(zip(S, S[1:])):
length += 1
if s0 == 'R' and s1 == 'L':
right = length - 1
left = length
elif s0 == 'L' and s1 == 'R':
if length % 2 == 0:
numbers[offset + right] = length // 2
numbers[offset + left] = length // 2
elif right % 2 == 0:
numbers[offset + right] = length // 2 + 1
numbers[offset + left] = length // 2
else:
numbers[offset + right] = length // 2
numbers[offset + left] = length // 2 + 1
length = 0
offset = index + 1
print(*numbers)
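# Sample check (the task appears to be AtCoder ABC 136 D "Gathering Children"):
# S = "RRLRL" yields numbers = [0, 1, 2, 1, 1], the published sample answer.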
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c7c6601ede207fab5b186671af8d6270d38078ca
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0451_0500/LeetCode460_LFUCache.py
|
b209a542f7836f153654b987fa1ac33c01e19f6d
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,277
|
py
|
'''
Created on Apr 23, 2017
@author: MT
'''
class Node(object):
def __init__(self, key, val):
self.val = val
self.next = None
self.prev = None
self.freq = 1
self.key = key
class LFUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.hashmap = {}
self.freqMap = {}
self.length = 0
self.head = Node(-1, -1)
self.tail = Node(-1, -1)
self.head.next = self.tail
self.head.freq = float('-inf')
self.tail.freq = float('inf')
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.hashmap:
return -1
else:
value = self.hashmap[key].val
self.updateNode(self.hashmap[key])
return value
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.capacity == 0:
return
if key in self.hashmap:
self.hashmap[key].val = value
self.updateNode(self.hashmap[key])
else:
if self.capacity > self.length:
self.length += 1
node = Node(key, value)
self.hashmap[key] = node
node.freq = 1
if 1 in self.freqMap:
tmp = self.freqMap[1][1] # tail of freq
nextNode = tmp.next
tmp.next = node
node.prev = tmp
node.next = nextNode
node.next.prev = node
self.freqMap[1][1] = node
else:
nextNode = self.head.next
node.next = nextNode
node.prev = self.head
nextNode.prev = node
self.head.next = node
self.freqMap[1] = [node, node]
else:
node = Node(key, value)
self.hashmap[key] = node
firstNode = self.head.next
freq = firstNode.freq
if self.freqMap[freq][0] == self.freqMap[freq][1]:
self.head.next = firstNode.next
firstNode.next.prev = self.head
del self.freqMap[freq]
else:
self.freqMap[freq][0] = self.freqMap[freq][0].next
self.head.next = firstNode.next
firstNode.next.prev = self.head
del self.hashmap[firstNode.key]
if 1 in self.freqMap:
tmp = self.freqMap[1][1] # tail of freq
nextNode = tmp.next
tmp.next = node
node.prev = tmp
node.next = nextNode
node.next.prev = node
self.freqMap[1][1] = node
else:
nextNode = self.head.next
nextNode.prev = node
node.next = nextNode
self.head.next = node
node.prev = self.head
self.freqMap[1] = [node, node]
def updateNode(self, node):
freq = node.freq
nextNode = self.freqMap[freq][1].next
node.prev.next = node.next
node.next.prev = node.prev
if self.freqMap[freq][0] == self.freqMap[freq][1]:
del self.freqMap[freq]
else:
if self.freqMap[freq][0] == node:
self.freqMap[freq][0] = node.next
if self.freqMap[freq][1] == node:
self.freqMap[freq][1] = node.prev
node.freq += 1
freq += 1
if freq in self.freqMap:
tail = self.freqMap[freq][1]
node.next = tail.next
tail.next = node
node.next.prev = node
node.prev = tail
self.freqMap[freq][1] = node
else:
prevNode = nextNode.prev
prevNode.next = node
node.next = nextNode
nextNode.prev = node
node.prev = prevNode
self.freqMap[freq] = [node, node]
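# Minimal usage sketch (LeetCode 460 semantics):
#   cache = LFUCache(2)
#   cache.put(1, 1); cache.put(2, 2)
#   cache.get(1)     # -> 1, and key 1's frequency becomes 2
#   cache.put(3, 3)  # evicts key 2, the least frequently used
#   cache.get(2)     # -> -1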
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
8b634a88c02b8398beec70c04cb11898abd76653
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/models/PaddleHub/hub_all_func/test_ID_Photo_GEN.py
|
f43c8cd99596b00e1f7cb91e1e66c9e5fca71e42
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
"""ID_Photo_GEN"""
import os
import cv2
import paddle
import paddlehub as hub
if paddle.is_compiled_with_cuda():
paddle.set_device("gpu")
use_gpu = True
else:
paddle.set_device("cpu")
use_gpu = False
def test_ID_Photo_GEN_predict():
"""ID_Photo_GEN"""
os.system("hub install ID_Photo_GEN")
model = hub.Module(name="ID_Photo_GEN")
result = model.Photo_GEN(
images=[cv2.imread("face_01.jpeg")],
paths=None,
batch_size=1,
output_dir="output_ID_Photo_GEN",
visualization=True,
use_gpu=use_gpu,
)
print(result)
os.system("hub uninstall ID_Photo_GEN")
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
e0993846926170ccec472592ba4c25eadd8b01b5
|
0fa98dbc4d6256121b9f478a13ff2254047fb543
|
/12_01_typical_interview_tasks/K. One in a binary number.py
|
b5aaa9111c3f0e1f571ed9c30e528b3a2e6a41b5
|
[] |
no_license
|
vamotest/yandex_algorithms
|
48d5b29cb6e2789ea8f7e8024c798851058f1d4c
|
a588da3d21ff95e2437818493769719600f3eaf7
|
refs/heads/master
| 2023-03-19T20:44:59.373046
| 2021-01-20T19:06:28
| 2021-01-20T19:06:28
| 330,421,669
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
def binary_convert(number):
binary_number = ''
while number > 0:
binary_number = str(number % 2) + binary_number
number = number // 2
return binary_number
def find_amount(binary_number):
return binary_number.count('1')
if __name__ == '__main__':
count = find_amount(binary_convert(int(input())))
print(count)
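# Equivalent built-in one-liner: print(bin(int(input())).count('1')); the
# manual conversion above is kept as written for the exercise.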
|
[
"vamotest@gmail.com"
] |
vamotest@gmail.com
|
de6b02deb9e80be2b82f5b65928b23b1d4744a49
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4029/codes/1643_1054.py
|
8a3c8ff612464a89b4701c45ea86a8670cc77b5b
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
x = float(input("Coordinate x: "))
y = float(input("Coordinate y: "))
# Does the point (x, y) lie on the line 2x + y = 3?
if 2 * x + y == 3:
    print("point lies on the line")
else:
    print("point does not lie on the line")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
37591cad599ca130ba2cdf6a22dc5e86d13f686f
|
ae0f37ebb76bce44c5e366d62424b5ef411f94b3
|
/3. OOP/Tutorial/Rational.py
|
f83ae514fef27d48b4fc04bc06c6667e5dfeaacd
|
[] |
no_license
|
khangsk/PPL
|
a30b656a8a70b8f6dd96ce39f57d3540495a5a26
|
b8e3a04210796e03ff257c05cd1e60923f016d2f
|
refs/heads/master
| 2023-02-21T09:28:25.216162
| 2021-01-18T09:35:15
| 2021-01-18T09:35:15
| 306,542,959
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
import math
class Rational:
def __init__(self, n = 0, d = 1):
        assert d != 0
        g = math.gcd(abs(n), abs(d))
        self.numer = n // g    # integer division is exact, since g divides n
        self.denom = d // g
def __str__(self):
return str(self.numer) + "/" + str(self.denom)
def __add__(self, another):
targetType = type(another).__name__
if targetType == 'int':
return self.addRational(Rational(another))
elif targetType == 'Rational':
return self.addRational(another)
raise Exception('Rational not support operator + with type ' + targetType)
def addRational(self, r):
assert(type(r).__name__ == 'Rational')
return Rational(
self.numer * r.denom + self.denom * r.numer,
self.denom * r.denom
)
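# Minimal usage sketch of the class above:
#   print(Rational(1, 2) + Rational(1, 3))  # -> 5/6
#   print(Rational(1, 2) + 1)               # -> 3/2
#   Rational(1, 2) + 1.5                    # raises: '+' not supported for float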
|
[
"hoanggiakhangsk@gmail.com"
] |
hoanggiakhangsk@gmail.com
|