| column | dtype | values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
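The records below follow this schema. As a quick illustration of how a slice with these columns can be inspected, here is a minimal sketch that assumes the rows have been exported to a hypothetical `sample.jsonl` file and that pandas is available; the filename and the pandas-based approach are assumptions, not part of the original preview.

```python
import pandas as pd

# Load a hypothetical JSON-lines export of the records shown below.
df = pd.read_json("sample.jsonl", lines=True)

# Keep only permissively licensed, non-vendored, non-generated Python files.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["extension"] == "py")
)
small = df[mask & (df["length_bytes"] < 10_000)]

# Show where each surviving file came from.
print(small[["repo_name", "path", "length_bytes", "star_events_count"]])
```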
blob_id: ead703529581292a7c663c69e44344fb910610e4
directory_id: 801f367bd19b8f2ab08669fd0a85aad7ace961ac
path: /project/experiments/exp_030_robogrammar/src/common/linux.py
content_id: 8d7990c8cae792fb0aa965e86f5f64a34241697d
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Wendong-Huo/thesis-bodies
snapshot_id: d91b694a6b1b6a911476573ed1ed27eb27fb000d
revision_id: dceb8a36efd2cefc611f6749a52b56b9d3572f7a
branch_name: refs/heads/main
visit_date: 2023-04-17T18:32:38.541537
revision_date: 2021-03-12T19:53:23
committer_date: 2021-03-12T19:53:23
github_id: 623,471,326
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2023-04-04T12:45:48
gha_created_at: 2023-04-04T12:45:47
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 307
extension: py
content:
def fullscreen():
    from pymouse import PyMouse
    from pykeyboard import PyKeyboard
    m = PyMouse()
    k = PyKeyboard()
    x_dim, y_dim = m.screen_size()
    m.click(int(x_dim/3), int(y_dim/2), 1)
    k.press_key(k.control_key)
    k.tap_key(k.function_keys[11])
    k.release_key(k.control_key)
authors: ["sliu1@uvm.edu"]
author_id: sliu1@uvm.edu
blob_id: d3235a476b6b76bf59f61fb0854d4598582a9b60
directory_id: 57650db9c8bdd1bcf58860fc8f087d5772e79815
path: /Python/ArrayListProblems/RotateImage.py
content_id: 30e6edc81b9cdc1facc8bbe84a2e711e537a6cd7
detected_licenses: []
license_type: no_license
repo_name: kocsenc/kchung
snapshot_id: 20573a0fae5154b85cdf30ba8d39a446a5ff9e02
revision_id: 0a9c236d785ca80d0c5b663bf3c6dccb6a3dca14
branch_name: refs/heads/master
visit_date: 2021-01-19T08:32:50.403874
revision_date: 2015-03-18T17:36:00
committer_date: 2015-03-18T17:36:00
github_id: 6,600,134
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 395
extension: py
content:
# Kocsen Chung
# Problem from Cracking the Coding Interview
# 1.6
# Given an image represented by a NxN Matrix of arrays size N, (each pixel is 4bytes),
# Can you rotate the image 90 degrees?
# Challenge: do it in place: space complexity
# Assumptions:
# Rotation is clockwise

import unittest


def rotate(matrix):
    pass


class RotateTest(unittest.TestCase):
    """ RotateTest Unite tests"""
authors: ["kocsenc@gmail.com"]
author_id: kocsenc@gmail.com
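The `rotate` function in the file above is left as a stub (`pass`). As an illustration only, and not part of the stored file, a minimal in-place clockwise rotation matching the problem statement in the comments might look like this; the name `rotate_in_place` is assumed here.

```python
def rotate_in_place(matrix):
    """Rotate an NxN matrix 90 degrees clockwise using O(1) extra space."""
    n = len(matrix)
    for layer in range(n // 2):
        first, last = layer, n - 1 - layer
        for i in range(first, last):
            offset = i - first
            top = matrix[first][i]                                      # save top
            matrix[first][i] = matrix[last - offset][first]             # left -> top
            matrix[last - offset][first] = matrix[last][last - offset]  # bottom -> left
            matrix[last][last - offset] = matrix[i][last]               # right -> bottom
            matrix[i][last] = top                                       # top -> right
    return matrix


assert rotate_in_place([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]
```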
blob_id: cb2f8b2bee5279f5b39bfd405cdf5b3b60f4cefc
directory_id: 24fc6bdca8f57a1ab8cb87935a722b02d9cf5192
path: /processors/linktableupdater.py
content_id: 7de0bd781c9f48ef1b1ae85b12a443ea4275c3c3
detected_licenses: []
license_type: no_license
repo_name: necrop/gel_build
snapshot_id: 9c1eb48226082a244f74d6e01297ce5daaa8314e
revision_id: ad88e7333b35898a0598a55ef4b18dba5f9dc59e
branch_name: refs/heads/master
visit_date: 2016-09-16T06:47:25.320414
revision_date: 2014-05-05T07:15:25
committer_date: 2014-05-05T07:15:25
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,263
extension: py
content:
"""
LinkTableUpdater
"""
import os
from lex import lexconfig
from lex.odo.linkmanager import LinkInferrer, LinkUpdater
LINKS_DIR = lexconfig.ODO_LINKS_DIR
ODO_SOURCE_FILE = os.path.join(LINKS_DIR, 'source', '%s_to_oedlatest_20120817.xml')
ODO_OUTPUT_FILE = os.path.join(LINKS_DIR, '%s_to_oed.xml')
OED_SOURCE_FILE = os.path.join(LINKS_DIR, 'source', 'oed_to_ode.xml')
OED_TO_ODE = os.path.join(LINKS_DIR, 'oed_to_ode.xml')
OED_TO_NOAD = os.path.join(LINKS_DIR, 'oed_to_noad.xml')
def update_tables():
for dictname in ('ode', 'noad'):
print('Updating link table for %s...' % dictname.upper())
updater = LinkUpdater(
dictName=dictname,
odoIn=ODO_SOURCE_FILE % dictname,
odoOut=ODO_OUTPUT_FILE % dictname,
)
updater.update_odo(validLinksOnly=True)
print('Updating link table for OED -> ODE...')
updater = LinkUpdater(
dictName='ode',
oedIn=OED_SOURCE_FILE,
oedOut=OED_TO_ODE,
)
updater.update_oed(validLinksOnly=True)
def infer_noad():
print('Inferring link table for OED -> NOAD...')
inferrer = LinkInferrer(
inFile=OED_TO_ODE,
outFile=OED_TO_NOAD,
)
inferrer.infer()
authors: ["jmccracken@fastmail.fm"]
author_id: jmccracken@fastmail.fm
blob_id: 4fc933b8883c5643a089e799e661c1ffe1591cc4
directory_id: 9fe35ecd724eaa0bed1b842602756696739344d8
path: /IMDB_top_rate_movies.py
content_id: a93bca5a2cec1b06d53043d48e1c432fba0c6773
detected_licenses: []
license_type: no_license
repo_name: happy-thakur/python-scraping
snapshot_id: fee2903257dcbd6c3431f2a5c5878f38e3364660
revision_id: 2f92f1054dae57f8c0768a0c262bf7037c20b9cb
branch_name: refs/heads/master
visit_date: 2021-09-02T00:24:27.953340
revision_date: 2017-12-29T11:17:16
committer_date: 2017-12-29T11:17:16
github_id: 115,716,143
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,827
extension: py
content:
####################################################################################
# MY FIRST PROJECT TO SCRAP IMDB WEBSITES TOP RATED MOVIES NAME AND POSTERS        #
####################################################################################
# http://www.imdb.com/chart/top?ref_=nv_mv_250_6                                   #
# ------------------                                                               #
# By: SURYANSH SINGH                                                               #
# ------------------                                                               #

from bs4 import BeautifulSoup
from selenium import webdriver
import requests
import os


class Film():
    def __init__(self):
        self.title = ""
        self.rank = ""
        self.year = ""
        self.link = ""


def get_film_list():
    driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
    url = "http://www.imdb.com/chart/top?ref_=nv_mv_250_6"
    driver.get(url)
    # class="chart full-width"
    soup = BeautifulSoup(driver.page_source, "lxml")
    table = soup.find("table", class_ = "chart")
    all_td = table.find_all('td', class_ = "titleColumn")
    # print all_tr
    film_list = []
    for td in all_td:
        ref_link = td.find('a')['href']
        full_title = td.text.encode("UTF-8").strip().replace('\n', '').replace(' ', '')
        print full_title
        rank = full_title.split('.')[0]
        title = full_title.split('.')[1].split('(')[0]
        year = full_title.split('(')[1].split(')')[0]
        newFilm = Film()
        newFilm.title = title
        newFilm.rank = rank
        newFilm.year = year
        newFilm.link = ref_link
        film_list.append(newFilm)
    driver.quit()
    return film_list
    # print table
    # print driver.page_source.encode("UTF-8")

# for f in get_film_list():
#     print "-----------------------"
#     print f.rank
#     print f.title
#     print f.year
#     print f.link
#     print "-----------------------"


def get_poster_for_each_film(film_list):
    if not os.path.exists('film poster'):
        os.makedirs('film poster')
    for film in film_list[0:2]:
        url = "http://www.imdb.com"+film.link
        driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
        driver.get(url)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        poster_div = soup.find('div', class_ = 'poster')
        poster_link = poster_div.find('a')['href']
        # print poster_link
        # print poster_div
        # print driver.page_source.encode("UTF-8")
        # move to the link and then get the div with the class pswp__zoom-wrap -> find a and get image form second image..
        driver.get('http://www.imdb.com'+poster_link)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        img_div = soup.find_all("div", class_ = 'pswp__zoom-wrap')
        temp = 0;
        for div in img_div:
            temp = temp + 1
            img_src = div.find_all('img')[1]['src']
            f = open('film poster\{0}{1}.jpg'.format(film.title.encode("UTF-8").replace(':', ''), temp), 'wb')
            f.write(requests.get(img_src).content)
            print img_src
            f.close
        driver.quit()


get_poster_for_each_film(get_film_list())
authors: ["suryanshsinghstudy@gmail.com"]
author_id: suryanshsinghstudy@gmail.com
blob_id: cbca206d1fe04ed45afc6b19d2ee78922ba894ee
directory_id: 0d7bd907ddb0cec85566bde344de00073c69cfd0
path: /mos_tests/ceilometer/test_restarts.py
content_id: 2a8f1cdd7840e2a77877b91b26cac9c943734fde
detected_licenses: []
license_type: no_license
repo_name: Mirantis/mos-integration-tests
snapshot_id: bdc8e014a646e05961988a88e8c4f80935bece0f
revision_id: 8aced2855b78b5f123195d188c80e27b43888a2e
branch_name: refs/heads/master
visit_date: 2020-04-04T07:17:57.570280
revision_date: 2017-03-17T11:25:08
committer_date: 2017-03-17T11:25:08
github_id: 47,408,971
star_events_count: 16
fork_events_count: 39
gha_license_id: null
gha_event_created_at: 2020-02-26T11:56:54
gha_created_at: 2015-12-04T14:33:39
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,542
extension: py
content:
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest


@pytest.mark.testrail_id('1295468')
def test_restart_all_services(env, os_conn, ceilometer_client):
    """Restart all Ceilometer services

    Scenario:
        1. Boot vm1
        2. Check that vm1 meters list is not empty
        3. Restart ceilometer services on all controllers
        4. Boot vm2
        5. Check that vm2 meters list is not empty
    """
    internal_net = os_conn.int_networks[0]
    instance_keypair = os_conn.create_key(key_name='instancekey')
    security_group = os_conn.create_sec_group_for_ssh()

    # Boot vm1
    vm1 = os_conn.create_server(name='vm1',
                                availability_zone='nova',
                                key_name=instance_keypair.name,
                                nics=[{'net-id': internal_net['id']}],
                                security_groups=[security_group.id])
    query = [dict(field='resource_id', op='eq', value=vm1.id)]
    meters = ceilometer_client.meters.list(q=query)
    assert len(meters) > 0

    # Restart ceilometer services
    ceilometer_services_cmd = ("initctl list | grep running | "
                               "grep ceilometer | awk '{ print $1 }'")
    for node in env.get_nodes_by_role('controller'):
        with node.ssh() as remote:
            output = remote.check_call(ceilometer_services_cmd).stdout_string
            for service in output.splitlines():
                remote.check_call('service {0} restart'.format(service))

    # Boot vm2
    vm2 = os_conn.create_server(name='vm2',
                                availability_zone='nova',
                                key_name=instance_keypair.name,
                                nics=[{'net-id': internal_net['id']}],
                                security_groups=[security_group.id])
    query = [dict(field='resource_id', op='eq', value=vm2.id)]
    meters = ceilometer_client.meters.list(q=query)
    assert len(meters) > 0
authors: ["gdyuldin@mirantis.com"]
author_id: gdyuldin@mirantis.com
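The restart step in the test above collects service names with a shell pipeline (`initctl list | grep running | grep ceilometer | awk '{ print $1 }'`). As a rough illustration of what that pipeline extracts, here is a small Python sketch; the sample `initctl list` lines are invented for the example.

```python
# Hypothetical `initctl list` output, invented for illustration only.
sample_output = """\
ceilometer-agent-notification start/running, process 1234
ceilometer-api start/running, process 1235
neutron-server start/running, process 1236
ceilometer-collector stop/waiting
"""

# Keep running ceilometer jobs and take the first field (the service name),
# mirroring the grep/grep/awk chain used in the test.
services = [
    line.split()[0]
    for line in sample_output.splitlines()
    if "running" in line and "ceilometer" in line
]
print(services)  # ['ceilometer-agent-notification', 'ceilometer-api']
```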
blob_id: a1f3ec33ab1309af2565027fcd90c45383a3935e
directory_id: 9b64f0f04707a3a18968fd8f8a3ace718cd597bc
path: /huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/bssintl_client.py
content_id: 00d5e67c6b5163d718ef1c8ed60c547f9b81f5db
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: jaminGH/huaweicloud-sdk-python-v3
snapshot_id: eeecb3fb0f3396a475995df36d17095038615fba
revision_id: 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
branch_name: refs/heads/master
visit_date: 2023-06-18T11:49:13.958677
revision_date: 2021-07-16T07:57:47
committer_date: 2021-07-16T07:57:47
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 98,759
extension: py
content:
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class BssintlClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(BssintlClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkbssintl.v2.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls, "GlobalCredentials")
if clazz.__name__ != "BssintlClient":
raise TypeError("client type error, support client type is BssintlClient")
return ClientBuilder(clazz, "GlobalCredentials")
def list_conversions(self, request):
"""查询使用量单位进制
功能描述:伙伴在伙伴销售平台上查询使用量单位的进制转换信息,用于不同度量单位之间的转换。
:param ListConversionsRequest request
:return: ListConversionsResponse
"""
return self.list_conversions_with_http_info(request)
def list_conversions_with_http_info(self, request):
"""查询使用量单位进制
功能描述:伙伴在伙伴销售平台上查询使用量单位的进制转换信息,用于不同度量单位之间的转换。
:param ListConversionsRequest request
:return: ListConversionsResponse
"""
all_params = ['x_language', 'measure_type']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'measure_type' in local_var_params:
query_params.append(('measure_type', local_var_params['measure_type']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bases/conversions',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListConversionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_free_resource_usages(self, request):
"""查询资源内使用量
功能描述:客户在自建平台查询客户自己的资源包列表
:param ListFreeResourceUsagesRequest request
:return: ListFreeResourceUsagesResponse
"""
return self.list_free_resource_usages_with_http_info(request)
def list_free_resource_usages_with_http_info(self, request):
"""查询资源内使用量
功能描述:客户在自建平台查询客户自己的资源包列表
:param ListFreeResourceUsagesRequest request
:return: ListFreeResourceUsagesResponse
"""
all_params = ['req', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/payments/free-resources/usages/details/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListFreeResourceUsagesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_free_resources(self, request):
"""查询资源包列表
功能描述:查询资源包列表
:param ListFreeResourcesRequest request
:return: ListFreeResourcesResponse
"""
return self.list_free_resources_with_http_info(request)
def list_free_resources_with_http_info(self, request):
"""查询资源包列表
功能描述:查询资源包列表
:param ListFreeResourcesRequest request
:return: ListFreeResourcesResponse
"""
all_params = ['req', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/payments/free-resources/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListFreeResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_measure_units(self, request):
"""查询使用量单位列表
功能描述:伙伴在伙伴销售平台上查询资源使用量的度量单位及名称,度量单位类型等。
:param ListMeasureUnitsRequest request
:return: ListMeasureUnitsResponse
"""
return self.list_measure_units_with_http_info(request)
def list_measure_units_with_http_info(self, request):
"""查询使用量单位列表
功能描述:伙伴在伙伴销售平台上查询资源使用量的度量单位及名称,度量单位类型等。
:param ListMeasureUnitsRequest request
:return: ListMeasureUnitsResponse
"""
all_params = ['x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bases/measurements',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListMeasureUnitsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def change_enterprise_realname_authentication(self, request):
"""申请实名认证变更
功能描述:客户可以进行实名认证变更申请。
:param ChangeEnterpriseRealnameAuthenticationRequest request
:return: ChangeEnterpriseRealnameAuthenticationResponse
"""
return self.change_enterprise_realname_authentication_with_http_info(request)
def change_enterprise_realname_authentication_with_http_info(self, request):
"""申请实名认证变更
功能描述:客户可以进行实名认证变更申请。
:param ChangeEnterpriseRealnameAuthenticationRequest request
:return: ChangeEnterpriseRealnameAuthenticationResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/customers/realname-auths/enterprise',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ChangeEnterpriseRealnameAuthenticationResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def check_user_identity(self, request):
"""校验客户注册信息
功能描述:客户注册时可检查客户的登录名称、手机号或者邮箱是否可以用于注册。
:param CheckUserIdentityRequest request
:return: CheckUserIdentityResponse
"""
return self.check_user_identity_with_http_info(request)
def check_user_identity_with_http_info(self, request):
"""校验客户注册信息
功能描述:客户注册时可检查客户的登录名称、手机号或者邮箱是否可以用于注册。
:param CheckUserIdentityRequest request
:return: CheckUserIdentityResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/users/check-identity',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CheckUserIdentityResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_enterprise_realname_authentication(self, request):
"""申请企业实名认证
功能描述:企业客户可以进行企业实名认证申请。
:param CreateEnterpriseRealnameAuthenticationRequest request
:return: CreateEnterpriseRealnameAuthenticationResponse
"""
return self.create_enterprise_realname_authentication_with_http_info(request)
def create_enterprise_realname_authentication_with_http_info(self, request):
"""申请企业实名认证
功能描述:企业客户可以进行企业实名认证申请。
:param CreateEnterpriseRealnameAuthenticationRequest request
:return: CreateEnterpriseRealnameAuthenticationResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/customers/realname-auths/enterprise',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateEnterpriseRealnameAuthenticationResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_personal_realname_auth(self, request):
"""申请个人实名认证
功能描述:个人客户可以进行个人实名认证申请。
:param CreatePersonalRealnameAuthRequest request
:return: CreatePersonalRealnameAuthResponse
"""
return self.create_personal_realname_auth_with_http_info(request)
def create_personal_realname_auth_with_http_info(self, request):
"""申请个人实名认证
功能描述:个人客户可以进行个人实名认证申请。
:param CreatePersonalRealnameAuthRequest request
:return: CreatePersonalRealnameAuthResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/customers/realname-auths/individual',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreatePersonalRealnameAuthResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_sub_customer(self, request):
"""创建客户
功能描述:在伙伴销售平台创建客户时同步创建华为云账号,并将客户在伙伴销售平台上的账号与华为云账号进行映射。同时,创建的华为云账号与伙伴账号关联绑定。华为云伙伴能力中心(一级经销商)可以注册精英服务商伙伴(二级经销商)的子客户。注册完成后,子客户可以自动和精英服务商伙伴绑定。
:param CreateSubCustomerRequest request
:return: CreateSubCustomerResponse
"""
return self.create_sub_customer_with_http_info(request)
def create_sub_customer_with_http_info(self, request):
"""创建客户
功能描述:在伙伴销售平台创建客户时同步创建华为云账号,并将客户在伙伴销售平台上的账号与华为云账号进行映射。同时,创建的华为云账号与伙伴账号关联绑定。华为云伙伴能力中心(一级经销商)可以注册精英服务商伙伴(二级经销商)的子客户。注册完成后,子客户可以自动和精英服务商伙伴绑定。
:param CreateSubCustomerRequest request
:return: CreateSubCustomerResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateSubCustomerResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def freeze_sub_customers(self, request):
"""冻结伙伴子客户
功能描述:冻结伙伴子客户
:param FreezeSubCustomersRequest request
:return: FreezeSubCustomersResponse
"""
return self.freeze_sub_customers_with_http_info(request)
def freeze_sub_customers_with_http_info(self, request):
"""冻结伙伴子客户
功能描述:冻结伙伴子客户
:param FreezeSubCustomersRequest request
:return: FreezeSubCustomersResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/freeze',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='FreezeSubCustomersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_customer_on_demand_resources(self, request):
"""查询客户按需资源列表
功能描述:客户在伙伴销售平台查询已开通的按需资源
:param ListCustomerOnDemandResourcesRequest request
:return: ListCustomerOnDemandResourcesResponse
"""
return self.list_customer_on_demand_resources_with_http_info(request)
def list_customer_on_demand_resources_with_http_info(self, request):
"""查询客户按需资源列表
功能描述:客户在伙伴销售平台查询已开通的按需资源
:param ListCustomerOnDemandResourcesRequest request
:return: ListCustomerOnDemandResourcesResponse
"""
all_params = ['req', 'x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/on-demand-resources/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomerOnDemandResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_customerself_resource_record_details(self, request):
"""查询资源详单
功能描述:客户在客户自建平台查询自己的资源详单,用于反映各类资源的消耗情况。
:param ListCustomerselfResourceRecordDetailsRequest request
:return: ListCustomerselfResourceRecordDetailsResponse
"""
return self.list_customerself_resource_record_details_with_http_info(request)
def list_customerself_resource_record_details_with_http_info(self, request):
"""查询资源详单
功能描述:客户在客户自建平台查询自己的资源详单,用于反映各类资源的消耗情况。
:param ListCustomerselfResourceRecordDetailsRequest request
:return: ListCustomerselfResourceRecordDetailsResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/customer-bills/res-records/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomerselfResourceRecordDetailsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_customerself_resource_records(self, request):
"""查询资源消费记录
功能描述:客户在客户自建平台查询每个资源的消费明细数据
:param ListCustomerselfResourceRecordsRequest request
:return: ListCustomerselfResourceRecordsResponse
"""
return self.list_customerself_resource_records_with_http_info(request)
def list_customerself_resource_records_with_http_info(self, request):
"""查询资源消费记录
功能描述:客户在客户自建平台查询每个资源的消费明细数据
:param ListCustomerselfResourceRecordsRequest request
:return: ListCustomerselfResourceRecordsResponse
"""
all_params = ['cycle', 'x_language', 'cloud_service_type', 'region', 'charge_mode', 'bill_type', 'offset', 'limit', 'resource_id', 'enterprise_project_id', 'include_zero_record', 'method', 'sub_customer_id', 'trade_id', 'bill_date_begin', 'bill_date_end']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'cycle' in local_var_params:
query_params.append(('cycle', local_var_params['cycle']))
if 'cloud_service_type' in local_var_params:
query_params.append(('cloud_service_type', local_var_params['cloud_service_type']))
if 'region' in local_var_params:
query_params.append(('region', local_var_params['region']))
if 'charge_mode' in local_var_params:
query_params.append(('charge_mode', local_var_params['charge_mode']))
if 'bill_type' in local_var_params:
query_params.append(('bill_type', local_var_params['bill_type']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'resource_id' in local_var_params:
query_params.append(('resource_id', local_var_params['resource_id']))
if 'enterprise_project_id' in local_var_params:
query_params.append(('enterprise_project_id', local_var_params['enterprise_project_id']))
if 'include_zero_record' in local_var_params:
query_params.append(('include_zero_record', local_var_params['include_zero_record']))
if 'method' in local_var_params:
query_params.append(('method', local_var_params['method']))
if 'sub_customer_id' in local_var_params:
query_params.append(('sub_customer_id', local_var_params['sub_customer_id']))
if 'trade_id' in local_var_params:
query_params.append(('trade_id', local_var_params['trade_id']))
if 'bill_date_begin' in local_var_params:
query_params.append(('bill_date_begin', local_var_params['bill_date_begin']))
if 'bill_date_end' in local_var_params:
query_params.append(('bill_date_end', local_var_params['bill_date_end']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/customer-bills/res-fee-records',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomerselfResourceRecordsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_on_demand_resource_ratings(self, request):
"""查询按需产品价格
功能描述:按需资源询价
:param ListOnDemandResourceRatingsRequest request
:return: ListOnDemandResourceRatingsResponse
"""
return self.list_on_demand_resource_ratings_with_http_info(request)
def list_on_demand_resource_ratings_with_http_info(self, request):
"""查询按需产品价格
功能描述:按需资源询价
:param ListOnDemandResourceRatingsRequest request
:return: ListOnDemandResourceRatingsResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/ratings/on-demand-resources',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListOnDemandResourceRatingsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_order_discounts(self, request):
"""查询订单可用折扣
功能描述:功能介绍客户在伙伴销售平台支付待支付订单时,查询可使用的折扣。只返回商务合同折扣和伙伴授权折扣客户在客户自建平台查看订单可用的优惠券列表。
:param ListOrderDiscountsRequest request
:return: ListOrderDiscountsResponse
"""
return self.list_order_discounts_with_http_info(request)
def list_order_discounts_with_http_info(self, request):
"""查询订单可用折扣
功能描述:功能介绍客户在伙伴销售平台支付待支付订单时,查询可使用的折扣。只返回商务合同折扣和伙伴授权折扣客户在客户自建平台查看订单可用的优惠券列表。
:param ListOrderDiscountsRequest request
:return: ListOrderDiscountsResponse
"""
all_params = ['order_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'order_id' in local_var_params:
query_params.append(('order_id', local_var_params['order_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/order-discounts',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListOrderDiscountsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_postpaid_bill_sum(self, request):
"""查询伙伴月度消费账单
功能描述:伙伴可以查询伙伴月度消费账单
:param ListPostpaidBillSumRequest request
:return: ListPostpaidBillSumResponse
"""
return self.list_postpaid_bill_sum_with_http_info(request)
def list_postpaid_bill_sum_with_http_info(self, request):
"""查询伙伴月度消费账单
功能描述:伙伴可以查询伙伴月度消费账单
:param ListPostpaidBillSumRequest request
:return: ListPostpaidBillSumResponse
"""
all_params = ['bill_cycle']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'bill_cycle' in local_var_params:
query_params.append(('bill_cycle', local_var_params['bill_cycle']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/partner-bills/postpaid-bill-summary',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPostpaidBillSumResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_rate_on_period_detail(self, request):
"""查询包年/包月产品价格
功能描述:客户在自建平台按照条件查询包年/包月产品开通时候的价格
:param ListRateOnPeriodDetailRequest request
:return: ListRateOnPeriodDetailResponse
"""
return self.list_rate_on_period_detail_with_http_info(request)
def list_rate_on_period_detail_with_http_info(self, request):
"""查询包年/包月产品价格
功能描述:客户在自建平台按照条件查询包年/包月产品开通时候的价格
:param ListRateOnPeriodDetailRequest request
:return: ListRateOnPeriodDetailResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/ratings/period-resources/subscribe-rate',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListRateOnPeriodDetailResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_resource_types(self, request):
"""查询资源类型列表
功能描述:客户在客户自建平台查询资源类型的列表。
:param ListResourceTypesRequest request
:return: ListResourceTypesResponse
"""
return self.list_resource_types_with_http_info(request)
def list_resource_types_with_http_info(self, request):
"""查询资源类型列表
功能描述:客户在客户自建平台查询资源类型的列表。
:param ListResourceTypesRequest request
:return: ListResourceTypesResponse
"""
all_params = ['x_language', 'resource_type_code']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'resource_type_code' in local_var_params:
query_params.append(('resource_type_code', local_var_params['resource_type_code']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bases/resource-types',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListResourceTypesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_service_resources(self, request):
"""根据云服务类型查询资源列表
功能描述:伙伴在伙伴销售平台根据云服务类型查询关联的资源类型编码和名称,用于查询按需产品的价格或包年/包月产品的价格。
:param ListServiceResourcesRequest request
:return: ListServiceResourcesResponse
"""
return self.list_service_resources_with_http_info(request)
def list_service_resources_with_http_info(self, request):
"""根据云服务类型查询资源列表
功能描述:伙伴在伙伴销售平台根据云服务类型查询关联的资源类型编码和名称,用于查询按需产品的价格或包年/包月产品的价格。
:param ListServiceResourcesRequest request
:return: ListServiceResourcesResponse
"""
all_params = ['service_type_code', 'x_language', 'limit', 'offset']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'service_type_code' in local_var_params:
query_params.append(('service_type_code', local_var_params['service_type_code']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/products/service-resources',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListServiceResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_service_types(self, request):
"""查询云服务类型列表
功能描述:伙伴在伙伴销售平台查询云服务类型的列表。
:param ListServiceTypesRequest request
:return: ListServiceTypesResponse
"""
return self.list_service_types_with_http_info(request)
def list_service_types_with_http_info(self, request):
"""查询云服务类型列表
功能描述:伙伴在伙伴销售平台查询云服务类型的列表。
:param ListServiceTypesRequest request
:return: ListServiceTypesResponse
"""
all_params = ['x_language', 'service_type_code']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'service_type_code' in local_var_params:
query_params.append(('service_type_code', local_var_params['service_type_code']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bases/service-types',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListServiceTypesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_sub_customer_coupons(self, request):
"""查询优惠券列表
功能描述:伙伴可以查询自身的优惠券信息。
:param ListSubCustomerCouponsRequest request
:return: ListSubCustomerCouponsResponse
"""
return self.list_sub_customer_coupons_with_http_info(request)
def list_sub_customer_coupons_with_http_info(self, request):
"""查询优惠券列表
功能描述:伙伴可以查询自身的优惠券信息。
:param ListSubCustomerCouponsRequest request
:return: ListSubCustomerCouponsResponse
"""
all_params = ['coupon_id', 'order_id', 'promotion_plan_id', 'coupon_type', 'status', 'active_start_time', 'active_end_time', 'offset', 'limit', 'source_id', 'indirect_partner_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'coupon_id' in local_var_params:
query_params.append(('coupon_id', local_var_params['coupon_id']))
if 'order_id' in local_var_params:
query_params.append(('order_id', local_var_params['order_id']))
if 'promotion_plan_id' in local_var_params:
query_params.append(('promotion_plan_id', local_var_params['promotion_plan_id']))
if 'coupon_type' in local_var_params:
query_params.append(('coupon_type', local_var_params['coupon_type']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'active_start_time' in local_var_params:
query_params.append(('active_start_time', local_var_params['active_start_time']))
if 'active_end_time' in local_var_params:
query_params.append(('active_end_time', local_var_params['active_end_time']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'source_id' in local_var_params:
query_params.append(('source_id', local_var_params['source_id']))
if 'indirect_partner_id' in local_var_params:
query_params.append(('indirect_partner_id', local_var_params['indirect_partner_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/promotions/benefits/coupons',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListSubCustomerCouponsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_sub_customers(self, request):
"""查询客户列表
功能描述:伙伴可以查询合作伙伴的客户信息列表。
:param ListSubCustomersRequest request
:return: ListSubCustomersResponse
"""
return self.list_sub_customers_with_http_info(request)
def list_sub_customers_with_http_info(self, request):
"""查询客户列表
功能描述:伙伴可以查询合作伙伴的客户信息列表。
:param ListSubCustomersRequest request
:return: ListSubCustomersResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListSubCustomersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_usage_types(self, request):
"""查询使用量类型列表
功能描述:伙伴在伙伴销售平台查询资源的使用量类型列表。
:param ListUsageTypesRequest request
:return: ListUsageTypesResponse
"""
return self.list_usage_types_with_http_info(request)
def list_usage_types_with_http_info(self, request):
"""查询使用量类型列表
功能描述:伙伴在伙伴销售平台查询资源的使用量类型列表。
:param ListUsageTypesRequest request
:return: ListUsageTypesResponse
"""
all_params = ['x_language', 'resource_type_code', 'offset', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'resource_type_code' in local_var_params:
query_params.append(('resource_type_code', local_var_params['resource_type_code']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/products/usage-types',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListUsageTypesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def send_verification_message_code(self, request):
"""发送验证码
功能描述:客户注册时,如果填写了手机号,可以向对应的手机发送注册验证码,校验信息的正确性。使用个人银行卡方式进行实名认证时,通过该接口向指定的手机发送验证码。
:param SendVerificationMessageCodeRequest request
:return: SendVerificationMessageCodeResponse
"""
return self.send_verification_message_code_with_http_info(request)
def send_verification_message_code_with_http_info(self, request):
"""发送验证码
功能描述:客户注册时,如果填写了手机号,可以向对应的手机发送注册验证码,校验信息的正确性。使用个人银行卡方式进行实名认证时,通过该接口向指定的手机发送验证码。
:param SendVerificationMessageCodeRequest request
:return: SendVerificationMessageCodeResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bases/verificationcode/send',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='SendVerificationMessageCodeResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_customer_account_balances(self, request):
"""查询账户余额
功能描述:查询账户余额
:param ShowCustomerAccountBalancesRequest request
:return: ShowCustomerAccountBalancesResponse
"""
return self.show_customer_account_balances_with_http_info(request)
def show_customer_account_balances_with_http_info(self, request):
"""查询账户余额
功能描述:查询账户余额
:param ShowCustomerAccountBalancesRequest request
:return: ShowCustomerAccountBalancesResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/accounts/customer-accounts/balances',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowCustomerAccountBalancesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_customer_monthly_sum(self, request):
"""查询汇总账单
功能描述:客户在客户自建平台查询自身的消费汇总账单,此账单按月汇总消费数据。
:param ShowCustomerMonthlySumRequest request
:return: ShowCustomerMonthlySumResponse
"""
return self.show_customer_monthly_sum_with_http_info(request)
def show_customer_monthly_sum_with_http_info(self, request):
"""查询汇总账单
功能描述:客户在客户自建平台查询自身的消费汇总账单,此账单按月汇总消费数据。
:param ShowCustomerMonthlySumRequest request
:return: ShowCustomerMonthlySumResponse
"""
all_params = ['bill_cycle', 'service_type_code', 'enterprise_project_id', 'offset', 'limit', 'method', 'sub_customer_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'bill_cycle' in local_var_params:
query_params.append(('bill_cycle', local_var_params['bill_cycle']))
if 'service_type_code' in local_var_params:
query_params.append(('service_type_code', local_var_params['service_type_code']))
if 'enterprise_project_id' in local_var_params:
query_params.append(('enterprise_project_id', local_var_params['enterprise_project_id']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'method' in local_var_params:
query_params.append(('method', local_var_params['method']))
if 'sub_customer_id' in local_var_params:
query_params.append(('sub_customer_id', local_var_params['sub_customer_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/bills/customer-bills/monthly-sum',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowCustomerMonthlySumResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_realname_authentication_review_result(self, request):
"""查询实名认证审核结果
功能描述:如果实名认证申请或实名认证变更申请的响应中,显示需要人工审核,使用该接口查询审核结果。
:param ShowRealnameAuthenticationReviewResultRequest request
:return: ShowRealnameAuthenticationReviewResultResponse
"""
return self.show_realname_authentication_review_result_with_http_info(request)
def show_realname_authentication_review_result_with_http_info(self, request):
"""查询实名认证审核结果
功能描述:如果实名认证申请或实名认证变更申请的响应中,显示需要人工审核,使用该接口查询审核结果。
:param ShowRealnameAuthenticationReviewResultRequest request
:return: ShowRealnameAuthenticationReviewResultResponse
"""
all_params = ['customer_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'customer_id' in local_var_params:
query_params.append(('customer_id', local_var_params['customer_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/customers/realname-auths/result',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRealnameAuthenticationReviewResultResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_sub_customer_budget(self, request):
"""查询客户预算
功能描述:查询客户预算
:param ShowSubCustomerBudgetRequest request
:return: ShowSubCustomerBudgetResponse
"""
return self.show_sub_customer_budget_with_http_info(request)
def show_sub_customer_budget_with_http_info(self, request):
"""查询客户预算
功能描述:查询客户预算
:param ShowSubCustomerBudgetRequest request
:return: ShowSubCustomerBudgetResponse
"""
all_params = ['customer_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'customer_id' in local_var_params:
query_params.append(('customer_id', local_var_params['customer_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/budget',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowSubCustomerBudgetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def unfreeze_sub_customers(self, request):
"""解冻伙伴子客户
功能描述:解冻伙伴子客户
:param UnfreezeSubCustomersRequest request
:return: UnfreezeSubCustomersResponse
"""
return self.unfreeze_sub_customers_with_http_info(request)
def unfreeze_sub_customers_with_http_info(self, request):
"""解冻伙伴子客户
功能描述:解冻伙伴子客户
:param UnfreezeSubCustomersRequest request
:return: UnfreezeSubCustomersResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/unfreeze',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UnfreezeSubCustomersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_period_to_on_demand(self, request):
"""设置或者取消包年/包月资源到期转按需
功能描述:客户可以设置包年/包月资源到期后转为按需资源计费。包年/包月计费模式到期后,按需的计费模式即生效
:param UpdatePeriodToOnDemandRequest request
:return: UpdatePeriodToOnDemandResponse
"""
return self.update_period_to_on_demand_with_http_info(request)
def update_period_to_on_demand_with_http_info(self, request):
"""设置或者取消包年/包月资源到期转按需
功能描述:客户可以设置包年/包月资源到期后转为按需资源计费。包年/包月计费模式到期后,按需的计费模式即生效
:param UpdatePeriodToOnDemandRequest request
:return: UpdatePeriodToOnDemandResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/subscriptions/resources/to-on-demand',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePeriodToOnDemandResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_sub_customer_budget(self, request):
"""设置客户预算
功能描述:设置客户预算
:param UpdateSubCustomerBudgetRequest request
:return: UpdateSubCustomerBudgetResponse
"""
return self.update_sub_customer_budget_with_http_info(request)
def update_sub_customer_budget_with_http_info(self, request):
"""设置客户预算
功能描述:设置客户预算
:param UpdateSubCustomerBudgetRequest request
:return: UpdateSubCustomerBudgetResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/partners/sub-customers/budget',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateSubCustomerBudgetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def auto_renewal_resources(self, request):
"""设置包年/包月资源自动续费
功能描述:客户可以设置包年/包月资源到期后转为按需资源计费
:param AutoRenewalResourcesRequest request
:return: AutoRenewalResourcesResponse
"""
return self.auto_renewal_resources_with_http_info(request)
def auto_renewal_resources_with_http_info(self, request):
"""设置包年/包月资源自动续费
功能描述:客户可以设置包年/包月资源到期后转为按需资源计费
:param AutoRenewalResourcesRequest request
:return: AutoRenewalResourcesResponse
"""
all_params = ['resource_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/subscriptions/resources/autorenew/{resource_id}',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AutoRenewalResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def cancel_auto_renewal_resources(self, request):
"""取消包年/包月资源自动续费
功能描述:取消包年/包月资源自动续费
:param CancelAutoRenewalResourcesRequest request
:return: CancelAutoRenewalResourcesResponse
"""
return self.cancel_auto_renewal_resources_with_http_info(request)
def cancel_auto_renewal_resources_with_http_info(self, request):
"""取消包年/包月资源自动续费
功能描述:取消包年/包月资源自动续费
:param CancelAutoRenewalResourcesRequest request
:return: CancelAutoRenewalResourcesResponse
"""
all_params = ['resource_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/subscriptions/resources/autorenew/{resource_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CancelAutoRenewalResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def cancel_customer_order(self, request):
"""取消待支付订单
功能描述:客户可以对待支付的订单进行取消操作
:param CancelCustomerOrderRequest request
:return: CancelCustomerOrderResponse
"""
return self.cancel_customer_order_with_http_info(request)
def cancel_customer_order_with_http_info(self, request):
"""取消待支付订单
功能描述:客户可以对待支付的订单进行取消操作
:param CancelCustomerOrderRequest request
:return: CancelCustomerOrderResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/cancel',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CancelCustomerOrderResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def cancel_resources_subscription(self, request):
"""退订包年/包月资源
功能描述:客户购买包年/包月资源后,支持客户退订包年/包月实例。退订资源实例包括资源续费部分和当前正在使用的部分,退订后资源将无法使用
:param CancelResourcesSubscriptionRequest request
:return: CancelResourcesSubscriptionResponse
"""
return self.cancel_resources_subscription_with_http_info(request)
def cancel_resources_subscription_with_http_info(self, request):
"""退订包年/包月资源
功能描述:客户购买包年/包月资源后,支持客户退订包年/包月实例。退订资源实例包括资源续费部分和当前正在使用的部分,退订后资源将无法使用
:param CancelResourcesSubscriptionRequest request
:return: CancelResourcesSubscriptionResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/subscriptions/resources/unsubscribe',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CancelResourcesSubscriptionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_customer_orders(self, request):
"""查询订单列表
功能描述:客户购买包年包月资源后,可以查看待审核、处理中、已取消、已完成和待支付等状态的订单
:param ListCustomerOrdersRequest request
:return: ListCustomerOrdersResponse
"""
return self.list_customer_orders_with_http_info(request)
def list_customer_orders_with_http_info(self, request):
"""查询订单列表
功能描述:客户购买包年包月资源后,可以查看待审核、处理中、已取消、已完成和待支付等状态的订单
:param ListCustomerOrdersRequest request
:return: ListCustomerOrdersResponse
"""
all_params = ['order_id', 'customer_id', 'create_time_begin', 'create_time_end', 'service_type_code', 'status', 'order_type', 'limit', 'offset', 'order_by', 'payment_time_begin', 'payment_time_end', 'indirect_partner_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'order_id' in local_var_params:
query_params.append(('order_id', local_var_params['order_id']))
if 'customer_id' in local_var_params:
query_params.append(('customer_id', local_var_params['customer_id']))
if 'create_time_begin' in local_var_params:
query_params.append(('create_time_begin', local_var_params['create_time_begin']))
if 'create_time_end' in local_var_params:
query_params.append(('create_time_end', local_var_params['create_time_end']))
if 'service_type_code' in local_var_params:
query_params.append(('service_type_code', local_var_params['service_type_code']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'order_type' in local_var_params:
query_params.append(('order_type', local_var_params['order_type']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'order_by' in local_var_params:
query_params.append(('order_by', local_var_params['order_by']))
if 'payment_time_begin' in local_var_params:
query_params.append(('payment_time_begin', local_var_params['payment_time_begin']))
if 'payment_time_end' in local_var_params:
query_params.append(('payment_time_end', local_var_params['payment_time_end']))
if 'indirect_partner_id' in local_var_params:
query_params.append(('indirect_partner_id', local_var_params['indirect_partner_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomerOrdersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_order_coupons_by_order_id(self, request):
"""查询订单可用优惠券
功能描述:客户在客户自建平台查看订单可用的优惠券列表
:param ListOrderCouponsByOrderIdRequest request
:return: ListOrderCouponsByOrderIdResponse
"""
return self.list_order_coupons_by_order_id_with_http_info(request)
def list_order_coupons_by_order_id_with_http_info(self, request):
"""查询订单可用优惠券
功能描述:客户在客户自建平台查看订单可用的优惠券列表
:param ListOrderCouponsByOrderIdRequest request
:return: ListOrderCouponsByOrderIdResponse
"""
all_params = ['order_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'order_id' in local_var_params:
query_params.append(('order_id', local_var_params['order_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/order-coupons',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListOrderCouponsByOrderIdResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_pay_per_use_customer_resources(self, request):
"""查询客户包年/包月资源列表
功能描述:客户在客户自建平台查询某个或所有的包年/包月资源
:param ListPayPerUseCustomerResourcesRequest request
:return: ListPayPerUseCustomerResourcesResponse
"""
return self.list_pay_per_use_customer_resources_with_http_info(request)
def list_pay_per_use_customer_resources_with_http_info(self, request):
"""查询客户包年/包月资源列表
功能描述:客户在客户自建平台查询某个或所有的包年/包月资源
:param ListPayPerUseCustomerResourcesRequest request
:return: ListPayPerUseCustomerResourcesResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/suscriptions/resources/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPayPerUseCustomerResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_resource_usages(self, request):
"""查询套餐内使用量
功能描述:客户在客户自建平台查询套餐内的使用量
:param ListResourceUsagesRequest request
:return: ListResourceUsagesResponse
"""
return self.list_resource_usages_with_http_info(request)
def list_resource_usages_with_http_info(self, request):
"""查询套餐内使用量
功能描述:客户在客户自建平台查询套餐内的使用量
:param ListResourceUsagesRequest request
:return: ListResourceUsagesResponse
"""
all_params = ['x_language']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/payments/free-resources/usages/query',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListResourceUsagesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def pay_orders(self, request):
"""支付包年/包月产品订单
功能描述:客户可以对待支付状态的包年/包月产品订单进行支付
:param PayOrdersRequest request
:return: PayOrdersResponse
"""
return self.pay_orders_with_http_info(request)
def pay_orders_with_http_info(self, request):
"""支付包年/包月产品订单
功能描述:客户可以对待支付状态的包年/包月产品订单进行支付
:param PayOrdersRequest request
:return: PayOrdersResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/pay',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='PayOrdersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def renewal_resources(self, request):
"""续订包年/包月资源
功能描述:客户的包年包/月资源即将到期时,可进行包年/包月资源的续订
:param RenewalResourcesRequest request
:return: RenewalResourcesResponse
"""
return self.renewal_resources_with_http_info(request)
def renewal_resources_with_http_info(self, request):
"""续订包年/包月资源
功能描述:客户的包年包/月资源即将到期时,可进行包年/包月资源的续订
:param RenewalResourcesRequest request
:return: RenewalResourcesResponse
"""
all_params = ['req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/subscriptions/resources/renew',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RenewalResourcesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_customer_order_details(self, request):
"""查询订单详情
功能描述:客户可以查看订单详情
:param ShowCustomerOrderDetailsRequest request
:return: ShowCustomerOrderDetailsResponse
"""
return self.show_customer_order_details_with_http_info(request)
def show_customer_order_details_with_http_info(self, request):
"""查询订单详情
功能描述:客户可以查看订单详情
:param ShowCustomerOrderDetailsRequest request
:return: ShowCustomerOrderDetailsResponse
"""
all_params = ['order_id', 'x_language', 'limit', 'offset', 'indirect_partner_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'order_id' in local_var_params:
path_params['order_id'] = local_var_params['order_id']
query_params = []
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'indirect_partner_id' in local_var_params:
query_params.append(('indirect_partner_id', local_var_params['indirect_partner_id']))
header_params = {}
if 'x_language' in local_var_params:
header_params['X-Language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/details/{order_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowCustomerOrderDetailsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_refund_order_details(self, request):
"""查询退款订单的金额详情
功能描述:客户在伙伴销售平台查询某次退订订单或者降配订单的退款金额来自哪些资源和对应订单
:param ShowRefundOrderDetailsRequest request
:return: ShowRefundOrderDetailsResponse
"""
return self.show_refund_order_details_with_http_info(request)
def show_refund_order_details_with_http_info(self, request):
"""查询退款订单的金额详情
功能描述:客户在伙伴销售平台查询某次退订订单或者降配订单的退款金额来自哪些资源和对应订单
:param ShowRefundOrderDetailsRequest request
:return: ShowRefundOrderDetailsResponse
"""
all_params = ['order_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'order_id' in local_var_params:
query_params.append(('order_id', local_var_params['order_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/orders/customer-orders/refund-orders',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRefundOrderDetailsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type)
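# --- Illustrative usage sketch (added note; not part of the generated SDK) ---
# Callers build a *Request object and pass it to the matching client method.
# The client/factory construction and the request constructors are defined
# elsewhere in the SDK, so the names below are assumptions for illustration:
#
#     request = PayOrdersRequest()             # populate request.body as needed
#     response = client.pay_orders(request)    # -> PayOrdersResponse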
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e0257d3630a0744a11c1f8fc9a29987008941d74
|
10443fb39a7e8e85f0939276f83f3b5fdddcb1af
|
/manage.py
|
0fa869a70e41e61ece5daa12d1663e92cc26a058
|
[] |
no_license
|
antdh2/EOShopifyStock
|
51410e987c070caee9f9f42254682da31c7a890f
|
b799edf09d9bdef9db5120afc95409fafc6b3893
|
refs/heads/master
| 2021-07-17T17:33:48.342156
| 2017-10-23T13:15:02
| 2017-10-23T13:15:02
| 107,971,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shopifystock.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
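# Added illustrative note: typical invocations of this entry point using the
# standard Django management CLI:
#     python manage.py runserver
#     python manage.py migrate
#     python manage.py createsuperuser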
|
[
"antdh2@gmail.com"
] |
antdh2@gmail.com
|
b0cb2fcad64f1d8bc01c179b0c918d51cdc79715
|
ba17b547389c3e38e845aae65dd8fce633a0c4d6
|
/mtsp_solver.py
|
feadb28467ca215a7a15ac049bf66bf0fe70d8cb
|
[] |
no_license
|
ShivinDass/Multi-Agent-TSP-with-ACO
|
ff2b205da62b1d316841774967b4a78007f1f44f
|
46d868c7068127d5de47a5a2750cb533a199a246
|
refs/heads/master
| 2021-09-23T07:30:18.425383
| 2021-09-17T01:04:04
| 2021-09-17T01:04:04
| 225,697,267
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,784
|
py
|
import pygame,random,copy,math,sys
display_width=500
display_height=500
pygame.init()
gameDisplay=pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Game Window')
clock=pygame.time.Clock()
#######COLOR DEFINITION#########
Red=(255,0,0)
Blue=(0,0,255)
Green=(0,255,0)
Black=(0,0,0)
Yellow=(255,255,0)
################################
class group:
def __init__(self):
self.members=[]
self.targetsNotVisited=set([])
def addAgent(self,A):
self.members.append(A)
def initializeTSet(self):
for i in range(numberTargets):
self.targetsNotVisited.add(i)
def totalLength(self):
S=0
for j in range(numberDepots):
S+=self.members[j].lengthTravelled()
return S
class Agent:
def __init__(self,startSpot):
self.radius=5
self.startSpot=startSpot
self.x=depots[startSpot].x
self.y=depots[startSpot].y
self.path=[startSpot+numberTargets]
self.fuel=depots[startSpot].fuel
def update(self,x_pos,y_pos):
self.x=x_pos
self.y=y_pos
def display(self):
pygame.draw.circle(gameDisplay,Green,(self.x,self.y),self.radius)
def reset(self):
self.x=depots[self.startSpot].x
self.y=depots[self.startSpot].y
self.path=[self.startSpot+numberTargets]
self.fuel=depots[self.startSpot].fuel
def addToPath(self,t):
self.path.append(t)
def getCurrentPos(self):
return self.path[len(self.path)-1]
def lengthTravelled(self):
totalD=0
for i in range(len(self.path)-1):
totalD+=distance[min(self.path[i],self.path[i+1])][max(self.path[i],self.path[i+1])]
return totalD
class targ:
def __init__(self,x,y):
self.radius=10
self.x=x
self.y=y
def display(self):
pygame.draw.circle(gameDisplay,Red,(self.x,self.y),self.radius)
def dist(self, loc):
return ((self.x-loc.x)**2+(self.y-loc.y)**2)**0.5
class depo:
def __init__(self,x,y,f):
self.radius=10
self.x=x
self.y=y
self.fuel=f
#self.agents=[]
def display(self):
pygame.draw.circle(gameDisplay,Blue,(self.x,self.y),self.radius)
def dist(self, loc):
return ((self.x-loc.x)**2+(self.y-loc.y)**2)**0.5
def initialize_targets(numberTargets):
t=[]
i=0
while i<numberTargets:
X=targ(random.randrange(10,display_width-10),random.randrange(10,display_height-10))
flag=1
for j in range(len(t)):
if X.dist(t[j])<30:
flag=0
break
if flag==1:
t.append(copy.deepcopy(X))
i+=1
return t;
def initialize_depots(numberDepots):
d=[]
f=500
i=0
while i<numberDepots:
X=depo(random.randrange(10,display_width-10),random.randrange(10,display_height-10),800)#random.randrange(500,600))
flag=1
for j in range(len(targets)):
if X.dist(targets[j])<30:
flag=0
break
for j in range(len(d)):
if X.dist(d[j])<30:
flag=0
break
if flag==1:
d.append(copy.deepcopy(X))
i+=1
return d;
def drawMap():
for i in range(numberTargets+numberDepots):
for j in range(numberTargets+numberDepots):
if j>i:
concentration=math.floor(0.000001*(pheromone[i][j]**3)*distance[i][j])+1
x1=targAndDep[i].x
y1=targAndDep[i].y
x2=targAndDep[j].x
y2=targAndDep[j].y
x3=(x2-x1)/concentration
y3=(y2-y1)/concentration
for k in range(0,concentration):
pygame.draw.circle(gameDisplay,Yellow,(math.floor(k*x3+x1),math.floor(k*y3+y1)),1)
for i in range(numberTargets):
targets[i].display()
for i in range(numberDepots):
depots[i].display()
def resetPositions():
for i in range(numberAgents):
for j in range(numberDepots):
Agents[i].members[j].reset()
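# Descriptive note (added): updatePheromone below performs an ant-system style
# update. Existing pheromone decays by the factor phro (0.5), and each ant
# group deposits q = Q / (total tour length + 200 per unvisited target) on
# every edge it traversed; consecutive identical nodes in a path ("staying
# put") accumulate into stayProb instead of an edge.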
def updatePheromone():
Q=1000*numberDepots
phro=0.5
delta_pheromone=[]
delta_stayProb=[0]*(numberDepots+numberTargets)
for i in range(numberTargets+numberDepots):
X=[0]*(numberDepots+numberTargets)
delta_pheromone.append(X)
for i in range(numberAgents):
totalL=Agents[i].totalLength()
totalL+=200*len(Agents[i].targetsNotVisited)
q=Q/totalL
for j in range(numberDepots):
if Agents[i].members[j].lengthTravelled()<=0:
continue
path=Agents[i].members[j].path
prev=-1
for k in range(len(path)-1):
if not path[k]==path[k+1]:
delta_pheromone[min(path[k],path[k+1])][max(path[k],path[k+1])]+=q
else:
delta_stayProb[path[k]]+=q
prev=path[k]
for i in range(numberTargets+numberDepots):
for j in range(i,numberTargets+numberDepots):
pheromone[i][j]=phro*pheromone[i][j]+delta_pheromone[i][j]
stayProb[i]=phro*stayProb[i]+delta_stayProb[i]
return;
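# Descriptive note (added): choose() below performs roulette-wheel selection.
# An unvisited target k is picked with probability proportional to
# pheromone(current, k) / distance(current, k)**mew; the remaining probability
# mass, weighted by degrade * stayProb[current], corresponds to staying at the
# current node (for example when fuel is too low to reach any remaining target).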
def choose(i,j,degrade):
mew=0.7
if len(Agents[i].targetsNotVisited)>0:
S=0
current=Agents[i].members[j].getCurrentPos()
D=1000
for k in Agents[i].targetsNotVisited:
S+=(pheromone[min(current,k)][max(current,k)]/((distance[min(current,k)][max(current,k)])**mew))
D=min(D,distance[min(current,k)][max(current,k)])
S+=degrade*(stayProb[current])/(D**mew)
r=random.random()
p=0
for k in Agents[i].targetsNotVisited:
p+=(pheromone[min(current,k)][max(current,k)]/(distance[min(current,k)][max(current,k)]**mew))/S
if r<=p:
if Agents[i].members[j].fuel<distance[min(current,k)][max(current,k)]:
#######REPLACE THIS LOOP#########
checkOtherCityAvailable=False
for kk in Agents[i].targetsNotVisited:
if Agents[i].members[j].fuel>distance[min(current,kk)][max(current,kk)]:
checkOtherCityAvailable=True
break
if not checkOtherCityAvailable:
fuelFinished[j]=1
Agents[i].members[j].addToPath(current)
##################################
if current<numberTargets:
return targets[current]
else:
return depots[current-numberTargets]
else:
Agents[i].targetsNotVisited.remove(k)
Agents[i].members[j].addToPath(k)
Agents[i].members[j].fuel-=distance[min(current,k)][max(current,k)]
return targets[k]
Agents[i].members[j].addToPath(current)
if current<numberTargets:
return targets[current]
else:
return depots[current-numberTargets]
return targ(-1,-1)
def game_loop(duration, group):
quit=False
xinit=[]
yinit=[]
x=[]
y=[]
for j in range(numberDepots):
xinit.append(Agents[group].members[j].x)
yinit.append(Agents[group].members[j].y)
if choice[j].x>=0:
x.append((choice[j].x-Agents[group].members[j].x)/duration)
y.append((choice[j].y-Agents[group].members[j].y)/duration)
else:
x.append(0)
y.append(0)
for t in range(duration):
for event in pygame.event.get():
if event.type==pygame.QUIT:
quit=True
pygame.quit()
if quit:
break
gameDisplay.fill(Black)
#######################DISPLAY########################
drawMap()
for j in range(numberDepots):
Agents[group].members[j].update(math.floor(x[j]*t+xinit[j]),math.floor(y[j]*t+yinit[j]))
Agents[group].members[j].display()
pygame.display.update()
clock.tick(1000)
seed=5
if len(sys.argv)>1:
seed=int(sys.argv[1])
random.seed(seed)
numberTargets=15
numberDepots=4
numberAgents=50
randomizeDestinations=1#int(input("Press 1 to randomize:"))
targets=[]
depots=[]
if randomizeDestinations==1:
targets=initialize_targets(numberTargets)
depots=initialize_depots(numberDepots)
else:
#A=[[0,0],[1,0],[2,0]]
#T=[[1,2],[0,3],[3,3],[1,5],[4,5],[-2,4]]
A= [[1, -5], [-3, 3], [4, 4]]
T= [[0, -2], [0, 4], [-1, 0], [-2, 4], [4, 5], [-1, 5]]
numberDepots=len(A)
numberTargets=len(T)
scale=50
for i in T:
targets.append(targ(i[0]*scale+math.floor(display_width/2),i[1]*scale+math.floor(display_height/2)))
for i in A:
depots.append(depo(i[0]*scale+math.floor(display_width/2),i[1]*scale+math.floor(display_height/2),10*scale))
Agents=[]
print()
for i in range(numberAgents):
g=group()
for j in range(numberDepots):
X=Agent(j)
g.addAgent(X)
Agents.append(g)
targAndDep=[]
for i in range(numberTargets):
targAndDep.append(targets[i])
for i in range(numberDepots):
targAndDep.append(depots[i])
pheromone=[]
stayProb=[1]*(numberTargets+numberDepots)
for i in range(numberTargets+numberDepots):
X=[1]*(numberDepots+numberTargets)
pheromone.append(X)
distance=[]
for i in range(numberTargets+numberDepots):
X=[]
for j in range(numberDepots+numberTargets):
X.append(targAndDep[i].dist(targAndDep[j]))
distance.append(X)
fuelFinished=[0]*numberDepots
while 1>0:
count=0
for i in range(numberAgents):
Agents[i].initializeTSet()
deg=1
exit=False
fuelFinished=[0]*numberDepots
while not exit:
if len(Agents[i].targetsNotVisited)<=0 or sum(fuelFinished)==numberDepots:
exit=True
count+=1
bef=len(Agents[i].targetsNotVisited)
choice=[]
for j in range(numberDepots):
choice.append(choose(i,j,deg))
if count%350==0:
count=0
game_loop(1,i)
if len(Agents[i].targetsNotVisited)==bef:
deg=0.8*deg
else:
deg=1
for a in Agents[0].members:
print(a.fuel,end=' ')
print()
updatePheromone()
resetPositions()
pygame.quit()
|
[
"shivin16091@iiitd.ac.in"
] |
shivin16091@iiitd.ac.in
|
5d398e23330acab6248df9a9813cc727efae92e7
|
818063a2d41bded89e0b31df70f1616700975f71
|
/src/spark-sql.py
|
1cc68db7f3c6b153da22785613d5d5b2d550751d
|
[] |
no_license
|
Cohey0727/hello_spark
|
17ff924aae928c33522e46ee8a61eca1fb91f37d
|
880995d68cd7e637d8c5e7bf37208ec567b43f45
|
refs/heads/master
| 2022-12-09T21:46:46.479944
| 2020-09-12T14:33:19
| 2020-09-12T14:33:19
| 293,216,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from pyspark.sql import SparkSession
from pyspark.sql import Row
# Create a SparkSession
spark = SparkSession.builder.appName("SparkSQL").getOrCreate()
def mapper(line):
fields = line.split(',')
return Row(ID=int(fields[0]), name=str(fields[1]), \
age=int(fields[2]), numFriends=int(fields[3]))
lines = spark.sparkContext.textFile("./dataset/fakefriends.csv")
people = lines.map(mapper)
print(type(people))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = spark.createDataFrame(people).cache()
schemaPeople.createOrReplaceTempView("people1")
schemaPeople.createOrReplaceTempView("people2")
from pyspark.sql.types import IntegerType
def square(x):
return x*x
spark.udf.register('square', square, IntegerType())
# SQL can be run over DataFrames that have been registered as a table.
teenagers = spark.sql("SELECT * FROM people WHERE age >= 10 AND age <= 19")
# teenagers = spark.sql("SELECT age, square(age) as age_square from people1 where age >= 10 and age <=19")
# The results of SQL queries are RDDs and support all the normal RDD operations.
for teen in teenagers.collect():
print(teen[0])
print(teen.age)
# We can also use functions instead of SQL queries:
schemaPeople.groupBy("age").count().orderBy("age").show()
spark.stop()
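# Added illustrative note: the same teenager query can be expressed with the
# DataFrame API instead of SQL (sketch, assuming the session is still active):
#     from pyspark.sql import functions as F
#     schemaPeople.filter((F.col("age") >= 10) & (F.col("age") <= 19)).show()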
|
[
"ohayousagi.ac.kook0727@gmail.com"
] |
ohayousagi.ac.kook0727@gmail.com
|
76966aa3a8bf0a89e42edf8ac15aa8fde7390e66
|
a2d616c6725670901bdc9a87914211535f10ca11
|
/hatapp/hatapp.py
|
4173e7aa3954a7b5eed080483f636ecee965b9c9
|
[] |
no_license
|
mjmdavis/thinking-hat
|
07751f6e4b195161193dbf815aa53ed0772e4fa3
|
23aef9bc5f18ba45d7e47e9dd083e7cce2717271
|
refs/heads/master
| 2022-12-11T22:34:41.338856
| 2018-04-17T22:18:03
| 2018-04-17T22:18:03
| 129,388,938
| 2
| 0
| null | 2022-12-08T00:58:30
| 2018-04-13T10:46:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,099
|
py
|
#!/usr/bin/env python3
from kivy.config import Config
Config.set('graphics', 'fullscreen', 'auto')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.uix.gridlayout import GridLayout
from kivy.uix.layout import Layout
from kivy.uix.image import Image
from kivy.uix.carousel import Carousel
from kivy.graphics.texture import Texture
from kivy.graphics import *
from kivy.clock import Clock
import glob
from kivy.uix.screenmanager import ScreenManager, Screen
import subprocess
class HatApp(App):
def build(self):
self.screens = {}
self.available_screens = ['Slideshow', 'SeeSay']
self.sp = None
self.oldies_but_happies = "mpg123 /home/pi/oldies_but_happies.mp3"
self.wintergartan = "mpg123 /home/pi/wintergartan.mpeg"
self.candyman = "mpg123 /home/pi/candy_man.mp3"
# self.image_slideshow = "DISPLAY=:0 feh -FZz -D1 --auto-rotate /home/pi/Pictures/*"
self.image_slideshow = """DISPLAY=:0 feh -FZz -D1 --auto-rotate /home/pi/Pictures/* &
echo '"pkill -n feh; pkill -n xbindkeys"'>xbindkeys.temp
echo "b:1">>xbindkeys.temp
xbindkeys -n -f xbindkeys.temp
rm xbindkeys.temp
"""
# return MainScreen()
def open_screen(self, screen):
self.root.ids.im.switch_to(self.load_screen(screen), direction='left')
def load_screen(self, screen):
if screen in self.screens:
return self.screens[screen]
screen_obj = Builder.load_file(screen)
self.screens[screen] = screen_obj
return screen_obj
def run_subprocess(self, command: str) -> subprocess.Popen:
if self.sp is not None:
try:
self.sp.kill()
except:
pass
self.sp = subprocess.Popen(["bash", "-c", command])
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from tensorflow import keras
import os
import json
import tempfile
from functools import partial
from kivy.core.text import Label as CoreLabel
def main_menu():
App.get_running_app().root.current = 'menu'
class ShowcaseScreen(Screen):
    img_paths = glob.glob('/home/pi/Pictures/*.jpg')
camera = None
iv3 = None
api_data = None
image_widget = None
def on_pre_enter(self):
self.canvas.clear()
Clock.schedule_once(lambda x: self.cap_and_display(), 1)
def cap_and_display(self):
print('capturing image')
self.image_widget = Image()
texture = Texture.create(size=(800,480))
self.im = self.get_image()
texture.blit_buffer(self.im.tostring(), bufferfmt='ubyte', colorfmt='rgb')
with self.canvas:
Rectangle(texture=texture, pos=self.pos, size=(800,480))
self.canvas.ask_update()
Clock.schedule_once(lambda x: self.infer_and_speak(), 0.1)
def infer_and_speak(self):
scaled_im = self.scale_image(self.im)
name = self.predict(scaled_im)
stimulus, response = self.category_to_thought(name)
string_to_speak = ". When I see {}, I think of {}.".format(stimulus, response)
string_to_speak = string_to_speak.replace('_', ' ')
self.say_something(string_to_speak)
self.annotate_image(string_to_speak)
Clock.schedule_once(lambda x: main_menu(), 7)
def annotate_image(self, string):
top_text, bot_text = string.split(',')
top_label = CoreLabel(text=top_text, font_size=50, text_size=(800, 80), halign='center', valign='top', outline_width=10, outline_color=(50,50,50))
bot_label = CoreLabel(text=bot_text, font_size=50, text_size=(800, 80), halign='center', valign='bottom', outline_width=10)
top_label.refresh()
bot_label.refresh()
top_tex = top_label.texture
bot_tex = bot_label.texture
with self.canvas:
Color(0,0,0)
Rectangle(size=top_tex.size, pos=(0,400))
Rectangle(size=bot_tex.size, pos=(0,0))
Color(255,255,255)
Rectangle(size=top_tex.size, pos=(0,400), texture=top_tex)
Rectangle(size=bot_tex.size, pos=(0,0), texture=bot_tex)
pass
def see_say(self):
self.image_widget = Image()
texture = Texture.create(size=(800,480))
self.im = self.get_image()
texture.blit_buffer(self.im.tostring(), bufferfmt='ubyte', colorfmt='rgb')
with self.canvas:
Rectangle(texture=texture, pos=self.pos, size=(800,480))
scaled_im = self.scale_image(self.im)
name = self.predict(scaled_im)
stimulus, response = self.category_to_thought(name)
string_to_speak = ". When I see {}, I think of {}.".format(stimulus, response)
string_to_speak = string_to_speak.replace('_', ' ')
self.say_something(string_to_speak)
Clock.schedule_once(lambda x: main_menu(), 5)
def get_image(self):
if self.camera is None:
self.camera = PiCamera()
rawCapture = PiRGBArray(self.camera)
self.camera.capture(rawCapture, format="rgb")
image = rawCapture.array
return image
def scale_image(self, image):
small_image = cv2.resize(image[0:480, 160:640], dsize=(299,299))
small_image_f32 = np.float32(small_image)
small_image_f32_r3 = np.expand_dims(small_image_f32, 0)
scaled_image = small_image_f32_r3/255
return scaled_image
def predict_image(self, im):
if self.iv3 is None:
self.iv3 = keras.applications.InceptionV3(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000
)
prediction = self.iv3.predict(im)
return keras.applications.inception_v3.decode_predictions(prediction)
def predict(self, im):
prediction = self.predict_image(im)
name = prediction[0][1][1]
return name
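    # Descriptive note (added): decode_predictions returns, per image, a list of
    # (class_id, class_name, score) tuples sorted by score, so
    # prediction[0][1][1] above is the human-readable name of the second-ranked
    # class for the single captured frame.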
def say_something(self, text):
number, fname = tempfile.mkstemp(suffix='.wav')
command = 'pico2wave --wave={} -len-GB "{}" && aplay {} && rm {}'.format(
fname,
text,
fname,
fname
)
subprocess.Popen(['bash', '-c', command])
def category_to_thought(self, category):
if self.api_data is None:
with open('./joern_api_responses.json', 'r') as f:
                self.thought_responses = self.api_data = json.load(f)
retval = None
try:
retval = self.thought_responses[category]
except:
retval = [category, "Beer"]
return retval
class MyScreenManager(ScreenManager):
    def new_color_screen(self, name):
s = Screen(name=name)
self.add_widget(s)
self.current = name
class MenuScreen(Screen):
pass
if __name__ == '__main__':
HatApp().run()
|
[
"dev@joernhees.de"
] |
dev@joernhees.de
|
d123820df06d89c3354218e181ceff86851d1e77
|
7f5359b2f7b5148bc5687d505945db595c842659
|
/rigging the game v2.py
|
5c78da3abfa44210da9c639727945c83abdd7039
|
[] |
no_license
|
FrontEnd404/CIS1501-Fall2020
|
d873c4326f17bae07a79ee01c0d81d9560c8f3cd
|
405bfbfeca79c843a868744d47383b3ecb4be3ca
|
refs/heads/master
| 2023-02-03T04:28:24.172759
| 2020-12-23T01:07:03
| 2020-12-23T01:07:03
| 297,189,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
import random
total_winnings = 0
total_spent = 0
print("only whole numbers 1-9 are allowed")
print("input numbers one at a time")
player_choose = input("choose y to input your number r to get random numbers and x to stop playing")
while player_choose == "y" or player_choose == "r":
# PICKS WINNING NUMBERS
i = 0
winning_number = []
while i < 4:
i += 1
winning_number.append(random.randint(1, 9))
if player_choose == "y":
# Get user numbers
i = 0
user_number = []
while i < 4:
number = input("input a number")
try:
number = int(number)
except ValueError:
print("that is not a valid number please input a new one")
continue
if number >= 1 and number <= 9:
user_number.append(number)
i += 1
else:
print("number must be between 1 and 9")
continue
else:
# get random numbers
i = 0
user_number = []
while i < 4:
i += 1
user_number.append(random.randint(1, 9))
# Process numbers
print("the winning numbers are")
print(winning_number)
print("your numbers are")
print(user_number)
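    # Descriptive note (added): payout rules as implemented below - an exact
    # four-number match pays $5000, a "one off" win (both middle numbers match
    # plus at least one of the outer numbers) pays $275, and every play costs $1.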
if user_number == winning_number:
print("you win")
total_spent = total_spent + 1
total_winnings = total_winnings + 5000
elif user_number[1] == winning_number[1] and user_number[2] == winning_number[2] and (user_number[0] == winning_number[0] or user_number[3] == winning_number[3]):
print("you got a one off win")
total_spent = total_spent + 1
total_winnings = total_winnings + 275
print("your total winnings are $" + str(total_winnings) + " and you spent a total of $" + str(total_spent))
print("your net loss is")
print(total_winnings - total_spent)
else:
print("you did not win")
total_spent = total_spent + 1
print("your total winnings are $" + str(total_winnings) + " and you spent a total of $" + str(total_spent))
print("your net loss is")
print(total_winnings - total_spent)
player_choose = input("choose y to input your number r to get random numbers and x to stop playing")
print("thanks for playing")
|
[
"noreply@github.com"
] |
FrontEnd404.noreply@github.com
|
72c30e6c08374d33d8ff0572d4b267013d5ee46f
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/Domain_Lost_Data/DomainRedis/spiders/org_demo.py
|
7ce4603fab6be754b7df7f213c361937207a52e3
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380
| 2019-11-15T13:16:21
| 2019-11-15T13:16:21
| 226,297,631
| 0
| 1
| null | 2019-12-06T09:55:37
| 2019-12-06T09:55:36
| null |
UTF-8
|
Python
| false
| false
| 2,381
|
py
|
from scrapy_redis.spiders import RedisSpider
import re
from xpinyin import Pinyin
from scrapy.spidermiddlewares.httperror import HttpError, logger
from twisted.internet.error import *
import scrapy
from DomainRedis.items import DomaintestItem
import redis
connect = redis.Redis(host='127.0.0.1', port=6379, db=15)
keyword_key = 'org_keyword_key'
fail_url = 'fail_url'
class MySpider(RedisSpider):
"""Spider that reads urls from redis queue (myspider:start_urls)."""
name = 'org_demo'
redis_key = 'org_domain:start_urls'
allowed_domains = ['net.cn']
start_urls = ['http://panda.www.net.cn/cgi-bin/check.cgi?area_domain=quandashi.com']
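    # Descriptive note (added): as a scrapy_redis RedisSpider, this crawler is
    # normally driven by URLs pushed onto the 'org_domain:start_urls' redis key
    # (for example with LPUSH); the hard-coded start_urls list above is
    # generally not consumed by RedisSpider itself.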
def errback_twisted(self, failure):
if failure.check(TimeoutError, TCPTimedOutError, DNSLookupError):
request = failure.request
connect.sadd(fail_url, request.url)
if failure.check(HttpError):
# these exceptions come from HttpError spider middleware
# you can get the non-200 response
response = failure.response
request = failure.request
connect.sadd(fail_url, request.url)
logger.error('HttpError on %s', response.url)
def parse(self, response):
for x in range(connect.llen(keyword_key)):
kw = connect.lindex(keyword_key, 0).decode('utf-8').strip()
connect.lrem(keyword_key, kw)
print(kw)
item = DomaintestItem()
item['kw'] = kw
kw_pinyin = Pinyin().get_pinyin(kw).replace('-', '').replace(' ', '').replace('.', '').replace('·', '').lower()
domain_type_ls = ['org']
for domain_tp in domain_type_ls:
aim_url = 'http://panda.www.net.cn/cgi-bin/check.cgi?area_domain=' + kw_pinyin + '.' + domain_tp
yield scrapy.Request(url=aim_url, callback=self.parse_detail, meta={'item': item},
errback=self.errback_twisted)
def parse_detail(self, response):
item = response.meta['item']
item['domain_url'] = re.compile('<key>(.*)</key>').findall(response.text)[0]
item['domain_type'] = re.compile(r'.*?\.(.*)').findall(item['domain_url'])[0]
item['domain_status'] = re.compile('<original>(.*)</original>').findall(response.text)[0]
yield item
|
[
"34021500@qq.com"
] |
34021500@qq.com
|
697fa08f57edffd58997de8841ab2a824deae6d6
|
29576504eee5ed9942f71cf17af62dca4fa5c8f2
|
/mafCoveragePickles/src/mafCoveragePickleCreator.py
|
57be03c05cf1835b0a64477e381533396502d3ce
|
[
"MIT"
] |
permissive
|
sorrywm/mafTools
|
8a744f6fdb4cd271ff268f01ba6ec99896a1ba54
|
601832a780f328d48893474f0f4934dcbf9df73c
|
refs/heads/master
| 2021-04-18T21:24:18.334694
| 2012-09-13T20:09:48
| 2012-09-13T20:09:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,278
|
py
|
#!/usr/bin/env python2.7
"""
mafCoveragePickleCreator.py
10 January 2011
dent earl
mafCoveragePickleCreator is a script that operates on a single maf
(multiple alignment format) file and extracts information from
the alignments of pairs of sequences. Output is a pickle.
The maf sequence name field must have the format
species.chromosome
e.g.
hg19.chr22
For slightly more detailed examples, check the
test.mafCoveragePickleCreator.py unittest.
Comparisons are between a species' chromosome and all other
specified species. So,
for each species S:
for each chromosome C in S:
for every species T, T != S:
paint all positions in C where T aligns (any chrom in T)
"""
##############################
# Copyright (C) 2009-2012 by
# Dent Earl (dearl@soe.ucsc.edu, dent.earl@gmail.com)
#
#
# ... and other members of the Reconstruction Team of David Haussler's
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
import cPickle
import numpy
from optparse import OptionParser
import os
import re
seqRegex = r'[ACGTUMRWSYKVHDBXN]+'
seqPat = re.compile(seqRegex)
class MafLine:
def __init__(self, genome='', chrom='', start=-1,
seqLength=-1, totalLength=-1, strand=0,
sequence=-1, lineno=-1):
self.genome = genome
self.chrom = chrom
self.start = start
self.seqLength = seqLength
self.strand = strand
self.totalLength = totalLength
self.sequence = sequence
self.lineno = lineno
def initOptions(parser):
parser.add_option('--maf', dest='maf',
help='input maf file')
parser.add_option('--species', dest='species',
help='comma separated list of species names to include in output')
parser.add_option('--pickle', dest='pickle',
help=('location where python pickle will be written, for use '
'with downstream analyses. By default this file is not created.'))
def checkOptions(options, args, parser):
for k, v in [('maf', options.maf), ('pickle', options.pickle),
('species', options.species)]:
if v is None:
parser.error('specify --%s' % k)
if not os.path.exists(options.maf):
parser.error('--maf %s does not exist' % options.maf)
if options.species is None:
options.speciesList = set()
else:
options.speciesList = set(options.species.split(','))
def readMaf(filename, options):
""" read a given maf file and create an array 'alignments'
which is a multilayered dict:
alignments[a.genome][a.chrom][b.genome][b.chrom] = numpy.zeros(a.totalLength)
where the numpy array represents the locations where genome b maps onto genome a
"""
alignments = {}
f = open(filename, 'r')
blocks = []
mafLineList = []
namepat = re.compile(r'^(.+?)\.(.*)')
for lineno, line in enumerate(f, 1):
line = line.strip()
if line.startswith('s'):
ml = extractMafLine(namepat, line, lineno, options)
if ml is not None:
mafLineList.append(ml)
elif line == '':
if len(mafLineList) > 0:
addBlockPairs(alignments, mafLineList, options)
mafLineList = []
if len(mafLineList) > 0:
addBlockPairs(alignments, mafLineList, options)
mafLineList = []
f.close()
return alignments
def extractMafLine(namepat, line, lineno, options):
""" parse a given line from a maf file into a
MafLine object.
"""
data = line.split()
if len(data) != 7:
raise RuntimeError('maf line with incorrect number of fields on lineno %d: %s' % (lineno, line))
ml = MafLine()
m = re.match(namepat, data[1])
if m is None:
raise RuntimeError('maf sequence line where name field has no chr on lineno %d: %s' % (lineno, line))
ml.genome = m.group(1)
if ml.genome not in options.speciesList:
return None
ml.chrom = m.group(2)
ml.start = int(data[2])
ml.seqLength = int(data[3])
ml.strand = int(data[4] + '1')
ml.totalLength = int(data[5])
ml.sequence = data[6]
ml.lineno = lineno
return ml
def addBlockPairs(alignments, mafLineList, options):
""" loop through all pairs of mafLines in the block list, store
the discovered alignments in the alignments dict.
"""
for a in mafLineList:
for b in mafLineList:
if a.genome == b.genome:
# we skip self-alignments
continue
if a.genome not in alignments:
alignments[a.genome] = {}
if a.chrom not in alignments[a.genome]:
alignments[a.genome][a.chrom] = {}
if b.genome not in alignments[a.genome][a.chrom]:
# each 'a' chrom will have an array for each 'b' genome, so long
# as they appear together in an alignment block.
alignments[a.genome][a.chrom][b.genome] = numpy.zeros(a.totalLength,
dtype=numpy.uint16)
addBlocksToArray(alignments[a.genome][a.chrom][b.genome], a, b)
# explicitly throw this away to help with memory
mafLineList = []
def addBlocksToArray(array, a, b):
""" array is a numpy array of columns in genome/chrom a that
are aligned to any position in genome/chrom b.
i.e. the length of 'genome/chrom a' = length of 'array'
a and b are both mafline objects.
"""
global seqPat
aList = [] # ranges as tuples
bList = []
# offsets allow us to keep track of the
# within genome coordinates of the sequence field.
# they are offsets from the sequence start field integer
# and are in the sequence strand direction
aOffsets = []
bOffsets = []
##########
# find all of the contiguous intervals of gapless sequence
# in the sequence fields of a and b.
# ranges are mathematically [start, end), i.e. half-open.
for m in re.finditer(seqPat, a.sequence):
aList.append((m.start(), m.end()))
if len(aOffsets) == 0:
aOffsets.append(0)
else:
aOffsets.append(aOffsets[-1] + m.end() - m.start())
for m in re.finditer(seqPat, b.sequence):
bList.append((m.start(), m.end()))
if len(bOffsets) == 0:
bOffsets.append(0)
else:
bOffsets.append(bOffsets[-1] + m.end() - m.start())
i, j = 0, 0 # index
prevI, prevJ = -1, -1
while i < len(aList) and j < len(bList):
if (i, j) == (prevI, prevJ):
print locals()
raise RuntimeError('Infinite loop detected')
prevI, prevJ = i, j
if aList[i][1] <= bList[j][0]:
# i ---
# j ---
# no overlap
i += 1
continue
if bList[j][1] <= aList[i][0]:
# i ---
# j ---
# no overlap
j += 1
continue
if aList[i][1] < bList[j][1] and aList[i][1] > bList[j][0] and aList[i][0] < bList[j][0]:
# i ----
# j ----
# overlap is bList[j][0]..aList[i][1]
# forgive the overly verbose indexing, it makes it easier to understand.
forStart = a.start + aOffsets[i] - aList[i][0] + bList[j][0]
forEnd = a.start + aOffsets[i] - aList[i][0] + aList[i][1]
incrementArrayIndices(array, a.totalLength, forStart, forEnd, a.strand)
i += 1
continue
if bList[j][1] < aList[i][1] and bList[j][1] > aList[i][0] and bList[j][0] < aList[i][0]:
# i ----
# j ----
# overlap is aList[i][0]..bList[j][1]
forStart = a.start + aOffsets[i] - aList[i][0] + aList[i][0]
forEnd = a.start + aOffsets[i] - aList[i][0] + bList[j][1]
incrementArrayIndices(array, a.totalLength, forStart, forEnd, a.strand)
j += 1
continue
if aList[i][0] >= bList[j][0] and aList[i][1] <= bList[j][1]:
# i ----
# j --------
# overlap is aList[i][0]..aList[i][1]
forStart = a.start + aOffsets[i] - aList[i][0] + aList[i][0]
forEnd = a.start + aOffsets[i] - aList[i][0] + aList[i][1]
incrementArrayIndices(array, a.totalLength, forStart, forEnd, a.strand)
i += 1
continue
if bList[j][0] >= aList[i][0] and bList[j][1] <= aList[i][1]:
# i --------
# j ----
forStart = a.start + aOffsets[i] - aList[i][0] + bList[j][0]
forEnd = a.start + aOffsets[i] - aList[i][0] + bList[j][1]
incrementArrayIndices(array, a.totalLength, forStart, forEnd, a.strand)
j += 1
continue
print locals()
raise RuntimeError('Unanticipated condition.')
def incrementArrayIndices(array, totalLength, forStart, forEnd, strand):
""" strand can be either 1 or -1. interval is half open: [forStart, forEnd)
"""
if strand == 1:
array[forStart : forEnd] += 1
else:
# revStart = chromLength - forEnd
# revEnd = chromLength - forStart
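        # Worked example (illustrative): with totalLength=10, forStart=2 and
        # forEnd=5, the reverse-strand slice below is array[10-5 : 10-2], i.e.
        # array[5:8], the same three columns expressed in forward-strand coordinates.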
array[totalLength - forEnd : totalLength - forStart] += 1
def pickleData(data, filename, options):
""" record data to a pickle.
"""
if filename is None:
return
f = open(filename, 'wb')
cPickle.dump(data, f, 2) # 2 is the format protocol, 2 = binary
f.close()
def readPickles(args, *vargs):
""" read data from the pickles specified as positional arguments
"""
dataDict = {}
for p in args:
dataDict[p] = readPickle(p)
return dataDict
def readPickle(filename, *vargs):
""" Pulled out of readPickles like this so that other scripts may
use it as a module.
"""
FILE = open(filename, 'r')
t = cPickle.load(FILE)
FILE.close()
return t
def main():
usage = ('usage: %prog --maf path/to/file.maf --species=species1,species2,...\n\n'
'%prog is a script that operates on a single maf\n'
'(multiple alignment format) file and extracts information from\n'
'the alignments of pairs of sequences. Output is a pickle.\n'
'The maf sequence name field must have the format\n'
'species.chromosome\n'
'e.g.\n'
'hg19.chr22\n\n'
'For slightly more detailed examples, check the \n'
'test.mafCoveragePickleCreator.py unittest.\n\n'
'Comparisons are between a species\' chromosome and all other\n'
'specified species. So,\n'
'for each species S:\n'
' for each chromosome C in S:\n'
' for every species T, T != S:\n'
' paint all positions in C where T aligns (any chrom in T)'
)
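    # Hypothetical invocation (the --pickle flag name is assumed from
    # options.pickle; --maf and --species come from the usage text above):
    #   python mafCoveragePickleCreator.py --maf aln.maf --species hg19,mm9 --pickle out.pickle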
parser = OptionParser(usage=usage)
initOptions(parser)
options, args = parser.parse_args()
checkOptions(options, args, parser)
alignmentDict = readMaf(options.maf, options)
pickleData(alignmentDict, options.pickle, options)
if __name__ == '__main__':
main()
|
[
"dearl@soe.ucsc.edu"
] |
dearl@soe.ucsc.edu
|
6b7543c3f302d393698c043884c197d6f1358ffc
|
9e8b11fb2e905cd7710bcd4ca441d48580e34fe6
|
/hypergan/gans/experimental/alialpha_gan.py
|
2bb8f2cca9ee3d391c9bf4e6da0bd7856fa9e6a2
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
logbie/HyperGAN
|
ee6cdafdc17f40e30ce585d8ba46bb30758c7b67
|
07b0f070c05a56b9931638512b06e79e3d936c64
|
refs/heads/master
| 2022-06-04T12:15:04.350622
| 2020-04-29T17:14:36
| 2020-04-29T17:14:36
| 110,853,824
| 0
| 0
|
MIT
| 2020-04-29T17:14:37
| 2017-11-15T15:50:32
|
Python
|
UTF-8
|
Python
| false
| false
| 9,107
|
py
|
import importlib
import json
import numpy as np
import os
import sys
import time
import uuid
import copy
from hypergan.discriminators import *
from hypergan.distributions import *
from hypergan.generators import *
from hypergan.inputs import *
from hypergan.samplers import *
from hypergan.trainers import *
import hyperchamber as hc
from hyperchamber import Config
from hypergan.ops import TensorflowOps
import tensorflow as tf
import hypergan as hg
from hypergan.gan_component import ValidationException, GANComponent
from ..base_gan import BaseGAN
from hypergan.distributions.uniform_distribution import UniformDistribution
from hypergan.trainers.experimental.consensus_trainer import ConsensusTrainer
class AliAlphaGAN(BaseGAN):
"""
"""
def __init__(self, *args, **kwargs):
BaseGAN.__init__(self, *args, **kwargs)
def required(self):
"""
`input_encoder` is a discriminator. It encodes X into Z
`discriminator` is a standard discriminator. It measures X, reconstruction of X, and G.
`generator` produces two samples, input_encoder output and a known random distribution.
"""
return "generator discriminator ".split()
def create(self):
config = self.config
ops = self.ops
with tf.device(self.device):
x_input = tf.identity(self.inputs.x, name='input')
# q(z|x)
encoder = self.create_encoder(x_input)
self.encoder = encoder
z_shape = self.ops.shape(encoder.sample)
uz_shape = z_shape
uz_shape[-1] = uz_shape[-1] // len(config.encoder.projections)
            uniform_encoder = UniformDistribution(self, config.encoder, output_shape=uz_shape)
            direction, slider = self.create_controls(self.ops.shape(uniform_encoder.sample))
            z = uniform_encoder.sample + slider * direction
#projected_encoder = UniformDistribution(self, config.encoder, z=encoder.sample)
feature_dim = len(ops.shape(z))-1
#stack_z = tf.concat([encoder.sample, z], feature_dim)
#stack_encoded = tf.concat([encoder.sample, encoder.sample], feature_dim)
stack_z = z
generator = self.create_component(config.generator, input=stack_z)
self.uniform_sample = generator.sample
x_hat = generator.reuse(encoder.sample)
# z = random uniform
# z_hat = z of x
# g = random generated
# x_input
stacked_xg = ops.concat([generator.sample, x_input], axis=0)
stacked_zs = ops.concat([z, encoder.sample], axis=0)
standard_discriminator = self.create_component(config.discriminator, name='discriminator', input=stacked_xg, features=[stacked_zs])
            z_discriminator = self.create_z_discriminator(uniform_encoder.sample, encoder.sample)
standard_loss = self.create_loss(config.loss, standard_discriminator, x_input, generator, 2)
encoder_loss = self.create_loss(config.eloss or config.loss, z_discriminator, z, encoder, 2)
trainer = self.create_trainer(None, None, encoder, generator, encoder_loss, standard_loss, standard_discriminator, z_discriminator)
self.session.run(tf.global_variables_initializer())
self.trainer = trainer
self.generator = generator
self.uniform_distribution = uniform_encoder
self.slider = slider
self.direction = direction
self.z = z
self.z_hat = encoder.sample
self.x_input = x_input
self.autoencoded_x = x_hat
rgb = tf.cast((self.generator.sample+1)*127.5, tf.int32)
self.generator_int = tf.bitwise.bitwise_or(rgb, 0xFF000000, name='generator_int')
            self.random_z = tf.random_uniform(ops.shape(uniform_encoder.sample), -1, 1, name='random_z')
if hasattr(generator, 'mask_generator'):
self.mask_generator = generator.mask_generator
                self.mask = generator.mask
self.autoencode_mask = generator.mask_generator.sample
self.autoencode_mask_3_channel = generator.mask
def create_loss(self, loss_config, discriminator, x, generator, split):
loss = self.create_component(loss_config, discriminator = discriminator, x=x, generator=generator.sample, split=split)
return loss
def create_controls(self, z_shape):
direction = tf.random_normal(z_shape, stddev=0.3, name='direction')
slider = tf.get_variable('slider', initializer=tf.constant_initializer(0.0), shape=[1, 1], dtype=tf.float32, trainable=False)
return direction, slider
def create_encoder(self, x_input, name='input_encoder'):
config = self.config
input_encoder = dict(config.input_encoder or config.g_encoder or config.discriminator)
encoder = self.create_component(input_encoder, name=name, input=x_input)
return encoder
def create_z_discriminator(self, z, z_hat):
config = self.config
z_discriminator = dict(config.z_discriminator or config.discriminator)
z_discriminator['layer_filter']=None
net = tf.concat(axis=0, values=[z, z_hat])
encoder_discriminator = self.create_component(z_discriminator, name='z_discriminator', input=net)
return encoder_discriminator
def create_cycloss(self, x_input, x_hat):
config = self.config
ops = self.ops
distance = config.distance or ops.lookup('l1_distance')
pe_layers = self.gan.skip_connections.get_array("progressive_enhancement")
cycloss_lambda = config.cycloss_lambda
if cycloss_lambda is None:
cycloss_lambda = 10
if(len(pe_layers) > 0):
mask = self.progressive_growing_mask(len(pe_layers)//2+1)
cycloss = tf.reduce_mean(distance(mask*x_input,mask*x_hat))
cycloss *= mask
else:
cycloss = tf.reduce_mean(distance(x_input, x_hat))
cycloss *= cycloss_lambda
return cycloss
def create_z_cycloss(self, z, x_hat, encoder, generator):
config = self.config
ops = self.ops
total = None
distance = config.distance or ops.lookup('l1_distance')
if config.z_hat_lambda:
z_hat_cycloss_lambda = config.z_hat_cycloss_lambda
recode_z_hat = encoder.reuse(x_hat)
z_hat_cycloss = tf.reduce_mean(distance(z_hat,recode_z_hat))
z_hat_cycloss *= z_hat_cycloss_lambda
if config.z_cycloss_lambda:
recode_z = encoder.reuse(generator.reuse(z))
z_cycloss = tf.reduce_mean(distance(z,recode_z))
z_cycloss_lambda = config.z_cycloss_lambda
if z_cycloss_lambda is None:
z_cycloss_lambda = 0
z_cycloss *= z_cycloss_lambda
if config.z_hat_lambda and config.z_cycloss_lambda:
total = z_cycloss + z_hat_cycloss
elif config.z_cycloss_lambda:
total = z_cycloss
elif config.z_hat_lambda:
total = z_hat_cycloss
return total
def create_trainer(self, cycloss, z_cycloss, encoder, generator, encoder_loss, standard_loss, standard_discriminator, encoder_discriminator):
metrics = []
metrics.append(standard_loss.metrics)
d_vars = standard_discriminator.variables() + encoder_discriminator.variables()
g_vars = generator.variables() + encoder.variables()
print("D_VARS", d_vars)
print("G_VARS", g_vars)
#d_loss = standard_loss.d_loss
#g_loss = standard_loss.g_loss + cycloss
d_loss = standard_loss.d_loss+encoder_loss.d_loss
g_loss = standard_loss.g_loss+encoder_loss.g_loss
loss = hc.Config({'sample': [d_loss, g_loss], 'metrics':
{
'g_loss': standard_loss.g_loss,
'e_loss': encoder_loss.g_loss,
'ed_loss': encoder_loss.d_loss,
'd_loss': standard_loss.d_loss
}
})
trainer = ConsensusTrainer(self, self.config.trainer, loss = loss, g_vars = g_vars, d_vars = d_vars)
return trainer
def input_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [self.mask_generator.sample]
else:
extras = []
return extras + [
self.x_input,
self.slider,
self.direction,
self.uniform_distribution.sample
]
def output_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [
self.mask_generator.sample,
self.generator.g1x,
self.generator.g2x
]
else:
extras = []
return extras + [
self.encoder.sample,
self.generator.sample,
self.uniform_sample,
self.generator_int,
self.random_z
]
|
[
"mikkel@255bits.com"
] |
mikkel@255bits.com
|
654c0ae1949f522740a1b32adec1130cbc0b5203
|
edaac8eea187636ed1e5b4ca1a976160a0574f07
|
/users/migrations/0001_initial.py
|
277abdd6bee731a9603e2eeb7d7f60ed147c3dfc
|
[] |
no_license
|
mythxn/django-blog
|
87ea3df339461399427878e9c1cdc9e5da1d9532
|
3f866dbd02a06ed8e22b659808ad8e0c5231f9ea
|
refs/heads/master
| 2023-06-08T16:06:49.057272
| 2021-06-23T09:23:32
| 2021-06-23T09:23:32
| 378,936,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# Generated by Django 3.2.4 on 2021-06-22 08:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
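# Illustrative note (not produced by makemigrations): this migration would
# normally be applied with `python manage.py migrate users`, assuming the app
# label matches this file's `users/` directory.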
|
[
"mithun.shanoj@dubizzle.com"
] |
mithun.shanoj@dubizzle.com
|
6aeca5ac55acd107efab93b64200db2217306fb8
|
8559864e556c6a4ff788cf90fc7f56dac7194ec0
|
/web_test - lipei/scripts/handle_logging.py
|
a33ca1fd0bf82a30c0b61219c210990c550a5c86
|
[
"MIT"
] |
permissive
|
wl027728/web_demo_wl
|
1a8c389d6d1a1dfe9b724b850283e7e05d2ed725
|
52bae068afc4bd67f6a2e5b1d29ab7e78f1776f8
|
refs/heads/master
| 2023-03-20T09:52:13.486727
| 2021-03-14T13:27:26
| 2021-03-14T13:27:26
| 346,993,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
# -*- coding: utf-8 -*-
"""
Create Time: 2019/7/1 22:59
Author: 作者
"""
import logging
from scripts.handel_config import config
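# Per the config.get_value() calls below, the configuration is expected to hold
# a 'log' section with the keys logger_name, logger_level, log_name,
# console_level, file_level, simple_formatter and complex_formatter
# (the concrete file format is whatever handel_config.config reads).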
class HandleLogging:
def __init__(self):
self.case_logger = logging.getLogger(config.get_value('log','logger_name'))
self.case_logger.setLevel(config.get_value('log','logger_level'))
console_handle = logging.StreamHandler()
file_handle = logging.FileHandler(config.get_value('log','log_name'), encoding='utf-8')
        # 4. Set the log level for each output channel
        console_handle.setLevel(config.get_value('log','console_level'))
        file_handle.setLevel(config.get_value('log','file_level'))
        # 5. Define the log message formats
        simple_formatter = logging.Formatter(config.get_value('log','simple_formatter'))
        complex_formatter = logging.Formatter(config.get_value('log','complex_formatter'))
        console_handle.setFormatter(simple_formatter)
        file_handle.setFormatter(complex_formatter)
        # 6. Attach the output channels (handlers) to the logger
self.case_logger.addHandler(console_handle)
self.case_logger.addHandler(file_handle)
def get_logger(self):
return self.case_logger
logger = HandleLogging().get_logger()
if __name__ == '__main__':
    logger.debug("This is a debug-level log message")
    logger.info("This is an info-level log message")
    logger.warning("This is a warning-level log message")
    logger.error("This is an error-level log message")
    logger.critical("This is a critical-level log message")
|
[
"1228030608.com"
] |
1228030608.com
|
8f0ef6ac7a3b575fbac83babbe1b800000ed78f5
|
740a26442d5dfa96c48c7a942550433841730ccf
|
/clinica/urls.py
|
dee8f6ff73f85d2c8f14fdf58d248772b082b8f1
|
[] |
no_license
|
antoniodeveloper10/agendamentos
|
07034c983a5c76b7aa27eb3f23b7e3eda993ddb6
|
130fa117b846343c21141c895436b387a9f7fa74
|
refs/heads/master
| 2023-04-09T02:18:31.428463
| 2021-04-16T13:44:04
| 2021-04-16T13:44:04
| 326,991,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
"""clinica URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
from rest_framework import routers
# aplicativos importados
from pacientes.api.viewsets import PacientesViewSet
from agendamentos.api.viewsets import AgendamentosViewSet
from historicos.api.viewsets import HistoricosViewSet
from imagens.api.viewsets import imagensHistoricosViewSet
from medicos.api.viewsets import MedicosViewSet
router = routers.DefaultRouter()
router.register(r'pacientes',PacientesViewSet)
router.register(r'agendamentos',AgendamentosViewSet)
router.register(r'historicos',HistoricosViewSet)
router.register(r'medicos',MedicosViewSet)
router.register(r'imagens_historicos',imagensHistoricosViewSet)
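# For illustration: the DefaultRouter above generates list and detail routes for
# every registration, e.g. /pacientes/ and /pacientes/<pk>/, and likewise for
# agendamentos, historicos, medicos and imagens_historicos.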
urlpatterns = [
path('',include(router.urls)),
path('admin/', admin.site.urls)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rodrigues.jantonio84@gmail.com"
] |
rodrigues.jantonio84@gmail.com
|
b5d52cb8c65ddc883a018ebd0fbcf110e0ceb4f9
|
bc28f5d8ad5521e9127838d9cd4c73007247eb6e
|
/backlight.py
|
e1eafc494a90dac72f9a025c803ad5b543c3d3eb
|
[] |
no_license
|
nnarain/backlight
|
30a6c91fdfde1ac0b2c8b4000a604349e11ce0c2
|
d680a04a81e9432713592cd81cbeb0592475e23d
|
refs/heads/master
| 2020-03-22T04:11:33.757062
| 2018-07-08T04:57:42
| 2018-07-08T04:57:42
| 139,479,854
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,924
|
py
|
import time
import json
import logging
from neopixel import *
from queue import Queue
from threading import Thread
import paho.mqtt.client as mqtt
from argparse import ArgumentParser
class BacklightDriver:
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
CMD_ON = 0
CMD_OFF = 1
CMD_EXIT = 2
CMD_RAINBOW = 3
CMD_SOLID = 4
CMD_CLEAR = 5
KEY_STATE = 'state'
KEY_EFFECT = 'effect'
KEY_COLOR = 'color'
STATE_ON = 'ON'
STATE_OFF = 'OFF'
VALID_EFFECTS = ['rainbow', 'solid']
def __init__(self, led_count, led_pin):
self._led_count = led_count
self._led_pin = led_pin
self._state = {}
self._state_callback = None
self._update_state(self.KEY_STATE, self.STATE_OFF)
self._update_state(self.KEY_EFFECT, self.VALID_EFFECTS[0])
# Create NeoPixel object with appropriate configuration.
self._strip = Adafruit_NeoPixel(self._led_count, self._led_pin, self.LED_FREQ_HZ, self.LED_DMA, self.LED_INVERT, self.LED_BRIGHTNESS, self.LED_CHANNEL)
# Intialize the library (must be called once before other functions).
self._strip.begin()
self._cmd_queue = Queue()
self._animation_thread = None
self._animation_running = False
self._is_on = False
self._solid_color = Color(255, 0, 0)
def start(self):
self._animation_thread = Thread(target=self._animate)
self._animation_running = True
self._animation_thread.start()
self._is_on = True
self._update_state(self.KEY_STATE, self.STATE_ON)
def stop(self):
self.turn_off()
self._animation_running = False
if self._animation_thread:
self._animation_thread.join()
self._update_state(self.KEY_STATE, self.STATE_OFF)
def _animate(self):
while self._animation_running:
if not self._cmd_queue.empty():
(cmd, args) = self._cmd_queue.get(timeout=0.5)
if cmd is not None:
if cmd == self.CMD_ON:
self.turn_on()
elif cmd == self.CMD_OFF:
self.turn_off()
elif cmd == self.CMD_CLEAR:
self._colorWipe(Color(0,0,0), wait_ms=args)
if self._is_on:
if self._state[self.KEY_EFFECT] == 'rainbow':
self._rainbowCycle()
elif self._state[self.KEY_EFFECT] == 'solid':
self._colorWipe(self._solid_color)
def turn_on(self):
logging.info('Backlight on')
self._is_on = True
self._update_state(self.KEY_STATE, self.STATE_ON)
def turn_off(self, ms=50):
logging.info('Backlight off')
self._is_on = False
self._update_state(self.KEY_STATE, self.STATE_OFF)
self._cmd_queue.put((self.CMD_CLEAR, ms))
def set_effect(self, effect, args=None):
if effect not in self.VALID_EFFECTS:
            logging.warning('Invalid effect provided: {}'.format(effect))
return
self._update_state(self.KEY_EFFECT, effect)
self._is_on = False
self._cmd_queue.put((self.CMD_ON, None))
def set_solid_color(self, color):
logging.info('Setting color to {}'.format(color))
self._solid_color = Color(color['g'], color['r'], color['b'])
self._update_state(self.KEY_COLOR, color)
def set_state_callback(self, callback):
self._state_callback = callback
def _update_state(self, k, v):
self._state[k] = v
if self._state_callback:
self._state_callback(self._state)
def get_state(self):
return self._state
def _rainbowCycle(self, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(self._strip.numPixels()):
self._strip.setPixelColor(i, self._wheel((int(i * 256 / self._strip.numPixels()) + j) & 255))
if not self._is_on:
return
self._strip.show()
time.sleep(wait_ms/1000.0)
# Define functions which animate LEDs in various ways.
def _colorWipe(self, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(self._strip.numPixels()):
self._strip.setPixelColor(i, color)
self._strip.show()
time.sleep(wait_ms/1000.0)
@staticmethod
def _wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
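        # Sample values of the mapping above (arithmetic only): pos=0 gives
        # Color(0, 255, 0), pos=84 gives Color(252, 3, 0) and pos=170 gives
        # Color(0, 0, 255).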
class BacklightMqttClient(object):
COMMAND_TOPIC = '/home/backlight/set'
STATE_TOPIC = '/home/backlight/state'
def __init__(self, backlight):
self._backlight = backlight
self._backlight.set_state_callback(self._publish_state)
# setup MQTT client
self._client = mqtt.Client()
self._client.on_connect = self._on_connect
self._client.on_message = self._on_message
self._topic_to_callback = {}
self.register(self.COMMAND_TOPIC, self._on_command)
# start the backlight
self._backlight.start()
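    # Based on _on_command below, a command published to COMMAND_TOPIC is a JSON
    # object with any of the keys state, effect and color, for example
    #   {"state": "ON", "effect": "solid", "color": {"r": 255, "g": 0, "b": 0}}
    # (shown only as an illustration of the expected payload shape).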
def _on_command(self, command):
if 'state' in command:
state = command['state']
if state == 'ON':
self._backlight.turn_on()
elif state == 'OFF':
self._backlight.turn_off()
if 'effect' in command:
effect = command['effect']
self._backlight.set_effect(effect)
if 'color' in command:
color = command['color']
self._backlight.set_solid_color(color)
def _publish_state(self, state):
logging.info('Publishing state: {}'.format(state))
self._client.publish(self.STATE_TOPIC, json.dumps(state))
def _on_connect(self, client, userdata, flags, rc):
logging.info('MQTT client connected')
# subscribe to registered topics
for topic in self._topic_to_callback.keys():
client.subscribe(topic)
# publish state on connection to the broker
self._publish_state(self._backlight.get_state())
def _on_message(self, client, userdata, msg):
        logging.info('Message received on topic "{}" with data: {}'.format(msg.topic, msg.payload))
# get callback for this topic and call it, if it exists
callback = self._topic_to_callback.get(msg.topic, None)
if callback:
payload = msg.payload.decode('utf-8')
try:
json_data = json.loads(payload)
callback(json_data)
except ValueError as e:
logging.error('Caught ValueError: {}'.format(e))
except TypeError as e:
logging.error('Caught TypeError: {}'.format(e))
except Exception as e:
logging.error('Caught unknown exception: {}'.format(e))
def connect(self, broker):
logging.info('Connecting to MQTT broker "{}"'.format(broker))
self._client.connect(broker)
def spin(self):
self._client.loop_forever()
def register(self, topic, callback):
self._topic_to_callback[topic] = callback
def main(args):
port = args.port
led_count = args.led_count
led_pin = args.led_pin
# create the driver
backlight = BacklightDriver(led_count, led_pin)
# create the mqtt client
client = BacklightMqttClient(backlight)
client.connect('localhost')
# loop
try:
client.spin()
except KeyboardInterrupt:
pass
backlight.stop()
logging.info('Exited')
if __name__ == '__main__':
logging.basicConfig(format="[%(levelname)-8s] %(filename)s:%(lineno)d: %(message)s", filename='backlight.log', level=logging.DEBUG)
parser = ArgumentParser()
parser.add_argument('-c', '--led-count', default=60, help='Number of LEDs on strip')
parser.add_argument('-g', '--led-pin', default=18, help='LED strip GPIO pin')
parser.add_argument('-p', '--port', default=6142, help='RPC server port')
args = parser.parse_args()
try:
main(args)
except Exception as e:
import traceback
logging.error('{}'.format(e))
traceback.print_exc()
|
[
"nnaraindev@gmail.com"
] |
nnaraindev@gmail.com
|
17a1106b266eb45e39440a6294b78f1e319191a1
|
2aec33af60857a7d8d0249ef6031d34989c3c787
|
/app/models.py
|
12939aff10389da91db7dd94e077f3c35a6f83f8
|
[] |
no_license
|
alexmon1989/houses_for_rents
|
e640d14176ada420b49e2c660cbf604aa42f41e8
|
2eb24c1c5692399005ef186575682784518b9025
|
refs/heads/master
| 2021-01-13T08:15:48.774822
| 2016-10-26T07:03:16
| 2016-10-26T07:03:16
| 71,801,865
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,715
|
py
|
from app import db
import datetime
class City(db.Model):
    """Model class for the *cities* database table."""
__tablename__ = 'cities'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
updated_at = db.Column(db.DateTime(),
default=datetime.datetime.now,
nullable=False,
server_default=db.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
created_at = db.Column(db.DateTime(), default=datetime.datetime.now, nullable=False)
    def __init__(self, title):
        """ Class constructor.
        :param title: the *title* column in the table.
        :type title: str
"""
self.title = title
class Agent(db.Model):
    """Model class for the *agents* database table."""
__tablename__ = 'agents'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
agency = db.Column(db.String(255), default='')
phone_numbers = db.Column(db.String(255), default='')
email = db.Column(db.String(255), default='')
city_id = db.Column(db.Integer, db.ForeignKey('cities.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
updated_at = db.Column(db.DateTime(),
default=datetime.datetime.now,
nullable=False,
server_default=db.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
created_at = db.Column(db.DateTime(), default=datetime.datetime.now, nullable=False)
city = db.relationship('City', backref=db.backref('agents', lazy='dynamic', cascade="all,delete"))
    def __init__(self, name, city_id, agency=None, phone_numbers=None, email=None):
        """ Class constructor.
        :param name: the *name* column in the table.
        :type name: str
        :param city_id: the *city_id* column in the table. Foreign key to *cities.id*.
        :type city_id: int
        :param agency: the *agency* column in the table.
        :type agency: str
        :param phone_numbers: the *phone_numbers* column in the table.
        :type phone_numbers: str
        :param email: the *email* column in the table.
        :type email: str
"""
self.name = name
self.city_id = city_id
self.agency = agency
self.phone_numbers = phone_numbers
self.email = email
class Manager(db.Model):
    """Model class for the *managers* database table."""
__tablename__ = 'managers'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
agency = db.Column(db.String(255), default='')
phone_numbers = db.Column(db.String(255), default='')
email = db.Column(db.String(255), default='')
rate = db.Column(db.Float(), default=0, nullable=False)
city_id = db.Column(db.Integer, db.ForeignKey('cities.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
updated_at = db.Column(db.DateTime(),
default=datetime.datetime.now,
nullable=False,
server_default=db.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
created_at = db.Column(db.DateTime(), default=datetime.datetime.now, nullable=False)
city = db.relationship('City', backref=db.backref('managers', lazy='dynamic', cascade="all,delete"))
    def __init__(self, name, city_id, agency=None, phone_numbers=None, rate=0, email=None):
        """ Class constructor.
        :param name: the *name* column in the table.
        :type name: str
        :param city_id: the *city_id* column in the table. Foreign key to *cities.id*.
        :type city_id: int
        :param agency: the *agency* column in the table.
        :type agency: str
        :param phone_numbers: the *phone_numbers* column in the table.
        :type phone_numbers: str
        :param rate: the *rate* column in the table.
        :type rate: float
        :param email: the *email* column in the table.
        :type email: str
"""
self.name = name
self.city_id = city_id
self.agency = agency
self.phone_numbers = phone_numbers
self.rate = rate
self.email = email
class Listing(db.Model):
    """Model class for the *listings* database table."""
__tablename__ = 'listings'
id = db.Column(db.Integer, primary_key=True)
street_number = db.Column(db.String(255), default='')
street_address = db.Column(db.String(255), default='')
suburb = db.Column(db.String(255), default='')
property_type = db.Column(db.String(255), default='')
bedrooms = db.Column(db.SmallInteger(), default=1)
government_value = db.Column(db.Integer(), default=1)
current_rates = db.Column(db.Integer(), default=1)
median_rent_qv = db.Column(db.Integer(), default=1)
capital_growth = db.Column(db.Float(), default=1)
median_rent_tb = db.Column(db.Integer(), default=1)
city_id = db.Column(db.Integer, db.ForeignKey('cities.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
agent_id = db.Column(db.Integer, db.ForeignKey('agents.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=True)
manager_id = db.Column(db.Integer,
db.ForeignKey('managers.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=True)
updated_at = db.Column(db.DateTime(),
default=datetime.datetime.now,
nullable=False,
server_default=db.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
created_at = db.Column(db.DateTime(), default=datetime.datetime.now, nullable=False)
city = db.relationship('City', backref=db.backref('listings', lazy='dynamic', cascade="all,delete"))
agent = db.relationship('Agent', backref=db.backref('listings', lazy='dynamic', cascade="all,delete"))
manager = db.relationship('Manager', backref=db.backref('listings', lazy='dynamic', cascade="all,delete"))
def __init__(self, city_id, street_number, street_address, suburb, property_type, bedrooms,
government_value=None, current_rates=None, median_rent_qv=None, capital_growth=None, median_rent_tb=None,
                 agent_id=None, manager_id=None):
        """Class constructor.
        :param city_id: the *city_id* column in the table. Foreign key to `cities.id`.
        :type city_id: int
        :param street_number: the *street_number* column in the table
        :type street_number: str
        :param street_address: the *street_address* column in the table
        :type street_address: str
        :param suburb: the *suburb* column in the table
        :type suburb: str
        :param property_type: the *property_type* column in the table
        :type property_type: str
        :param bedrooms: the *bedrooms* column in the table
        :type bedrooms: int
        :param government_value: the *government_value* column in the table
        :type government_value: int
        :param current_rates: the *current_rates* column in the table
        :type current_rates: int
        :param median_rent_qv: the *median_rent_qv* column in the table
        :type median_rent_qv: int
        :param capital_growth: the *capital_growth* column in the table
        :type capital_growth: float
        :param median_rent_tb: the *median_rent_tb* column in the table
        :type median_rent_tb: int
        :param agent_id: the *agent_id* column in the table. Foreign key to *agents.id*.
        :type agent_id: int
        :param manager_id: the *manager_id* column in the table. Foreign key to *managers.id*.
        :type manager_id: int
"""
self.city_id = city_id
self.street_number = street_number
self.street_address = street_address
self.suburb = suburb
self.property_type = property_type
self.bedrooms = bedrooms
self.government_value = government_value
self.current_rates = current_rates
self.median_rent_qv = median_rent_qv
self.capital_growth = capital_growth
self.median_rent_tb = median_rent_tb
self.agent_id = agent_id
self.manager_id = manager_id
|
[
"alex.mon1989@gmail.com"
] |
alex.mon1989@gmail.com
|
aff26d032d996f020ce09b191b36ff0dc2d65c3e
|
ec528e05780dcac49394fbce06a8fe181774c526
|
/Django-master/mysite/mysite/urls.py
|
cf1d9aed18cd40cc93f6b3f8bd61bb6c741208f6
|
[] |
no_license
|
gurdaan/SOW4
|
7bc00926f2e572bd7cb0714ec36f7e9e2d03f341
|
4556531159ec2bd04814fafcd19e049617599a81
|
refs/heads/master
| 2022-04-17T14:58:32.798502
| 2020-04-20T06:34:15
| 2020-04-20T06:34:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Login.urls')),
path('Home', include('Home.urls')),
path('Home/SearchWo', include('SearchWo.urls')),
path('Home/WorkOrderEscalation', include('WorkOrderEscalation.urls')),
path('Home/WorkOrderEscalation/OutlookEscalate', include('OutlookEscalate.urls')),
path('Home/ProjectStatusTracker', include('StatusTracker.urls'))
]
|
[
"vivekkumarsingh.vk@gmail.com"
] |
vivekkumarsingh.vk@gmail.com
|
ed46e4b2416ba822b4250c7021427b5362f719a7
|
5b3806c892764673d929fbf96d069e5e8206d9b5
|
/basic/题/HW/105-滑动窗口-计算最接近的数.py
|
8d365e246aea72cebafa785c77bb6b98bf6f4d69
|
[] |
no_license
|
imlifeilong/MyAlgorithm
|
fdd2386e1ee129b50cf58ec3e9deb92eaca07dc0
|
45c26b4791fc95b19442b909b6744dbe07bf9a56
|
refs/heads/master
| 2023-09-01T06:55:47.353831
| 2023-08-31T22:38:56
| 2023-08-31T22:38:56
| 104,972,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
def main(data, k):
tmp = sorted(data)
    # compute the median of the original array
mid = tmp[len(data) // 2]
minval = float('inf')
    index = -1 # position of the best window found so far
    # len(data) - k + 1 keeps the k-wide window inside the array
for i in range(len(data) - k + 1):
        # the window covers i .. i+k-1; range() is half-open, so end = (i + k - 1) + 1
start = i
end = (i + k - 1) + 1
        # compute data[i] - data[i+1] - ... - data[i+k-1]
tmp = data[start]
for j in range(start + 1, end):
tmp -= data[j]
        # distance of this value from the median
tmpval = abs(tmp - mid)
        # if the distance is smaller, record it and the window start position i
if tmpval < minval:
minval = tmpval
index = i
print(index)
data = [1, 2, 3, 3]
k = 2
# data = [50, 50, 2, 3]
# k = 2
main(data, k)
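# For the sample data above this prints 2: the window starting at index 2 gives
# 3 - 3 = 0, which is the value closest to the median 3.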
|
[
"13772032410@163.com"
] |
13772032410@163.com
|
d0bf453c1533a614fef331459e2bb7e2f10d5208
|
869acefabb4accbef670c92d37d54f029bdb4094
|
/twoSum.py
|
fc27f63c06fc517a988553f496ff562e6617eaf2
|
[
"MIT"
] |
permissive
|
angadaws/LeetCode
|
f275ccd3c6e02540ae76cbfb31e77abead0773e9
|
8f418dc9b19dd00700012b57902e616188cc42bb
|
refs/heads/main
| 2023-05-15T10:37:57.070513
| 2021-06-12T18:25:45
| 2021-06-12T18:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
hashMap = {}
for idx in range(len(nums)):
currentNum = nums[idx]
difference = target-currentNum
            if currentNum not in hashMap:
hashMap[difference] = idx
else:
return [hashMap[currentNum], idx]
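# Illustrative check (driver code, not part of the LeetCode-style solution):
#   Solution().twoSum([2, 7, 11, 15], 9) returns [0, 1], since nums[0] + nums[1] == 9.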
|
[
"caleberioluwa@gmail.com"
] |
caleberioluwa@gmail.com
|
f436bc0f2f1a20e0c00f8fce30e3c1fbab0668f1
|
1ce9979bf2c5bfcb819c959ab9536b030f97b6c2
|
/src/api/v1/users/serializers.py
|
f8bd3a4fad906ae3ef8431d73afb2d59bdce4fdc
|
[] |
no_license
|
Danirill/cp-2021-back
|
f3e3a60c4fe2c592ba3f35a2c63b8c4f964ccb96
|
539b38f5a71ac5e12eb3076371315b14cc330b06
|
refs/heads/master
| 2023-06-02T16:29:33.443421
| 2021-06-20T01:21:03
| 2021-06-20T01:21:03
| 378,348,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,151
|
py
|
from .models import User, UserToken
from rest_framework import serializers
from ..labels.serializers import LabelSerializer
from django.utils.translation import gettext_lazy as _
from rest_framework_simplejwt.tokens import RefreshToken, TokenError
class RefreshTokenSerializer(serializers.Serializer):
refresh = serializers.CharField()
default_error_messages = {
'bad_token': _('Token is invalid or expired')
}
def validate(self, attrs):
self.token = attrs['refresh']
return attrs
def save(self, **kwargs):
try:
RefreshToken(self.token).blacklist()
except TokenError:
self.fail('bad_token')
class CreateUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True}, }
def create(self, validated_data):
user = User.objects.create(**validated_data)
user.save()
# AuthToken.objects.create(user)
return user
class SimpleUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'id', 'name')
class UserIdSerializer(serializers.Serializer):
user_id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
class UserSerializer(serializers.ModelSerializer):
labels = LabelSerializer(many=True)
class Meta:
model = User
fields = (
'id', 'email', 'name', 'labels', 'coach', 'is_confirmed',
'role', 'last_name', 'whatsapp_phone','experts','telegram_tag', 'birthday', 'phone')
class TokenSerializer(serializers.ModelSerializer):
class Meta:
model = UserToken
fields = ('id', 'confirmation_token', 'reset_password_token', 'user', 'phone_auth_code')
def create(self, validated_data):
token = UserToken.objects.create(**validated_data)
token.save()
return token
class EmailConfSerializer(serializers.Serializer):
email = serializers.EmailField(required=True)
class UserCreationSerializer(serializers.Serializer):
# Check request for values
email = serializers.EmailField(required=True)
name = serializers.CharField(required=True)
password = serializers.CharField(required=True)
class UserAutoCreationSerializer(serializers.Serializer):
# Check request for values
email = serializers.EmailField(required=True)
class TableRowSerialiser(serializers.Serializer):
user_id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
user_name = serializers.CharField()
user_registration_date = serializers.DateTimeField()
user_phone = serializers.CharField()
coach_id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
coach_name = serializers.CharField()
class UserInfoSerializer(serializers.ModelSerializer):
email = serializers.EmailField(required=False)
user_id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
phone = serializers.CharField(max_length=12, required=False)
whatsapp_phone = serializers.CharField(max_length=12, required=False)
telegram_tag = serializers.CharField(max_length=300, required=False)
birthday = serializers.DateField(required=False)
last_name = serializers.CharField(required=False)
name = serializers.CharField(required=False)
class Meta:
model = User
fields = ('email', 'user_id', 'phone', 'whatsapp_phone', 'telegram_tag', 'birthday', 'last_name', 'name')
class UserExpertsSerializerInput(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
experts = serializers.ListField(child=serializers.PrimaryKeyRelatedField(queryset=User.objects.all()))
class Meta:
model = User
fields = ('id', 'experts')
class UserExpertsSerializerOutput(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
experts = UserSerializer(many=True)
class Meta:
model = User
fields = ('id', 'experts')
|
[
"dzykin3@gmail.com"
] |
dzykin3@gmail.com
|
7917caf19c30eb4fd1363fb22874950a2184439e
|
3a52c30b5709c2e9882c52ce69aa467da4cb845b
|
/contrib/bitrpc/bitrpc.py
|
c1e0f7ab3d116a7f11b3656c0f0167d7e1d02930
|
[
"MIT"
] |
permissive
|
brlcoin/brlcoin
|
f530a80d6544370aaf291e58004552b44a97b5fd
|
8492b68be996d8f970a7c9537b4b97f93a91ab22
|
refs/heads/master
| 2020-03-18T20:43:57.160100
| 2018-05-29T03:03:43
| 2018-05-29T03:03:43
| 135,234,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,838
|
py
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
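# Illustrative usage (assuming a brlcoind RPC server is listening on the port
# configured above): e.g. `python bitrpc.py getinfo` or `python bitrpc.py getbalance`;
# the first positional argument is read below as the command name.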
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:38316")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:38316")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Brlcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Brlcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
[
"contato@brlcoin.cash"
] |
contato@brlcoin.cash
|
fec9aaefded80f2a378f29d1b67103de08199993
|
1fda0ce3c79841b641fc1de13565815019140291
|
/cars.py
|
20ac8586ce99b4e07eecec98b0944beb5f3a5d74
|
[] |
no_license
|
Jparedes20/python_work
|
15f7669983e2f6ce82352a69b52e37b20f09f2ec
|
e4b3c5c7be196e9020cdce9509a94a6e823ffd5c
|
refs/heads/master
| 2020-04-06T06:54:49.544605
| 2016-08-30T14:54:07
| 2016-08-30T14:54:07
| 62,184,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
#Sorting a list() permanently with sort()
# List of the cars I would like to have
_dreamCars=['BMW','AUDI','MERCEDES','PORCHE','RANGE ROVER','MUSTANG']
print("\n\tThis is the list of cars I would love to drive: \n\t"+str(_dreamCars))
_dreamCars.sort()  # sort() sorts the list permanently
# print(_dreamCars.sort()) would just print None because sort() works in place; compare with
# print(sorted(_ownCars)), where the sorted list can be printed directly
print("\n\tThis is the list of cars but in order: \n\t"+str(_dreamCars))
print("\n\tAs you can see the list has been sorted permanently: \n\t"+str(_dreamCars))
# sort the list in reverse order with the parameter reverse=True
_dreamCars.sort(reverse=True)
print("\n\tThis is the list in reverse order: \n\t"+str(_dreamCars))
_ownCars=['ROGUE','JEEP','CHALLENGER','CHEVY']
# finding the length of a list with len()
print("\n\tI have: "+str(len(_ownCars))+" cars")
print("\n\tThese are the cars my family own: \n\t"+str(_ownCars))
print("\n\tThese are the cars my family own in order: \n\t"+str(sorted(_ownCars,reverse=True)))
# Notice that sorted() can take the argument reverse=True, the same as sort()
print("\n\tHere is the original list: \n\t"+str(_ownCars))
# printing a list in reverse order with reverse()
# We can reverse a list in place without it being sorted
_ownCars.reverse()
print("\n\tWe can change the order of the list: \n\t"+str(_ownCars))
|
[
"alfaro.pg@gmail.com"
] |
alfaro.pg@gmail.com
|
34f9123a640b3cd1a16fd0411531ccfb080a2160
|
a617c7a18f4ea19d2bb61ef62694504aeac94fae
|
/CovidSentimentAnalyzer/settings.py
|
fefe8bab24250b6a5f4b97e74581b687f171bb71
|
[] |
no_license
|
vijay10148/sentiment
|
e0998a0ea1d6cfaa08c20098198a44f522e3888a
|
48dc29d77b41dd65afa4afd73f1c0cf6c67378c6
|
refs/heads/main
| 2023-08-19T00:36:42.553384
| 2021-10-07T07:28:11
| 2021-10-07T07:28:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,387
|
py
|
"""
Django settings for CovidSentimentAnalyzer project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-p8qvf@v$z279!o65)hkj#dydc7^b03ucs8-vee5q6sg-b!2e=f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
MODELS=os.path.join(BASE_DIR,'main/models')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CovidSentimentAnalyzer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CovidSentimentAnalyzer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"vijaykumar10148@gmail.com"
] |
vijaykumar10148@gmail.com
|
2f3eebfa5b78dc93387ef9ad1519fe33da01dbe4
|
6e060e9730b58e4d7819335438f915179504e72c
|
/bit_hr_ec/__openerp__.py
|
0e51f5c8441b47f79875a37f2d9082416b377860
|
[] |
no_license
|
missionpetroleumgit/addons_missiongit
|
4dcdf1d0e79da982670c573d59574a939d1636c0
|
714514719d5d4d96f371dd529a70ac282070c43b
|
refs/heads/master
| 2023-03-10T20:34:10.154050
| 2021-02-24T23:50:20
| 2021-02-24T23:50:20
| 342,027,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author : Guillermo Herrera Banda hguille25@yahoo.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Empleados Ecuador',
'version': '1.0',
'summary': 'Añade campos a la ficha del empleado y funcionalidad',
'description': """
""",
'author': 'Guillermo Herrera Banda',
'website': '',
'category': 'Human Resources',
'depends': ['hr'],
'init_xml': [],
'update_xml': [
'views/hr_employee.xml',
'views/family_burden_view.xml',
'views/education_level_view.xml',
'report/holiday_report_view.xml',
'report/hr_holidays_report.xml',
'views/report_holidays_view.xml',
'data/education_area.xml',
'views/res_bank.xml',
'views/res_partner.xml',
'views/res_company.xml',
'menu.xml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"daldaz@mission-petroleum.com"
] |
daldaz@mission-petroleum.com
|
51fc245df949d638dc027b183c2e11bde92216cc
|
0601f3ddf997761d1de71c49c9f9ba0fd23d461f
|
/store.py
|
be8fbaf6f970f53892c683720690ed2fc108fcb5
|
[] |
no_license
|
fuchami/kana_bot
|
58d94c0b9ff753875f259a4615cc92ca0adab413
|
59f2d1463def90423e71f4d116d600dbd8640267
|
refs/heads/master
| 2020-04-04T05:14:25.061969
| 2018-11-01T15:58:32
| 2018-11-01T15:58:32
| 155,738,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
#-*- encoding:utf-8 -*-
"""
Collect lyric tweets from the Nishino Kana lyrics bots on Twitter.
"""
import requests_oauthlib
import requests
import json
import sys, os, re
import MeCab
import random
# fetch tweets for the given account
def get_tw(account):
session = requests_oauthlib.OAuth1Session(
"", #Consumer Key
"", #Consumer Secret
"", #Access Token
"" #Access Token Secret
)
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
res = session.get(url, params = {'screen_name': account, 'count':200,'include_rts':False})
maxid = 0
i = 0
f = open("data.txt", "a")
    # fetch tweets page by page
while True:
res_text = json.loads(res.text)
for r in res_text:
if maxid > r['id'] or maxid == 0:
maxid = r['id']
tw = r['text'].encode('utf-8')
f.write(tw)
i = i +1
if 500<= i:
break
res = session.get(url, params = {'screen_name': account, 'count':200,'include_rts':False, 'max_id': r['id']-1})
        # call the API and check the status code
        # 200 means the request succeeded
if res.status_code != 200:
            # report an error if the request did not succeed
print ("Twitter API Error: %d" % res.status_code)
sys.exit(1)
f.close()
return 0
# main function
def main():
    # collect the tweets
get_tw("kanayan_lyrics")
get_tw("kana_lyrics")
    print("Finished collecting tweets")
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"famichiki.yuuki@gmail.com"
] |
famichiki.yuuki@gmail.com
|
b1acc0f04b1062751b2db87f091c8913e6d51317
|
5695d6a7d6e190cd5de1b6c115f9b2d57b19bc1b
|
/app/models.py
|
30910647719dcfcfbf4d21e16491cfb9a2f30d71
|
[
"Apache-2.0"
] |
permissive
|
ibm-devops/res-devops-insights
|
6690def921dec3ed5ddb2bab73c45d41cb847aa4
|
bebc3a1c3b1916cbcb5cec05f1f83899a0bf3e7f
|
refs/heads/master
| 2020-12-30T16:15:36.618886
| 2017-05-11T15:07:40
| 2017-05-11T15:07:40
| 90,972,980
| 1
| 0
| null | 2017-05-11T14:02:06
| 2017-05-11T11:43:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
######################################################################
# Copyright 2016, 2017 John Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
import pickle
from cerberus import Validator
from flask import url_for
from werkzeug.exceptions import NotFound
from custom_exceptions import DataValidationError
######################################################################
# Pet Model for database
# This class must be initialized with use_db(redis) before using
# where redis is a value connection to a Redis database
######################################################################
class Pet(object):
schema = {
'id': {'type': 'integer'},
'name': {'type': 'string', 'required': True},
'category': {'type': 'string', 'required': True},
'available': {'type': 'boolean', 'required': True}
}
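    # For illustration only: a document accepted by this schema (and hence by
    # deserialize()) looks like {'name': 'fido', 'category': 'dog', 'available': True},
    # with 'id' optional; the field values here are made up.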
__validator = Validator(schema)
__redis = None
def __init__(self, id=0, name=None, category=None, available=True):
self.id = int(id)
self.name = name
self.category = category
self.available = available
def self_url(self):
return url_for('get_pets', id=self.id, _external=True)
def save(self):
if self.name == None: # name is the only required field
raise AttributeError('name attribute is not set')
if self.id == 0:
self.id = self.__next_index()
Pet.__redis.set(self.id, pickle.dumps(self.serialize()))
def delete(self):
Pet.__redis.delete(self.id)
def __next_index(self):
return Pet.__redis.incr('index')
def serialize(self):
return { "id": self.id, "name": self.name, "category": self.category, "available": self.available }
def deserialize(self, data):
if isinstance(data, dict) and Pet.__validator.validate(data):
self.name = data['name']
self.category = data['category']
self.available = data['available']
else:
raise DataValidationError('Invalid pet data: ' + str(Pet.__validator.errors))
return self
######################################################################
# S T A T I C   D A T A B A S E   M E T H O D S
######################################################################
@staticmethod
def use_db(redis):
Pet.__redis = redis
@staticmethod
def remove_all():
Pet.__redis.flushall()
@staticmethod
def all():
# results = [Pet.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']
results = []
for key in Pet.__redis.keys():
if key != 'index': # filter out our id index
data = pickle.loads(Pet.__redis.get(key))
pet = Pet(data['id']).deserialize(data)
results.append(pet)
return results
@staticmethod
def find(id):
if Pet.__redis.exists(id):
data = pickle.loads(Pet.__redis.get(id))
pet = Pet(data['id']).deserialize(data)
return pet
else:
return None
@staticmethod
def find_or_404(id):
pet = Pet.find(id)
if not pet:
raise NotFound("Pet with id '{}' was not found.".format(id))
return pet
@staticmethod
def find_by_category(category):
# return [pet for pet in Pet.__data if pet.category == category]
results = []
for key in Pet.__redis.keys():
if key != 'index': # filter out our id index
data = pickle.loads(Pet.__redis.get(key))
if data['category'] == category:
results.append(Pet(data['id']).deserialize(data))
return results
@staticmethod
def find_by_availability(available=True):
# return [pet for pet in Pet.__data if pet.available == available]
results = []
for key in Pet.__redis.keys():
if key != 'index': # filter out our id index
data = pickle.loads(Pet.__redis.get(key))
if data['available'] == available:
results.append(Pet(data['id']).deserialize(data))
return results
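######################################################################
# Usage sketch (not part of the original model) - assumes a Redis
# server reachable on localhost:6379 and the 'redis' Python package;
# the connection details below are illustrative only.
#
#   import redis
#   Pet.use_db(redis.StrictRedis(host='localhost', port=6379, db=0))
#   fido = Pet(0, 'fido', 'dog', True)
#   fido.save()                        # assigns fido.id via the 'index' counter
#   same_pet = Pet.find(fido.id)       # returns a Pet instance, or None
#   dogs = Pet.find_by_category('dog')
######################################################################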
|
[
"rofrano@us.ibm.com"
] |
rofrano@us.ibm.com
|
4ac5568a0abde39bef776273621a20d4ac1758df
|
1df72ac43c4a74b2b8f33f4af47c8aefaaddc150
|
/missingvalue/missingvalue.py
|
cfe9d3ca5165015f0a82bc72ace5046ba1b19270
|
[] |
no_license
|
qwang94200/tags-up
|
07f7e6e2474c1a7653874449d6220b9cd32d832b
|
fef5ee437d9b401543095719771ab1e0c6f7b31a
|
refs/heads/master
| 2021-01-19T10:34:51.053056
| 2017-02-27T18:55:22
| 2017-02-27T18:55:22
| 82,211,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,653
|
py
|
import numpy as np
import pandas as pd
import pickle
import datetime
import scipy
from sklearn import preprocessing
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
import logging
logging.basicConfig(level=logging.DEBUG)
def score_function(y_true, y_pred):
return roc_auc_score(y_true, y_pred)
def _estimate_features(features, scores, threshold):
logging.warning("thresholds %f", threshold)
out_ifeatures=[]
for _i, _feature in enumerate(features):
if scores[_i]>threshold:
out_ifeatures.append(_i)
return out_ifeatures
def _estimate_scores(X_train, y_train, features):
_nround=5
_score=0
for feature in features:
X_train[feature]=X_train[feature].fillna(-1)
X_train=X_train[features].as_matrix()
clf = DecisionTreeClassifier()
for train_index, test_index in KFold(n_splits=_nround).split(X_train):
X_train2, X_test2 = X_train[train_index,:], X_train[test_index,:]
y_train2, y_test2 = y_train[train_index], y_train[test_index]
clf.fit(X_train2, y_train2) # fit the decision tree
y_pred=clf.predict(X_test2)
_score +=score_function(y_test2, y_pred)
return _score/_nround
def _estimate_classifiers(X_train, y_train, features):
for feature in features:
X_train[feature]=X_train[feature].fillna(-1)
X_train=X_train[features].as_matrix()
return _estimate_classifiers_matrix(X_train, y_train)
def _estimate_classifiers_matrix(X_train, y_train):
_nround=5
names = [ "Decision Tree", "Neural Net", "Naive Bayes" ]
classifiers=[ DecisionTreeClassifier(),
MLPClassifier(alpha=1),
GaussianNB(),
]
score=np.zeros(len(classifiers))
for train_index, test_index in KFold(n_splits=_nround).split(X_train):
X_train2, X_test2 = X_train[train_index,:], X_train[test_index,:]
y_train2, y_test2 = y_train[train_index], y_train[test_index]
for i in range(len(classifiers)):
clf=classifiers[i]
clf.fit(X_train2, y_train2) # fit the current classifier
y_pred=clf.predict(X_test2)
score[i]+=score_function(y_test2, y_pred)
score/=_nround
print("ROC AUC SCORE :")
print(score)
i=np.argmax(score)
print(names[i], score[i])
return score[i]
def _estimate_missing_value(X_train, y_train, ofilename):
features= [_feature for _feature in list(X_train.columns.values) if _feature!='Unnamed: 0']
#_estimate_threshold_missing_value(X_train, y_train, features, ofilename)
features = pickle.load( open( ofilename, "rb" ) )
# _estimate_variance_values(X_train, y_train, features, ofilename)
_estimate_covariance_values(X_train, y_train, features, ofilename)
def _estimate_threshold_missing_value(X_train, y_train, features, ofilename):
logging.warning('_estimate_threshold')
missingcounts={feature: X_train[feature].count() for feature in features}
_missingcountvalues=np.unique(np.array(missingcounts.values()))
logging.info(_missingcountvalues.size)
_counts=len(X_train.index)
_thresholds=xrange(22, 30)
_scores=np.zeros(len(_thresholds))
_feature_length=len(features)
for i in range(len(_thresholds)):
_count =_thresholds[i]*_counts/100
_countfeatures=[]
for feature in features:
if missingcounts[feature]> _count:
_countfeatures.append(feature)
if len(_countfeatures)!=_feature_length:
logging.info('estimate_threshold %d', _thresholds[i])
_feature_length=len(_countfeatures)
_scores[i]=_estimate_classifiers(X_train, y_train, _countfeatures)
i=np.argmax(_scores)
with open(ofilename, 'wb') as fp:
pickle.dump( np.array([feature for feature in features if missingcounts[feature]>20*_count/100]), fp)
return
def _estimate_threshold(X_train, y_train, func, threshold):
pass
"""
_max_threshold=np.max(df_thresholds)
_min_threshold=np.min(df_thresholds)
if isinstance(_max_threshold, float) or isinstance(_min_threshold, float):
funcInt=np.vectorize(pyfunc=lambda x: int(100*x))
df_thresholds=funcInt(df_thresholds)
_max_threshold=int(100*_max_threshold)
_min_threshold=int(100*_min_threshold)
thresholds=xrange(_min_threshold, _max_threshold, (_max_threshold-_min_threshold)/10)
scores=np.zeros(len(thresholds))
_nb_features=len(df_thresholds)
i_features=np.array([])
for _i_threshold, _threshold in enumerate(thresholds):
_ifeatures=np.array([_i for _i, _feature in enumerate(df_thresholds) if df_thresholds[_i]> _threshold])
if len(_ifeatures) == _nb_features:
continue
else:
_nb_features=_ifeatures
_i_scores = _estimate_classifiers_matrix(X_train[:, _ifeatures], y_train)
if _i_scores>np.max(scores):
i_features=_ifeatures
scores[_i_threshold]=_i_scores
return
"""
def _estimate_variance_values(X_train, y_train, features, ofilename):
imputer = preprocessing.Imputer(missing_values="NaN", strategy="mean", axis=0)
X_train= imputer.fit_transform(X_train[features])
sel = VarianceThreshold()
X_train = sel.fit_transform(X_train)
_max_score = _estimate_classifiers_matrix(X_train, y_train)
_ifeatures = [_ifeature for _ifeature, v in enumerate(sel.get_support()) if v]
features = features[_ifeatures]
minmax_scale = preprocessing.MinMaxScaler().fit(X_train)
X_train = minmax_scale.transform(X_train)
_x, _y = X_train.shape
"""
df_std_thresholds=set()
for _id in xrange(_y):
df_std_thresholds.add(int(X_train[:, _id].std()*100))
df_std_thresholds=np.array(df_std_thresholds)
print(df_std_thresholds)
return
df_std_thresholds=np.sort(df_std_thresholds)
return
npfunc=np.vectorize(lambda x: x*0.01)
_ithresholds=npfunc(df_std_thresholds)
if _ithresholds.size>10:
_ithresholds=np.linspace (np.min(df_std_thresholds), np.max(df_std_thresholds), num=5)
"""
_ithresholds=np.linspace (0.05, 0.35, num=10)
scores=[]
for _ithreshold in _ithresholds:
sel = VarianceThreshold(threshold= _ithreshold)
try:
X_train_thresholds = sel.fit_transform(X_train)
_iscore = _estimate_classifiers_matrix(X_train_thresholds, y_train)
except ValueError:
logging.warning('value error')
scores.append(0)
continue
scores.append(_iscore)
scores = np.array(scores)
_imax=np.argmax(scores)
logging.info("maximum scores %f", scores[_imax])
sel=VarianceThreshold(threshold=_ithresholds[_imax])
sel.fit_transform(X_train)
_ifeatures = [_ifeature for _ifeature, v in enumerate(sel.get_support()) if v]
with open(ofilename, 'wb') as fp:
pickle.dump(features[_ifeatures], fp)
return
def _estimate_covariance_values(X_train, y_train, features, ofilename):
imputer = preprocessing.Imputer(missing_values="NaN", strategy="mean", axis=0)
X_train= imputer.fit_transform(X_train[features])
_ilength, _jlength=X_train.shape
X_train = preprocessing.normalize(X_train, norm='l2')
logging.info('features: %d', _jlength)
"""
nproundfunc=np.vectorize(lambda x: round(x,4))
"""
from scipy import stats
_sortthresholdvalues=[]
_insertlength=[]
for i in xrange(_ilength):
for j in xrange(i+1, _jlength):
x=X_train[:,i]
y=X_train[:,j]
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
_value=(i,j, abs(p_value))
_sortthresholdvalues.append(_value)
#_sortthresholdvalues.sort(key=lambda tup: abs(tup[2]))
#_sortthresholdvalues.sort(key=lambda tup: tup[2], reverse=True)
scores=_estimate_classifiers_matrix(X_train, y_train)
_thresholds=np.sort(np.sort(np.unique(np.array([round(tup[2],2) for tup in _sortthresholdvalues if tup[2]>0]))))[::-1]
_ifeatures=np.ones(_jlength)
for _threshold in _thresholds:
lsvc = LinearSVC(C=_threshold, penalty="l1", dual=False).fit(X_train, y_train)
model = SelectFromModel(lsvc, prefit=True)
X_new = model.transform(X_train)
_ifeatures=model.get_support()
_iscore=_estimate_classifiers_matrix(X_train[:, [_i for _i, v in enumerate(_ifeatures) if v]], y_train)
if _iscore<scores:
break
with open(ofilename+'_temp', 'wb') as fp:
pickle.dump(features[_ifeatures], fp)
return
return
"""
with open('featurethresholdname', 'wb') as fp:
pickle.dump(_sortthresholdvalues, fp)
print(round(np.min(_sortthresholdvalues),2),round(np.max(_sortthresholdvalues),2))
return
_sortthresholdvalues= pickle.load( open( 'featurethresholdname', "rb" ))
"""
_thresholdvalues=np.linspace(round(np.min(_sortthresholdvalues),2),round(np.max(_sortthresholdvalues),2), num=10)
_thresholdvalues=_thresholdvalues[::-1]
scores=[]
print(_thresholdvalues)
for _threshold in _thresholdvalues:
tuplist=np.ones(_ilength)
_outlist= np.array([[tup[0], tup[1]] for tup in _sortthresholdvalues if abs(tup[2])>_threshold])
"""
np.random.shuffle(_outlist)
for i in xrange(_outlist.size):
a,b=_outlist[i]
if tuplist[a]==0 or tuplist[b]==0:
continue
tuplist[a]=0
#_ifeatures=[ for i in]
pass
"""
"""
return
std_scale = preprocessing.StandardScaler().fit(X_train)
df_std = std_scale.transform(X_train)
df_std_thresholds=[]
df_minmax_thresholds=[]
for _id, feature in enumerate(features):
df_std_thresholds.append(df_std[:, _id].std())
df_minmax_thresholds.append(df_minmax[:, _id].std())
_score_std, _ifeature_std = _estimate_threshold(X_train, y_train, np.array(df_std_thresholds))
_score_minmax, _ifeature_minmax = _estimate_threshold(X_train, y_train, np.array(df_minmax_thresholds))
logging.info("standardscales")
_thresholds=np.sort(np.unique(np.array([int(v*100) for v in df_std_thresholds])))[:-1]
scores=[]
if _thresholds.size>0:
for _threshold in _thresholds:
_out_ifeatures = _estimate_features(features, df_std_thresholds, _threshold*0.01)
scores.append( _estimate_classifiers_matrix(X_train[:,_out_ifeatures], y_train))
scores=np.array(scores)
if scores>_estimate_classifiers_matrix(X_train, y_train):
_i=np.argmax(scores)
with open(ofilename, 'wb') as fp:
pickle.dump(features[_estimate_features(features, df_std_thresholds, _thresholds[_i]*0.01)], fp)
return
if len(set(df_minmax_thresholds)) > 1:
logging.info("minmaxscales")
_currentfeatureCount=len(features)
_thresholds=np.sort(np.unique(np.array([int(v*100) for v in df_minmax_thresholds])))[:-1]
scores=[]
for _threshold in _thresholds:
_features = _estimate_features(features, df_minmax_thresholds, _threshold*0.01)
scores.append( _estimate_classifiers(X_train, y_train, _features))
scores=np.array(scores)
if scores>_estimate_scores(X_train, y_train, features):
_i=np.argmax(scores)
features = _estimate_features(features, df_std_thresholds, _thresholds[_i]*0.01)
with open(ofilename, 'wb') as fp:
pickle.dump(features, fp)
if len(set(df_minmax_thresholds)) > 0:
logging.info("minmax")
for _threshold in set(df_minmax_thresholds)-set(max(df_minmax_thresholds)):
pass
pass
"""
|
[
"qinna@banksup.fr"
] |
qinna@banksup.fr
|
817ba934ccc1fc01be7563d1b603dffaf4e97900
|
141f7a35d85fc48659961c26a70037376438c188
|
/ls05_MNIST_Expert.py
|
a6e5e626cbf6db868c9bf470cb8098e4e5e0e7b4
|
[
"MIT"
] |
permissive
|
yu961549745/tfnote
|
367947e0234cb8ef200acdbb0d54136e7e0db845
|
df21c414eecfd4e94dd0097f1d57dc2ff1546b70
|
refs/heads/master
| 2021-01-19T00:19:00.244740
| 2017-04-14T14:17:45
| 2017-04-14T14:17:45
| 87,152,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
""" 卷积神经网络训练MNIST """
#%%
# Preparation
# Define constants
import os
import shutil
import time
dataPath = 'MNIST_data'
modelSavePath = 'MNIST_conv'
modelCkpPath = os.path.join(modelSavePath, 'conv')
modelMetaFile = modelCkpPath + ".meta"
batchSize = 50
trainSteps = 500
logPeriod = 100
savePeriod = 1000
startStep = 0
# Load the MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(dataPath, one_hot=True)
import tensorflow as tf
import ls05_mnist as model
sess = tf.Session()
if startStep == 0:
input_image, output_valid, keep_prob, train_step, accuracy, cross_entropy, _ = model.build_graph()
# Clear the existing model folder on the first save
if os.path.exists(modelSavePath):
shutil.rmtree(modelSavePath)
os.mkdir(modelSavePath)
sess.run(tf.global_variables_initializer())
summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(modelSavePath, sess.graph)
saver = tf.train.Saver()
saver.export_meta_graph(modelMetaFile)
else:
saver = tf.train.import_meta_graph(modelMetaFile)
saver.restore(sess, modelCkpPath + '-' + str(startStep - 1))
input_image, output_valid, keep_prob, train_step, accuracy, cross_entropy, _ = model.restore_graph(
sess)
summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(modelSavePath, sess.graph)
# Training loop
st = time.time()
for step in range(startStep, startStep + trainSteps):
batch = mnist.train.next_batch(batchSize)
if step % logPeriod == 0 or step == trainSteps - 1:
_, loss_value, summary_str = sess.run([train_step, cross_entropy, summary], feed_dict={
input_image: batch[0], output_valid: batch[1], keep_prob: 0.5})
print("step = %d, loss = %g, time=%.3f sec" %
(step, loss_value, time.time() - st))
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
else:
sess.run(train_step, feed_dict={
input_image: batch[0], output_valid: batch[1], keep_prob: 0.5})
if (step + 1) % savePeriod == 0 or step == trainSteps - 1:
savepath = saver.save(sess, modelCkpPath, global_step=step)
print("save check point in %s" % (savepath))
print("test accuracy %g" % sess.run(accuracy, feed_dict={
input_image: mnist.test.images, output_valid: mnist.test.labels, keep_prob: 1.0}))
sess.close()
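# Note (not part of the original script): to resume training from a saved
# checkpoint instead of starting from scratch, set startStep to the step after
# the last saved checkpoint; the restore branch above then reloads the graph
# and weights via saver.restore(sess, modelCkpPath + '-' + str(startStep - 1)).
#
#   startStep = 500   # hypothetical: resume from checkpoint 'conv-499'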
|
[
"961549745@qq.com"
] |
961549745@qq.com
|
07d637deecc69512c48a0696bef739c3a3603102
|
c24b373d2d887d1f1cf128208f88efd497f94a77
|
/stat_functions.py
|
97144d6bebb90929bb20142f55aaa77674cb0e26
|
[] |
no_license
|
thebrandonlucas/StatCalc
|
03a9affdf001342ae98fb61ec625c97efd081ad6
|
dc5713fd692e0c24e832ae3a35e39b17176553b9
|
refs/heads/master
| 2020-04-10T11:31:17.770656
| 2018-12-09T02:13:32
| 2018-12-09T02:13:32
| 160,995,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
import re
#Split a string based on input into a list
def splist(string):
# new_str = ','.join(string.split())
outlist = re.split(' |,|\n|, |;|; ', string)
delimiters = [' ', ',', '\n', ';']
if not any([x in string for x in delimiters]):
outlist = 'Error: Please use a space, comma, newline, or semicolon as a delimiter'
return outlist
def floatlist(data):
inlist = [float(a) for a in splist(data)]
return inlist
def least_squares_estimates(xdata, ydata):
beta_naught_1 = sxy(xdata, ydata) / sxx(xdata)
beta_naught_0 = mean(ydata) - beta_naught_1 * mean(xdata)
return {"beta_naught_0":beta_naught_0, "beta_naught_1":beta_naught_1}
def least_squares_line(beta_naught_0, beta_naught_1):
# Placeholder: should compute y = beta_naught_0 + beta_naught_1 * x for a given x
y = 0
return y
def sxy(xdata, ydata):
sxy = 0
mean_x = mean(xdata)
mean_y = mean(ydata)
for i in range(0, len(xdata)):
sxy += (xdata[i] - mean_x) * (ydata[i] - mean_y)
return sxy
def sxx(xdata):
sxx = 0
mean_x = mean(xdata)
for num in xdata:
sxx += (num - mean_x)**2
return sxx
def mean(data):
sum = 0
n = len(data)
for num in data:
sum += num
return sum / n
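# Worked example (not part of the original module), using the helpers above:
# for xdata = [1, 2, 3] and ydata = [2, 4, 6], mean(xdata) = 2 and mean(ydata) = 4,
# sxy = (1-2)*(2-4) + (2-2)*(4-4) + (3-2)*(6-4) = 4 and sxx = (1-2)**2 + (2-2)**2 + (3-2)**2 = 2,
# so beta_naught_1 = sxy / sxx = 2.0 and beta_naught_0 = 4 - 2.0 * 2 = 0.0.
#
#   estimates = least_squares_estimates([1, 2, 3], [2, 4, 6])
#   print(estimates)   # expected: {'beta_naught_0': 0.0, 'beta_naught_1': 2.0}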
|
[
"bslucas2@crimson.ua.edu"
] |
bslucas2@crimson.ua.edu
|
e554785c0c9bfaf6944269219fde4e2ce7c936e8
|
6d37778aeca102f8af0d7e7f4b5630350700cb1e
|
/flask/venv/Lib/site-packages/pyspark/sql/functions.py
|
c5b6ad019e82d3543aa4012d66c679853c32fab6
|
[] |
no_license
|
genostack/book-recommender
|
92f24c56c27f9a91f3530d82a6fa51624c046066
|
87bbc13cbff0eeaa056576444f07cc1ed119d84c
|
refs/heads/master
| 2023-05-11T10:21:59.846792
| 2020-07-15T21:44:20
| 2020-07-15T21:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117,027
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to lower case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values return before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear after non-null values',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6 = {
# unary math functions
'stddev': 'Aggregate function: alias for stddev_samp.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: alias for var_samp.',
'var_samp': 'Aggregate function: returns the unbiased sample variance of' +
' the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1 = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give you sequential numbers, so
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give you sequential numbers, so
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
'toDegrees': 'Deprecated in 2.1, use degrees instead.',
'toRadians': 'Deprecated in 2.1, use radians instead.',
}
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6.items():
globals()[_name] = since(1.6)(_create_function(_name, _doc))
for _name, _doc in _functions_2_1.items():
globals()[_name] = since(2.1)(_create_function(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
globals()[_name] = since(2.4)(_create_function(_name, _doc))
del _name, _doc
@since(1.3)
def approxCountDistinct(col, rsd=None):
"""
.. note:: Deprecated in 2.1, use :func:`approx_count_distinct` instead.
"""
warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", DeprecationWarning)
return approx_count_distinct(col, rsd)
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
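# Illustrative usage (not part of the upstream docstring): in a join, wrapping the
# smaller side hints the optimizer to broadcast it to every executor instead of
# shuffling both sides, e.g.
#   large_df.join(broadcast(small_df), "key")
# where large_df and small_df are hypothetical DataFrames sharing a "key" column.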
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows which
may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
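# Illustrative usage (not part of the upstream docstring), using the doctest
# DataFrame `df` with rows (age=2, name=u'Alice') and (age=5, name=u'Bob'):
#   df.groupBy("name").agg(first("age"))
# returns one row per name with the first observed age; pass ignorenulls=True
# to skip over leading null values within each group.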
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows
which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=1.1568609015300986),
Row(age=5, name=u'Bob', rand=1.403379671529166)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=-0.7556247885860078),
Row(age=5, name=u'Bob', randn=-0.0861619008451133)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
.. note:: This is indeterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, count=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
`defaultValue` if there is less than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
:param count: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), count, default))
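# Illustrative usage (not part of the upstream docstring): lag (and lead, below)
# must be applied over a window specification, e.g.
#   from pyspark.sql.window import Window
#   w = Window.partitionBy("name").orderBy("age")
#   df.select("name", "age", lag("age", 1).over(w).alias("prev_age"))
# The first row of each partition receives the `default` value (None here).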
@since(1.4)
def lead(col, count=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
`defaultValue` if there is less than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
:param count: number of row to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), count, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
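# Illustrative usage (not part of the upstream docstring), again over a window
# specification:
#   from pyspark.sql.window import Window
#   w = Window.partitionBy("name").orderBy("age")
#   df.select("name", ntile(4).over(w).alias("quartile"))
# splits each ordered partition into 4 roughly equal buckets numbered 1 through 4.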
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.text.SimpleDateFormat` can be used.
.. note:: Whenever possible, use specialized functions like `year`. These benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_.
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_.
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
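A further illustrative sketch truncating to the hour (skipped by doctest; the result
shown is an assumption following the same pattern as the examples above):
>>> df.select(date_trunc('hour', df.t).alias('hour')).collect()  # doctest: +SKIP
[Row(hour=datetime.datetime(1997, 2, 28, 5, 0))]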
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Converts a time string with the given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to a Unix time stamp (in seconds), using the default timezone and the default
locale; returns null if the conversion fails.
If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
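An illustrative sketch of the no-argument form, which returns the current Unix
timestamp (skipped by doctest; the value shown is an assumption, since the actual
result depends on the current time):
>>> spark.range(1).select(unix_timestamp().alias('now')).collect()  # doctest: +SKIP
[Row(now=1428476400)]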
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
timezone to the given timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
timezone to the UTC timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
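A sliding-window sketch on the same ``df`` (skipped by doctest; the count shown is an
assumption based on each row falling into two overlapping 10-second windows):
>>> w2 = df.groupBy(window("date", "10 seconds", "5 seconds")).agg(sum("val").alias("sum"))  # doctest: +SKIP
>>> w2.count()  # doctest: +SKIP
2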
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(3.0)
def xxhash64(*cols):
"""Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm,
and returns the result as a long column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
[Row(hash=4105715581806190027)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'initcap': 'Returns a new string column by converting the first letter of each word to ' +
'uppercase. Words are delimited by whitespace.',
'lower': 'Converts a string column to lower case.',
'upper': 'Converts a string column to upper case.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function(_name, _doc))
del _name, _doc
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
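An illustrative, unverified sketch of decoding a binary column to a string (skipped by
doctest; the column name and the result shown are assumptions):
>>> df = spark.createDataFrame([(bytearray(b'abcd'),)], ['b'])  # doctest: +SKIP
>>> df.select(decode(df.b, 'UTF-8').alias('s')).collect()  # doctest: +SKIP
[Row(s=u'abcd')]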
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
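An illustrative, unverified sketch of encoding a string column to binary (skipped by
doctest; the column name and the result shown are assumptions):
>>> df = spark.createDataFrame([('abcd',)], ['s'])  # doctest: +SKIP
>>> df.select(encode(df.s, 'UTF-8').alias('b')).collect()  # doctest: +SKIP
[Row(b=bytearray(b'abcd'))]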
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,###,###.##', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the number of decimal places
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and is used as the result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to
be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the
left) is returned. If count is negative, everything to the right of the final delimiter
(counting from the right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (zero based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
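An illustrative sketch of the effect of `pos` (skipped by doctest; the result shown is
an assumption): searching for 'b' starting after its only occurrence yields 0.
>>> df.select(locate('b', df.s, 3).alias('s')).collect()  # doctest: +SKIP
[Row(s=0)]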
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern):
"""
Splits str around pattern (pattern is a regular expression).
.. note:: pattern is a string representing the regular expression.
>>> df = spark.createDataFrame([('ab12cd',)], ['s',])
>>> df.select(split(df.s, '[0-9]+').alias('s')).collect()
[Row(s=[u'ab', u'cd'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string.
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(array indices start at 1, or from the end if `start` is negative) with the specified `length`.
:param x: the array to be sliced
:param start: the starting index
:param length: the length of the slice
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
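An illustrative sketch of a negative `start`, which counts from the end of the array
(skipped by doctest; the result shown is an assumption):
>>> df.select(slice(df.x, -2, 2).alias("sliced")).collect()  # doctest: +SKIP
[Row(sliced=[2, 3]), Row(sliced=[4, 5])]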
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, lit("a"))).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(
_to_java_column(col), lit(extraction)._jc)) # noqa: F821 'lit' is dynamically defined.
@since(2.4)
def array_remove(col, element):
"""
Collection function: Removes all elements equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""Returns a new row for each element in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""Returns a new row for each element with position in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, a :class:`StructType`, or an :class:`ArrayType` with
the specified schema. Returns `null` in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. Accepts the same options as the JSON datasource.
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options)
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. Accepts the same options as the JSON datasource.
Additionally, the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), options)
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
>>> df = spark.range(1)
>>> df.select(schema_of_json('{"a": 0}').alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col)
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+--------------------------------+
|map3 |
+--------------------------------+
|[1 -> a, 2 -> b, 3 -> c, 1 -> d]|
+--------------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, it increments by 1 if `start` is less than or equal to `stop`,
otherwise by -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions, so such expressions end up being evaluated entirely. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
.. note:: Experimental
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, this can be used, for example, to ensure the length of each returned
`pandas.Series` matches that of its input batch, but it can not be used as the column length.
2. SCALAR_ITER
A scalar iterator UDF is semantically the same as the scalar Pandas UDF above except that the
wrapped Python function takes an iterator of batches as input instead of a single batch and,
instead of returning a single output batch, it yields output batches or explicitly returns a
generator or an iterator of output batches.
It is useful when the UDF execution requires initializing some state, e.g., loading a machine
learning model file to apply inference to every input batch.
.. note:: It is not guaranteed that one invocation of a scalar iterator UDF will process all
batches from one partition, although it is currently implemented this way.
Your code shall not rely on this behavior because it might change in the future for
further optimization, e.g., one invocation processes multiple partitions.
Scalar iterator UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
>>> pdf = pd.DataFrame([1, 2, 3], columns=["x"]) # doctest: +SKIP
>>> df = spark.createDataFrame(pdf) # doctest: +SKIP
When the UDF is called with a single column that is not `StructType`, the input to the
underlying function is an iterator of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_one(batch_iter):
... for x in batch_iter:
... yield x + 1
...
>>> df.select(plus_one(col("x"))).show() # doctest: +SKIP
+-----------+
|plus_one(x)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
When the UDF is called with more than one column, the input to the underlying function is an
iterator of tuples of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_cols(batch_iter):
... for a, b in batch_iter:
... yield a * b
...
>>> df.select(multiply_two_cols(col("x"), col("x"))).show() # doctest: +SKIP
+-----------------------+
|multiply_two_cols(x, x)|
+-----------------------+
| 1|
| 4|
| 9|
+-----------------------+
When the UDF is called with a single column that is `StructType`, the input to the underlying
function is an iterator of `pd.DataFrame`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_nested_cols(pdf_iter):
... for pdf in pdf_iter:
... yield pdf["a"] * pdf["b"]
...
>>> df.select(
... multiply_two_nested_cols(
... struct(col("x").alias("a"), col("x").alias("b"))
... ).alias("y")
... ).show() # doctest: +SKIP
+---+
| y|
+---+
| 1|
| 4|
| 9|
+---+
In the UDF, you can initialize some states before processing batches, wrap your code with
`try ... finally ...` or use context managers to ensure the release of resources at the end
or in case of early termination.
>>> y_bc = spark.sparkContext.broadcast(1) # doctest: +SKIP
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_y(batch_iter):
... y = y_bc.value # initialize some state
... try:
... for x in batch_iter:
... yield x + y
... finally:
... pass # release resources here, if any
...
>>> df.select(plus_y(col("x"))).show() # doctest: +SKIP
+---------+
|plus_y(x)|
+---------+
| 2|
| 3|
| 4|
+---------+
3. GROUPED_MAP
A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
>>> def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
4. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregated UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregated UDFs as window functions. Note that only
unbounded window frame is supported at the moment:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = Window \\
... .partitionBy('id') \\
... .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.5|
| 1| 2.0| 1.5|
| 2| 3.0| 6.0|
| 2| 5.0| 6.0|
| 2|10.0| 6.0|
+---+----+------+
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions and it ends up with being executed all internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
"""
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
[
"zalexander@nycourts.gov"
] |
zalexander@nycourts.gov
|
9e3cbafd4a0b96b283ad9df026321443e390cc66
|
85324137fe84d89c13e45bf5160f13a3eb225bdc
|
/test_6/t_unittest.py
|
6ad1a4115d0e3f4a0816b6206bce0ea55c1ef99e
|
[] |
no_license
|
lazyemmm/py
|
f4d2b73aa40d4130313bd0d45f82e88a12a0320a
|
95dcf728adf4594b194ca181e0476eca72179c60
|
refs/heads/master
| 2022-12-14T20:37:13.543995
| 2020-08-28T10:57:22
| 2020-08-28T10:57:22
| 288,702,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
import unittest
from mydict import Dict
class TestDict(unittest.TestCase):
def test_init(self):
d = Dict(a=1, b='test')
self.assertEqual(d.a, 1)
self.assertEqual(d.b, 'test')
self.assertTrue(isinstance(d, dict))
def test_key(self):
d = Dict()
d['key'] = 'value'
self.assertEqual(d.key, 'value')
def test_attr(self):
d = Dict()
d.key = 'value'
self.assertTrue('key' in d)
self.assertEqual(d['key'], 'value')
def test_keyerror(self):
d = Dict()
with self.assertRaises(KeyError):
value = d['empty']
def test_attrerror(self):
d = Dict()
with self.assertRaises(AttributeError):
value = d.empty
|
[
"fightboy_wu@163.com"
] |
fightboy_wu@163.com
|
e11b7ec7bd7a959a4703e5714fe059def43a28bc
|
3c1896aa16ad8cc7a3971a661711486d9aaf4918
|
/cart/migrations/0001_initial.py
|
a4a63563d51bd93488059cdb9afb94aaad793145
|
[] |
no_license
|
Akshayvijayan27/foodvlog
|
17ae5a6d4570b4afbcf644347f31927a65a6a70e
|
84dc8bca1a0cf025909060e8f66325f8935db18d
|
refs/heads/master
| 2023-06-28T16:55:32.037594
| 2021-08-06T07:57:58
| 2021-08-06T07:57:58
| 393,298,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
# Generated by Django 2.2 on 2021-07-31 08:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='cartlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cart_id', models.CharField(max_length=250, unique=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"akshayvijayan5@gmail.com"
] |
akshayvijayan5@gmail.com
|
2e3c7ea7f655ba7213598a15fcc455998d7cbe2c
|
d3c77d6ee7c19d4a3bf224fcb2d6543f2d9fc8b9
|
/AirRegulationGUI Edits/Writing.py
|
006634c59160e4c549276c30e41975004c0e153a
|
[] |
no_license
|
RednirgSkizzif/HALTHASS
|
bf562f40ffe5f6b909dadbcffdd16e8eff4d04db
|
e18e1c1ca7d1c1c110e6a1f8aa45c2b47a27e1bb
|
refs/heads/master
| 2020-05-22T01:27:20.251099
| 2017-06-04T00:21:58
| 2017-06-04T00:21:58
| 63,896,924
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,100
|
py
|
import numpy as np
#import pyqtgraph as pg
import serial,glob,math
#import pyqtgraph.exporters
import time,threading,sys,os,fnmatch,shutil
#filename=raw_input("filename: ")
filename="gfile.txt"
def fWrite(filename,y):
#try :f = open(filename, 'w', os.O_NONBLOCK);f.flush();f.write(y+'\n');f.close()
try :f = open(filename, 'w');f.flush();f.write(y+'\n');f.close()
except:print ''
def rms(y):
sum_squares = 0
for i in range(0,len(y)):
sum_squares = sum_squares+y[i]*y[i]
return math.sqrt(sum_squares/float(len(y)))
def findArduino(boardName,exclude):
a = raw_input('Connect %s and press Enter'%boardName)
if a is not None:
ACMport = glob.glob('COM12')
for i in range(0,len(ACMport)):
if ACMport[i]==exclude: ACMport[i]=0
try : info=serial.Serial(ACMport[i]);address=ACMport[i]
except: print ''
print '%s = %s'%(boardName,address);print info
return info,address
class SerialReader(threading.Thread):
def __init__(self, portR, M, N, ch, toG):
threading.Thread.__init__(self)
self.portR = portR
#self.portW = portW
self.M = M
self.N = N
self.ch = ch
self.toG = toG
self.ptr = 0
self.sp = 0.0
self.buffer = np.zeros((N*M,ch),dtype=np.float)
self.buff_rms = np.zeros(( M,ch),dtype=np.float)
self.buff_gt = np.zeros(( M ),dtype=np.float)
self.exitFlag = False
self.exitMutex = threading.Lock()
self.dataMutex = threading.Lock()
def run(self):
exitMutex = self.exitMutex
dataMutex = self.dataMutex
buffer = self.buffer
buff_rms = self.buff_rms
buff_gt = self.buff_gt
t1 = time.time()
portR = self.portR
#portW = self.portW
M = self.M
N = self.N
ch = self.ch
toG = self.toG
count = 0
sp = None
while True:
with exitMutex:
if self.exitFlag:break
#Read gAMP from DUE
temp = np.empty((M,ch),dtype=np.uint16)
portR.flush()
portR.read(int(0.1*M*2*ch))
R = portR.read(M*2*ch)
R = np.fromstring(R,dtype=np.uint16)
leading = R[0]>>12
leading = leading-100/sps
R = R&4095
R = R.reshape(M,ch)
for i in range (0,ch) : temp[:,(leading+i)%ch] = R[:, i]
for i in range (0,ch) : R[:,i] = temp[:,ch-1-i]
avg = np.array([np.average(R[:,i]) for i in range(0,ch)])
gAMP = np.array([(R[:,i]-avg[i])/toG for i in range(0,ch)]).reshape(M,ch)
#Write gRMS to MEGA
gRMS = np.array([rms(gAMP[:,i]) for i in range(0,ch)])
gt = np.sqrt(gRMS[0]**2+gRMS[1]**2+gRMS[2]**2)
fWrite(filename,str(gt))
#portW.flush()
#portW.write(str(gt))
#print gRMS ;
print gt
#print (self.ptr/M)%N
count += self.M
t2 = time.time()
difft = t2-t1
if difft > 1.0:
if sp is None : sp = count / difft
else : sp = sp * 0.9 + (count / difft) * 0.1
count = 0
t1 = t2
with dataMutex:
buffer [ self.ptr : self.ptr + M ] = gAMP
buff_rms[ (self.ptr/M)%N ] = gRMS
buff_gt [ (self.ptr/M)%N ] = gt
self.ptr = (self.ptr + self.M) % (N*M)
if sp is not None : self.sp = sp
def get(self,M):
with self.dataMutex:
ptr = self.ptr
M = self.M
if ptr==0 : data = self.buffer[ptr-M : ]
else : data = self.buffer[ptr-M :ptr].copy()
rate = self.sp
return data , self.buff_rms[(ptr/M)%N],self.buff_gt[(ptr/M)%N]
def exit(self):
with self.exitMutex : self.exitFlag = True
########################################################################################
ArduinoRead = serial.Serial('COM12',9600)
#ArduinoWrite, address_write = findArduino('Arduino_write',address_read)
sps = 50
dt = 0.00002
ymax = 600
t_logging= 10
t_avg = 1.0
M = int(round(t_avg/dt))
N = 50
ch = 6
toG = 2.7365
t_data = [i*dt for i in range(0,M) ]
#case = input( 'Grms plot (1(y)/0(n)) : ')
#DataLogging = input( 'DataLogging (1(y)/0(n)) : ')
#if DataLogging ==1 : DataInfo = raw_input('Description : ')
########################################################################################
"""
c=[(255,0,0),(255,255,0),(0,255,0),(0,0,255),(165,42,42),(128,0,128)]
app = pg.mkQApp()
win = pg.GraphicsWindow(title='SAMPLING RATE = %d [kHz]'%sps)
win.resize(1000,400)
plt = win.addPlot(title="ADXL001-500z")
plt.setLabels(left=('Acceleration','[g]'),bottom=('Time','[s]'))
plt.setYRange(-ymax, ymax)
plt.setXRange( 0, t_avg)
plt.showGrid(x=True,y=True)
plt.addLegend()
if DataLogging ==1 : exporter = pg.exporters.CSVExporter(plt)
A0=plt.plot(pen=c[0],name='A0')
"""
"""
A1=plt.plot(pen=[1],name='A1');A2=plt.plot(pen=[2],name='A2')
A3=plt.plot(pen=c[3],name='A3');A4=plt.plot(pen=[4],name='A4');A5=plt.plot(pen=[5],name='A5')
"""
"""
if case==1 :
win.nextRow()
pl = win.addPlot(title='grms')
#G0=pl.plot(pen=c[0]);G1=pl.plot(pen=c[1]);G2=pl.plot(pen=c[2])
#G3=pl.plot(pen=c[3]);G4=pl.plot(pen=c[4]);G5=pl.plot(pen=c[5])
#Gt=pl.plot(pen='w',symbolBrush='w',symbolPen='w')
Gt=pl.plot(pen='w')
"""
########################################################################################
#thread = SerialReader(ArduinoRead,ArduinoWrite,M,N,ch,toG)
thread = SerialReader(ArduinoRead,M,N,ch,toG)
thread.start()
########################################################################################
"""
#GT=[]
#r=0
def update():
global ArduinoRead,ArduinoWrite,thread
#global plt,A0,A1,A2,A3,A4,A5,GT
#global pl ,G0,G1,G2,G3,G4,G5,Gt
global plt,A0,pl,Gt
data,grms,gt = thread.get(M)
GT.append(gt*(1+r))
pl.setLabels(left='%0.1f'%gt)
A0.setData(t_data,data[:,0])
A1.setData(t_data,data[:,1]);A2.setData(t_data,data[:,2])
A3.setData(t_data,data[:,3]);A4.setData(t_data,data[:,4]);A5.setData(t_data,data[:,5])
Gt.setData(GT[:r])
if DataLogging==1:
timestamp=time.time()
now=int(round(timestamp))
if (now%t_logging)==0:
localDATE=time.strftime('%y%m%d' ,time.localtime(timestamp))
localTIME=time.strftime('%H:%M:%S',time.localtime(timestamp))
filename='accData_'+localDATE+'_%s_'%DataInfo + localTIME+'.csv'
exporter.export(filename)
if not plt.isVisible():
thread.exit()
timer.stop()
ArduinoRead.close() ; ArduinoRead.delete()
ArduinoWrite.close(); ArduinoWrite.delete()
r+=1
"""
"""
timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if sys.flags.interactive == 0 : app.exec_()
"""
|
[
"Lab350"
] |
Lab350
|
2bf5e8b51ce1d700bbedcd84c114dac432c95da1
|
bacfc861d1da7d5d699b22b5cc9ee02045c93364
|
/code/plzsang/venv/Scripts/pip-script.py
|
669447bb3a958122c51aa01e9b24922ab505a638
|
[] |
no_license
|
teamsang/project
|
5e84daac3ec1c20af3016852266875fd1c9daa8a
|
14e8386927286fafee38399b99bab76ee9d8fd8d
|
refs/heads/master
| 2020-03-27T06:56:26.052068
| 2018-08-26T03:06:48
| 2018-08-26T03:06:48
| 146,148,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!C:\Users\jyh54\PycharmProjects\plzsang\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"jyh5496@naver.com"
] |
jyh5496@naver.com
|
2e2368f95aad9e14ce79eed8afeefa8a9599ace3
|
1374d4f8ff2ca5691e545446f27a6d9178683aed
|
/server.py
|
6726da34fa6ce6e8b6f1f4ece480b8f3dfb129d0
|
[] |
no_license
|
jingyu-cai/cynsona
|
1ca98cb303a5931d796c43ec08eec8525e69b45b
|
10695b220beb9e07f33b2cce1cc4bcecbbbc5d6e
|
refs/heads/master
| 2022-07-12T19:19:00.051229
| 2022-07-03T00:15:30
| 2022-07-03T00:15:30
| 214,367,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import serial
#Starts serial connection with arduino
ser = serial.Serial('COM11', 9600, timeout = 0.1)
#Records default message (so that we can ignore these later)
init = ser.readline();
while 1:
try:
#reads data from arduino
x = ser.readline()
#checks if the message is actually important
if(x != init and x != None):
#checks if arduino sent a message saying "bad"
if("bad" in str(x)):
#Prompts user to see if it was an accident
read = input("Your computer might have been stolen! This wasn't an accident right? (Y/N) ")
#Supposed to send message to arduino but doens't work
if('N' in read):
ser.write("1".encode());
except:
#stops the program if connection is lost
break
|
[
"noreply@github.com"
] |
jingyu-cai.noreply@github.com
|
50c0b6a76a0aa7684d1a09266d5abb15f4f07d9f
|
cc4b1ce2ae201f5c6d2ed39ecfc40b6069ffe3f1
|
/mysite/settings.py
|
3a64f7a4b66d5d472aedb3e40d40ea5c5a0013c7
|
[] |
no_license
|
acabhishek942/xamcheck-question-paper
|
e040c521f0d355d000782403eadef1728134f2c1
|
6cadcbc704a08edca4368b57bc17730d0ef7a0f4
|
refs/heads/master
| 2016-09-06T14:40:57.740499
| 2014-07-18T06:15:44
| 2014-07-18T06:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
"""
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-(+xx2bj@$*ys=z@$6#3=px-9#3*_b7bwky9p#@ydd(2!%9v2j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Question_paper',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"abhyuday.cool@gmail.com"
] |
abhyuday.cool@gmail.com
|
5f72db758ead62c934da2ed764610a834b4ff712
|
012a115aec7b9f2c31811dabddb9fd16419053b6
|
/main/dl/opt/_smorms3.py
|
02de6b61379bec9487b2e3875bd8ca12ba5ab10f
|
[
"MIT"
] |
permissive
|
kuroitu/MAIN
|
219104bf8b1aa5ad2ae92b7f1f7863aaeece77b3
|
223ba9c15e7066351b85198dba8efa1b94c0c273
|
refs/heads/main
| 2023-06-03T10:16:11.873696
| 2021-06-25T10:50:42
| 2021-06-25T10:50:42
| 380,199,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
from enum import IntEnum, auto
from dataclasses import dataclass, InitVar
import numpy as np
from numpy import ndarray
try:
from ._base import BaseOpt
except ImportError:
# For doctest
from main.dl.opt import BaseOpt
class _keys(IntEnum):
s = 0
m = auto()
v = auto()
zeta = auto()
@dataclass
class SMORMS3(BaseOpt):
"""SMORMS3 optimizer class.
Examples:
>>> import numpy as np
>>> obj = SMORMS3()
>>> print(obj)
SMORMS3(eta=0.001)
>>> obj.update(np.array([-0.5, 1]))
array([ 0.00141421, -0.00141421])
"""
kind: InitVar[int] = 4
eta: float = 1e-3
def __post_init__(self, *args, **kwds):
super().__post_init__(*args, **kwds)
self.previous[_keys.s] = 1
def update(self, grad, *args, **kwds):
"""Update calculation.
Args:
grad (ndarray): Gradient propagating from the lower layer.
Returns:
delta (ndarray): Update delta.
"""
rho = 1/(1+self._s)
self._s += 1 - self._zeta*self._s
self._m += (1-rho)*(grad - self._m)
self._v += (1-rho)*(grad*grad - self._v)
self._zeta = (self._m*self._m/self._v)
delta = -grad*np.minimum(self.eta, self._zeta)/np.sqrt(self._v)
return delta
@property
def _s(self):
return self.previous[_keys.s]
@_s.setter
def _s(self, value):
self.previous[_keys.s] = value
@property
def _m(self):
return self.previous[_keys.m]
@_m.setter
def _m(self, value):
self.previous[_keys.m] = value
@property
def _v(self):
return self.previous[_keys.v]
@_v.setter
def _v(self, value):
self.previous[_keys.v] = value
@property
def _zeta(self):
return self.previous[_keys.zeta]
@_zeta.setter
def _zeta(self, value):
self.previous[_keys.zeta] = value
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"Skuroitu@gmail.com"
] |
Skuroitu@gmail.com
|
e7c51a32088d56cf3a7594dbc7511983d14c4d7f
|
1f8baa3a170dbe90db6a9977b11dbc71dbb3f6bb
|
/blog/templatetags/custom_filters.py
|
4682d7e79e02467f1dca957181a286c34fe2615a
|
[] |
no_license
|
dryalcinmehmet/veripetegi_15.02.2020
|
b9cd9cea0f7023cf3e8456b62b3f0e907214a7fe
|
75d0c3a5a1e6efb0e7c84fb62f6c859e5a5d8230
|
refs/heads/master
| 2022-04-18T17:17:02.391955
| 2020-02-15T10:21:25
| 2020-02-15T10:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
from django import template
import blog.models as CustomModels
from django.contrib.auth.models import User
register = template.Library()
@register.simple_tag(takes_context=True)
def get_profile(context,user):
profile=CustomModels.CustomProfile.objects.get(user=User.objects.get(username=user))
return profile
@register.simple_tag(takes_context=True)
def get_post_title(context,title):
post_title=CustomModels.Post.objects.get(title=title)
return post_title
@register.simple_tag(takes_context=True)
def get_document_title(context,title):
document_title=CustomModels.Document.objects.get(title=title)
return document_title
|
[
"dryalcinmehmet@gmail.com"
] |
dryalcinmehmet@gmail.com
|
95c8bdc5ff3c1d6ddf443c1641e87dca04201fa0
|
1e268418b8f309e88e0fffa8b61f72aa03428c1d
|
/python-dsa/minimum element in rotated array..py
|
83bb531de1e8ddf8ab4492c8075ad42bed05e0aa
|
[
"Apache-2.0"
] |
permissive
|
abhishek-parashar/Right-From-Scratch
|
4b2b1787c02cc26efa4af35412289874ed0a28f7
|
e596344b0db95cfdeba876676885f062ef5f7c23
|
refs/heads/master
| 2023-05-25T07:14:12.633352
| 2021-06-06T11:05:25
| 2021-06-06T11:05:25
| 258,607,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 19 19:08:19 2020
@author: Abhishek Parashar
"""
def minimum(i,j):
start=0
end=j
while(start<=end):
mid=(start+end)//2
if((a[mid]<=a[mid+1]) and (a[mid]<=a[mid-1])):
return a[mid]
elif(a[start]<=a[mid]):
start=mid+1
elif(a[mid]<=a[end]):
end=mid-1
if__name__="__main__"
t=int(input())
while(t>0):
n=int(input())
a=list(map(int,input().split())
minimum(a,n)
t=t-1
|
[
"noreply@github.com"
] |
abhishek-parashar.noreply@github.com
|
1f2df82c4d9546b38972027af34b27ca2283c3af
|
c655ed4dd08af672e39343dea97202e8b23a5fb0
|
/hs3_count.py
|
e20886cd0563391c8d2dd45336596cd3a7eccb04
|
[] |
no_license
|
laika-monkey/optimal_estimation
|
e800625b1bdf80ae99fecf74ab7d67796694c0d4
|
c7310a3dcb2c758093f71b33708e2c24613791a8
|
refs/heads/master
| 2021-11-27T12:26:01.700911
| 2019-02-01T03:17:43
| 2019-02-01T03:17:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
#!/usr/bin/env python
def run_main():
#_count number of fields of view currently being used,
# how many there are total possible
# And report on current usage and potential
import subprocess as s
from glob import glob
import numpy as np
import os
dir_hs3 = '/data/wsessions/hs3'
dir_lbl = '/data/wsessions/LBL-RTM_hs3'
sizes = []
files = glob(dir_lbl + '/HS3*fov*')
print dir_lbl
mods = { 'M' : 1, 'G' : 10e3, 'T' : 10e6 }
for f in files:
#_start process, capture output
out = s.Popen(['du', '-hs', f], stdout=s.PIPE).communicate()[0]
#_read in size
mod = out.split('\t')[0][-1:]
size = float(out.split('\t')[0][:-1])
size *= mods[mod]
sizes.append(size)
#_convert to math friendly
sizes = np.array(sizes)
#_print stats about current scenario
arg = (sizes.sum(), sizes.mean(), sizes.max(), sizes.min())
print 'total: {0}, mean: {1}, max: {2}, min: {3}\n'.format(*arg)
#_get all potential fields of view
nfovs = []
hs3_2013_files = glob(dir_hs3 + '/SHIS.CPL.GDAS.COLLOC.13*nc')
for f in hs3_2013_files:
from netCDF4 import Dataset as D
#_open file, get size of fov
nfov = len(D(f, 'r').dimensions['fov'])
nfovs.append(nfov)
nfovs = np.array(nfovs)
tot_poss = nfovs.sum()
tot_size = tot_poss * sizes.mean()
tot_diff = tot_size - sizes.sum()
arg = (tot_poss, tot_size, tot_diff)
print 'total_possible: {0}, total_size: {1}, total_diff: {2}\n'.format(*arg)
if __name__ == '__main__':
run_main()
|
[
"wsessions@stormy.larkaen.no-ip.biz"
] |
wsessions@stormy.larkaen.no-ip.biz
|
ea255d2ab3f68b19d54694b175bac43dbf7fce21
|
f6003f9f25dcc182e9fbce7a96d0dabb9341744c
|
/Curso_Udemy_Aulas/Seção 15 - Decoradores em Python/Preservando Metadata com Wraps.py
|
f2514cd3a0cf9f328a01aad0f5bec47cc1f43ae7
|
[] |
no_license
|
henriquecl/Aprendendo_Python
|
60a87959714f82894e996c06b0a1b767838c38fc
|
672029855431795defafd7e20e8da319bf34e502
|
refs/heads/master
| 2023-06-08T10:48:13.667893
| 2021-06-22T00:55:14
| 2021-06-22T00:55:14
| 261,029,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
"""
Preservando Metadatas com Wraps
Metadatas -> Dados intrisecos em arquivos
Wraps -> Funções que envolvem elementos com diversas finalidades
# Problema
def ver_log(funcao):
def logar(*args, **kwargs):
Eu sou uma função (logar) dentro de outra
print(f'Você está chamando {funcao.__name__}')
print(f'Aqui a documentação {funcao.__doc__}')
return funcao(*args, **kwargs)
return logar
@ver_log
def soma(a, b):
Soma dos números
return a + b
#print(soma(10, 30))
print(soma.__name__)
print(soma.__doc__)
"""
# Resolução do Problema
from functools import wraps
def ver_log(funcao):
@wraps(funcao)
def logar(*args, **kwargs):
"""Eu sou uma função (logar) dentro de outra"""
print(f'Você está chamando {funcao.__name__}')
print(f'Aqui a documentação: {funcao.__doc__}')
return funcao(*args, **kwargs)
return logar
@ver_log
def soma(a, b):
"""Soma dois números"""
return a + b
# print(soma(10, 30))
print(soma.__name__)
print(soma.__doc__)
print(help(soma))
|
[
"64755074+henriquecl@users.noreply.github.com"
] |
64755074+henriquecl@users.noreply.github.com
|
182d4f6cda0195c2c631a0867400e4608fedd047
|
b514be6e7eb60e0de2e312a12a9cfb0f39bf4752
|
/DataProcessing.py
|
183104974983020c1a6753ef95cc8562d94c788b
|
[] |
no_license
|
Vinu999/Sentiment
|
50eee28ac09595595fc45e9f94cccbca65148eb7
|
ce828f1a7b270da2c202d37b6e56e6e51edf667c
|
refs/heads/master
| 2021-05-03T12:46:33.432812
| 2018-02-06T18:30:32
| 2018-02-06T18:30:32
| 120,499,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
# coding: utf-8
# In[75]:
import nltk
import re
import json
from nltk.corpus import qc, state_union
from nltk.tokenize import sent_tokenize, PunktSentenceTokenizer
training_data = qc.raw('train.txt')
tok = sent_tokenize(training_data)
with open("train.txt") as f:
text = f.read()
toc = text.split("\n")
with open('workfile.json', 'a') as outfile:
print("[",file=outfile)
for x in range(5452):
word_list = toc[x].split()
n = len(word_list)
lists = word_list[1:n]
start = word_list[0].split(':')
sentence = ""
for word in lists:
sentence += word + " "
with open('workfile.json', 'a') as outfile:
print(json.dumps({'Question': sentence, 'Class': start[0], 'Context': start[1]}, sort_keys=True, indent=4),file=outfile)
if x != 4:
print(",",file=outfile)
with open('workfile.json', 'a') as outfile:
print("]",file=outfile)
# In[77]:
with open("test.txt") as f:
text = f.read()
toc = text.split("\n")
with open('testfile.json', 'a') as outfile:
print("[",file=outfile)
for x in range(500):
word_list = toc[x].split()
n = len(word_list)
lists = word_list[1:n]
start = word_list[0].split(':')
sentence = ""
for word in lists:
sentence += word + " "
with open('testfile.json', 'a') as outfile:
print(json.dumps({'Question': sentence, 'Class': start[0], 'Context': start[1]}, sort_keys=True, indent=4),file=outfile)
if x != 4:
print(",",file=outfile)
with open('testfile.json', 'a') as outfile:
print("]",file=outfile)
# In[68]:
|
[
"noreply@github.com"
] |
Vinu999.noreply@github.com
|
8a9ba54afacbf97af409fd1250e3a720e4b9082a
|
ece81c9ca3c5326236e0e7b9c1be097e5fcac603
|
/home/.config/i3/i3-cycle.py
|
dc87400e5bd4d4c17d9707bebb4099eea93cac21
|
[] |
no_license
|
RemyLi/dotfiles
|
fd2f08db5ad7e797b20bdd956320aa2b4894a3ed
|
38566371b0e136e2ae9d29c69c6d167bf652b9eb
|
refs/heads/master
| 2022-05-26T14:29:35.323049
| 2020-04-29T10:00:32
| 2020-04-29T10:00:32
| 115,724,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
#!/usr/bin/env python2
import json
import tempfile
import os
import i3
LAST_FOCUS_FILE = '%s/i3-last-focus' % tempfile.gettempdir()
def _load():
if os.path.exists(LAST_FOCUS_FILE):
with open(LAST_FOCUS_FILE) as f:
return int(f.read())
return None
def _write(window_id):
with open(LAST_FOCUS_FILE, 'w') as f:
f.write(str(window_id))
def _get_focused_workspace():
"""
Gets the currently focused i3 workspace.
Returns:
dict: i3 workspace object
"""
return filter(lambda w: w['focused'], i3.get_workspaces())[0]
def _leaves(nodes):
for node in nodes:
if len(node['nodes']) == 0:
return nodes
else:
return _leaves(node['nodes'])
def cycle():
"""
Cycles the windows in an i3 workspace.
"""
last_focused_id = _load()
# Get focused workspace name
wksp_name = _get_focused_workspace()['name']
# Get actual workspace tree object
wksp = i3.filter(type='workspace', name=wksp_name)[0]
# Get list of all windows in workspace
wksp_windows = _leaves(wksp['nodes'])
window_ids = map(lambda n: n['id'], wksp_windows)
# Go to next window if saved state exists
if last_focused_id in window_ids:
next_idx = window_ids.index(last_focused_id) + 1
if next_idx >= len(wksp_windows):
next_idx = 0
next_focus_id = wksp_windows[next_idx]['id']
# Set default state, first unfocused
else:
unfocused = filter(lambda n: not n['focused'], wksp_windows)
next_focus_id = unfocused[0]['id']
_write(next_focus_id)
i3.focus(con_id=next_focus_id)
if __name__ == '__main__':
from pprint import pprint
cycle()
|
[
"remy.limouzin@smile.fr"
] |
remy.limouzin@smile.fr
|
500b09256042801617ec625b5402a16c3a380333
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/beginner/257_extract_users_dict_from_multiline_string/save2_nopass.py
|
f977c1ffafea37f99a48ead69a840910b1a097f5
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 596
|
py
|
# ___ get_users passwd s.. __ d..
# """Split password output by newline,
# extract user and name (1st and 5th columns),
# strip trailing commas from name,
# replace multiple commas in name with a single space
# return dict of keys = user, values = name.
# """
# passwd ?.s...
# passwd line ___ ? __ ? __ ?.s..
# keys # list
# values # list
# ___ p __ passwd
# k__.a.. p.s.. ':' 0
# __ l.. p.s.. ':' 4 __ 0
# ?.a.. 'unknown'
# ____
# ?.a.. p.s.. ':' 4 .r.. ',', ''
# r.. d.. z.. ? ?
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
bf6e2cd25defe39e56029d48cdd9359598164d48
|
e3f94fd8a7e62b98c739c4ebaffc3c8a32b3c041
|
/ML_action/tree/treePlotter.py
|
5873963422b3fc768089d8edec00226e1ed754df
|
[] |
no_license
|
TheOneAC/ML
|
94f8fcc1b8fd3b8a032e7af288c11689c395f72f
|
d0d02deb0dd656a8074ac4ef26f53904e8088ead
|
refs/heads/master
| 2020-05-21T21:38:06.026611
| 2017-09-16T17:05:49
| 2017-09-16T17:05:49
| 84,650,560
| 0
| 0
| null | 2017-09-16T17:02:24
| 2017-03-11T13:07:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
#!/usr/bin/python
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle = "sawtooth", fc = "0.8")
leafNode = dict(boxstyle = "round4", fc = "0.8")
arrow_args = dict(arrowstyle = "<-")
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy = parentPt, xycoords = "axes fraction",
xytext = centerPt, textcoords = "axes fraction",
va = "center", ha = "center", bbox = nodeType, arrowprops = arrow_args)
def createPlot():
fig = plt.figure(1, facecolor = "white")
fig.clf()
createPlot.ax1 = plt.subplot(111, frameon = False)
plotNode("a decision node",(0.5, 0.1), (0.1, 0.5), decisionNode)
plotNode("a leaf node",(0.8, 0.1), (0.3, 0.8), leafNode)
plt.show()
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else: numLeafs +=1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1+ getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': \
{0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': \
{0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString)
def plotTree(myTree, parentPt, nodeTxt):
numLeafs = getNumLeafs(myTree)
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0]
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW,\
plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
plotTree(secondDict[key],cntrPt,str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff),
cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
|
[
"scuhss@gmail.com"
] |
scuhss@gmail.com
|
7f90dc7861838089a459669450753da130fc2146
|
ea3f0fe9764829061c57103a76b696d1a6fcd868
|
/02. Basic Syntax, Conditional Statements and Loops - Exercise/05_cant_sleep_count_sheep.py
|
059f0b6740d1beb39b9385dd39bbbabef73ca2ed
|
[] |
no_license
|
elenaborisova/Python-Fundamentals
|
ea7191cebe24f4ab134bdc148e572dc2b6c4928f
|
3525a38aaf8b7c3d9b38f450f636d7f44218cfef
|
refs/heads/main
| 2023-01-30T04:56:19.935971
| 2020-12-14T12:56:07
| 2020-12-14T12:56:07
| 307,526,038
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
num = int(input())
for i in range(1, num + 1):
print(f"{i} sheep...", end="")
|
[
"elenaborrisova@gmail.com"
] |
elenaborrisova@gmail.com
|
facffa06e1fab987af6da48bff5db5513078cecb
|
e6e81d0cd02223ca27f2c3f544b3c116e7617270
|
/Companies/Facebook/missingElement.py
|
4de22c00d92a4d96467cf099e3b2b899c3a3fd84
|
[] |
no_license
|
ashjambhulkar/objectoriented
|
86166640b0546713095dd5d8804fc78d31782662
|
6f07b50590ceef231be38d6d7b8c73a40c1152e9
|
refs/heads/master
| 2022-05-03T23:28:38.674275
| 2022-04-26T21:37:31
| 2022-04-26T21:37:31
| 249,091,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
class Solution:
def missingElement(self, nums: List[int], k: int) -> int:
result = [0]
for i in range(1, len(nums)):
test = nums[i]-nums[i-1]-1+result[-1]
result.append(test)
left, value = 0, 0
print(result)
for i in range(1, len(result)):
if result[i-1] <= k <= result[i]:
return nums[i-1] + (k-result[i-1])
return nums[-1] + k - result[-1]
|
[
"ashjambhulkar@hotmail.com"
] |
ashjambhulkar@hotmail.com
|
e9e6c81498d0cf7316240cef11e3fb9cc0a2e3f9
|
8b2d6f797b013e9d8473711ae96291a91750d550
|
/ciclo-for.py
|
7092b1fe8f0f7569cde7a041233337607342b3a6
|
[] |
no_license
|
CarlosCea/Curso-de-Python
|
516f8a78770e8e5828762c3c4392f6aa88cfe447
|
104a2abc63b7a954fb00e3448fba002f6855c9ec
|
refs/heads/master
| 2022-11-09T22:37:16.303209
| 2020-06-20T04:41:47
| 2020-06-20T04:41:47
| 271,706,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
for letra in "hola":
print(letra)
else:
print("fin ciclo for")
|
[
"ccea81@gmail.com"
] |
ccea81@gmail.com
|
409e0c63e880a6317636cdcc4ca2e944de9f46ac
|
8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6
|
/venv/Lib/site-packages/mypy/solve.py
|
8a8d2a4e235504cd05ac4f1b82a161fc775450be
|
[] |
no_license
|
RodrigoNeto/cursopythonyt
|
fc064a2e6106324e22a23c54bdb9c31040ac9eb6
|
279dad531e21a9c7121b73d84fcbdd714f435e7e
|
refs/heads/master
| 2023-07-03T00:54:09.795054
| 2021-08-13T12:42:24
| 2021-08-13T12:42:24
| 395,646,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
"""Type inference constraint solving"""
from typing import List, Dict, Optional
from collections import defaultdict
from mypy.types import Type, AnyType, UninhabitedType, TypeVarId, TypeOfAny, get_proper_type
from mypy.constraints import Constraint, SUPERTYPE_OF
from mypy.join import join_types
from mypy.meet import meet_types
from mypy.subtypes import is_subtype
def solve_constraints(vars: List[TypeVarId], constraints: List[Constraint],
strict: bool = True) -> List[Optional[Type]]:
"""Solve type constraints.
Return the best type(s) for type variables; each type can be None if the value of the variable
could not be solved.
If a variable has no constraints, if strict=True then arbitrarily
pick NoneType as the value of the type variable. If strict=False,
pick AnyType.
"""
# Collect a list of constraints for each type variable.
cmap = defaultdict(list) # type: Dict[TypeVarId, List[Constraint]]
for con in constraints:
cmap[con.type_var].append(con)
res = [] # type: List[Optional[Type]]
# Solve each type variable separately.
for tvar in vars:
bottom = None # type: Optional[Type]
top = None # type: Optional[Type]
candidate = None # type: Optional[Type]
# Process each constraint separately, and calculate the lower and upper
# bounds based on constraints. Note that we assume that the constraint
# targets do not have constraint references.
for c in cmap.get(tvar, []):
if c.op == SUPERTYPE_OF:
if bottom is None:
bottom = c.target
else:
bottom = join_types(bottom, c.target)
else:
if top is None:
top = c.target
else:
top = meet_types(top, c.target)
top = get_proper_type(top)
bottom = get_proper_type(bottom)
if isinstance(top, AnyType) or isinstance(bottom, AnyType):
source_any = top if isinstance(top, AnyType) else bottom
assert isinstance(source_any, AnyType)
res.append(AnyType(TypeOfAny.from_another_any, source_any=source_any))
continue
elif bottom is None:
if top:
candidate = top
else:
# No constraints for type variable -- 'UninhabitedType' is the most specific type.
if strict:
candidate = UninhabitedType()
candidate.ambiguous = True
else:
candidate = AnyType(TypeOfAny.special_form)
elif top is None:
candidate = bottom
elif is_subtype(bottom, top):
candidate = bottom
else:
candidate = None
res.append(candidate)
return res
|
[
"rodrigoneto.forseti@gmail.com"
] |
rodrigoneto.forseti@gmail.com
|
13ee4705a9e0b983fe4106dcf0d9cbab80cc5487
|
90d06e9cacd52f2ba07d55dd852cb3cb7171c452
|
/leetcode/273. Integer to English Words.py
|
10711bcffb79700dbaf952c1f45c4441e9ae1c82
|
[] |
no_license
|
shahidul2k9/problem-solution
|
efd91af08e103b552a225bca37660c51c60b98f2
|
0e970ac9c72f2ba13e66c180b208a2ec53886cd1
|
refs/heads/master
| 2022-10-16T04:04:31.556454
| 2022-09-24T08:09:46
| 2022-09-24T08:09:46
| 19,431,424
| 51
| 92
| null | 2022-09-24T08:09:47
| 2014-05-04T17:13:28
|
C++
|
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
class Solution:
def numberToWords(self, num: int) -> str:
digit_map = {
1: 'One',
2: 'Two',
3: 'Three',
4: 'Four',
5: 'Five',
6: 'Six',
7: 'Seven',
8: 'Eight',
9: 'Nine',
10: 'Ten',
11: 'Eleven',
12: 'Twelve',
13: 'Thirteen',
14: 'Fourteen',
15: 'Fifteen',
16: 'Sixteen',
17: 'Seventeen',
18: 'Eighteen',
19: 'Nineteen',
20: 'Twenty',
30: 'Thirty',
40: 'Forty',
50: 'Fifty',
60: 'Sixty',
70: 'Seventy',
80: 'Eighty',
90: 'Ninety'}
def single_digit(x) -> str:
if x == 0:
return ''
else:
return digit_map[x]
def two_digit(x) -> str:
if x < 10:
return single_digit(x)
elif x <= 20:
return digit_map[x]
else:
text = digit_map[(x // 10) * 10] + ' ' + single_digit(x % 10)
return text.strip()
def three_digit(x) -> str:
seq = []
if x // 100 > 0:
seq.append(single_digit(x // 100) + ' Hundred')
x %= 100
if x > 0:
seq.append(two_digit(x))
return ' '.join(seq)
if num == 0:
return 'Zero'
else:
seq = []
billion = num // 1_000_000_000
if billion > 0:
seq.append(three_digit(billion) + ' Billion')
num %= 1_000_000_000
million = num // 1_000_000
if million > 0:
seq.append(three_digit(million) + ' Million')
num %= 1_000_000
thousand = num // 1000
if thousand > 0:
seq.append(three_digit(thousand) + ' Thousand')
num %= 1000
seq.append(three_digit(num))
return ' '.join(seq).strip()
|
[
"shahidul2k9@gmail.com"
] |
shahidul2k9@gmail.com
|
e3fbc2dbf04f81e7629e6819e6d616b8ae68eb5b
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/pygments/lexers/parsers.py
|
80ef08904e7d49f6d3d9ecc8665cf09c19f393bf
|
[
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485
| 2022-09-18T20:44:45
| 2022-09-18T20:44:45
| 474,773,752
| 3
| 1
|
MIT
| 2022-11-03T20:07:50
| 2022-03-27T22:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 25,895
|
py
|
"""
pygments.lexers.parsers
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for parser generators.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
include, bygroups, using
from pygments.token import Punctuation, Other, Text, Comment, Operator, \
Keyword, Name, String, Number, Whitespace
from pygments.lexers.jvm import JavaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.objective import ObjectiveCLexer
from pygments.lexers.d import DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.ruby import RubyLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
'AntlrJavaLexer', 'AntlrActionScriptLexer',
'TreetopLexer', 'EbnfLexer']
class RagelLexer(RegexLexer):
"""
A pure Ragel lexer. Use this for
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
.. versionadded:: 1.1
"""
name = 'Ragel'
url = 'http://www.colm.net/open-source/ragel/'
aliases = ['ragel']
filenames = []
tokens = {
'whitespace': [
(r'\s+', Whitespace)
],
'comments': [
(r'\#.*$', Comment),
],
'keywords': [
(r'(access|action|alphtype)\b', Keyword),
(r'(getkey|write|machine|include)\b', Keyword),
(r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
(r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
],
'numbers': [
(r'0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
'literals': [
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'\[(\\\\|\\[^\\]|[^\\\]])*\]', String), # square bracket literals
(r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/', String.Regex), # regular expressions
],
'identifiers': [
(r'[a-zA-Z_]\w*', Name.Variable),
],
'operators': [
(r',', Operator), # Join
(r'\||&|--?', Operator), # Union, Intersection and Subtraction
(r'\.|<:|:>>?', Operator), # Concatention
(r':', Operator), # Label
(r'->', Operator), # Epsilon Transition
(r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
(r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
(r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
(r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
(r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
(r'>|@|\$|%', Operator), # Transition Actions and Priorities
(r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition
(r'!|\^', Operator), # Negation
(r'\(|\)', Operator), # Grouping
],
'root': [
include('literals'),
include('whitespace'),
include('comments'),
include('keywords'),
include('numbers'),
include('identifiers'),
include('operators'),
(r'\{', Punctuation, 'host'),
(r'=', Operator),
(r';', Punctuation),
],
'host': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^{}\'"/#]+', # exclude unsafe characters
r'[^\\]\\[{}]', # allow escaped { or }
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\[^\\]|[^"\\])*"',
r"'(\\\\|\\[^\\]|[^'\\])*'",
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'\#.*$\n?', # ruby comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
}
class RagelEmbeddedLexer(RegexLexer):
"""
A lexer for `Ragel`_ embedded in a host language file.
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
.. versionadded:: 1.1
"""
name = 'Embedded Ragel'
aliases = ['ragel-em']
filenames = ['*.rl']
tokens = {
'root': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^%\'"/#]+', # exclude unsafe characters
r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\[^\\]|[^"\\])*"',
r"'(\\\\|\\[^\\]|[^'\\])*'",
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/', # regular expression
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
# Single Line FSM.
# Please don't put a quoted newline in a single line FSM.
# That's just mean. It will break this.
(r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
using(RagelLexer),
Punctuation, Text)),
# Multi Line FSM.
(r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'),
],
'multi-line-fsm': [
(r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
r'(' + r'|'.join((
r'[^}\'"\[/#]', # exclude unsafe characters
r'\}(?=[^%]|$)', # } is okay as long as it's not followed by %
r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
r'[^\\]\\[{}]', # ...and } is okay if it's escaped
# allow / if it's preceded with one of these symbols
# (ragel EOF actions)
r'(>|\$|%|<|@|<>)/',
# specifically allow regex followed immediately by *
# so it doesn't get mistaken for a comment
r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/\*',
# allow / as long as it's not followed by another / or by a *
r'/(?=[^/*]|$)',
# We want to match as many of these as we can in one block.
# Not sure if we need the + sign here,
# does it help performance?
)) + r')+',
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\[^\\]|[^"\\])*"',
r"'(\\\\|\\[^\\]|[^'\\])*'",
r"\[(\\\\|\\[^\\]|[^\]\\])*\]", # square bracket literal
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
)) + r')+', using(RagelLexer)),
(r'\}%%', Punctuation, '#pop'),
]
}
def analyse_text(text):
return '@LANG: indep' in text
class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Ruby Host'
aliases = ['ragel-ruby', 'ragel-rb']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(RubyLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: ruby' in text
class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
.. versionadded:: 1.1
"""
name = 'Ragel in C Host'
aliases = ['ragel-c']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(CLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: c' in text
class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
.. versionadded:: 1.1
"""
name = 'Ragel in D Host'
aliases = ['ragel-d']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(DLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: d' in text
class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
.. versionadded:: 1.1
"""
name = 'Ragel in CPP Host'
aliases = ['ragel-cpp']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(CppLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: c++' in text
class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Objective C Host'
aliases = ['ragel-objc']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(ObjectiveCLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: objc' in text
class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Java Host'
aliases = ['ragel-java']
filenames = ['*.rl']
def __init__(self, **options):
super().__init__(JavaLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: java' in text
class AntlrLexer(RegexLexer):
"""
Generic `ANTLR`_ Lexer.
Should not be called directly, instead
use DelegatingLexer for your target language.
.. versionadded:: 1.1
.. _ANTLR: http://www.antlr.org/
"""
name = 'ANTLR'
aliases = ['antlr']
filenames = []
_id = r'[A-Za-z]\w*'
_TOKEN_REF = r'[A-Z]\w*'
_RULE_REF = r'[a-z]\w*'
_STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
_INT = r'[0-9]+'
tokens = {
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*$', Comment),
(r'/\*(.|\n)*?\*/', Comment),
],
'root': [
include('whitespace'),
include('comments'),
(r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
Punctuation)),
# optionsSpec
(r'options\b', Keyword, 'options'),
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
(r'(scope)(\s*)(' + _id + r')(\s*)(\{)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
(r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
(r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?',
bygroups(Keyword, Whitespace, Name.Label, Punctuation),
('rule-alts', 'rule-prelims')),
],
'exception': [
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('comments'),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
],
'rule-prelims': [
include('whitespace'),
include('comments'),
(r'returns\b', Keyword),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
# throwsSpec
(r'(throws)(\s+)(' + _id + ')',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(,)(\s*)(' + _id + ')',
bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
# optionsSpec
(r'options\b', Keyword, 'options'),
# ruleScopeSpec - scope followed by target language code or name of action
# TODO finish implementing other possibilities for scope
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
(r'(scope)(\s+)(' + _id + r')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
(r'(@' + _id + r')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
],
'rule-alts': [
include('whitespace'),
include('comments'),
# These might need to go in a separate 'block' state triggered by (
(r'options\b', Keyword, 'options'),
(r':', Punctuation),
# literals
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'<<([^>]|>[^>])>>', String),
# identifiers
# Tokens start with capital letter.
(r'\$?[A-Z_]\w*', Name.Constant),
# Rules start with small letter.
(r'\$?[a-z_]\w*', Name.Variable),
# operators
(r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
(r',', Punctuation),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
(r';', Punctuation, '#pop')
],
'tokens': [
include('whitespace'),
include('comments'),
(r'\{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ r')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
],
'options': [
include('whitespace'),
include('comments'),
(r'\{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
'|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
],
'action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^${}\'"/\\]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\[^\\]|[^"\\])*"',
r"'(\\\\|\\[^\\]|[^'\\])*'",
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
# backslashes are okay, as long as we are not backslashing a %
r'\\(?!%)',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'(\\)(%)', bygroups(Punctuation, Other)),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'nested-arg-action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks.
r'[^$\[\]\'"/]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\[^\\]|[^"\\])*"',
r"'(\\\\|\\[^\\]|[^'\\])*'",
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
]
}
def analyse_text(text):
return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
.. versionadded:: 1.1
"""
name = 'ANTLR With CPP Target'
aliases = ['antlr-cpp']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(CppLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
.. versionadded:: 1.1
"""
name = 'ANTLR With ObjectiveC Target'
aliases = ['antlr-objc']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(ObjectiveCLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
.. versionadded:: 1.1
"""
name = 'ANTLR With C# Target'
aliases = ['antlr-csharp', 'antlr-c#']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(CSharpLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Python Target'
aliases = ['antlr-python']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(PythonLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Java Target'
aliases = ['antlr-java']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(JavaLexer, AntlrLexer, **options)
def analyse_text(text):
# Antlr language is Java by default
return AntlrLexer.analyse_text(text) and 0.9
class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Ruby Target'
aliases = ['antlr-ruby', 'antlr-rb']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(RubyLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Perl Target'
aliases = ['antlr-perl']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super().__init__(PerlLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
.. versionadded:: 1.1
"""
name = 'ANTLR With ActionScript Target'
aliases = ['antlr-actionscript', 'antlr-as']
filenames = ['*.G', '*.g']
def __init__(self, **options):
from pygments.lexers.actionscript import ActionScriptLexer
super().__init__(ActionScriptLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
class TreetopBaseLexer(RegexLexer):
"""
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
.. versionadded:: 1.6
"""
tokens = {
'root': [
include('space'),
(r'require[ \t]+[^\n\r]+[\n\r]', Other),
(r'module\b', Keyword.Namespace, 'module'),
(r'grammar\b', Keyword, 'grammar'),
],
'module': [
include('space'),
include('end'),
(r'module\b', Keyword, '#push'),
(r'grammar\b', Keyword, 'grammar'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace),
],
'grammar': [
include('space'),
include('end'),
(r'rule\b', Keyword, 'rule'),
(r'include\b', Keyword, 'include'),
(r'[A-Z]\w*', Name),
],
'include': [
include('space'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'),
],
'rule': [
include('space'),
include('end'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)),
(r'[A-Za-z_]\w*', Name),
(r'[()]', Punctuation),
(r'[?+*/&!~]', Operator),
(r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
(r'([0-9]*)(\.\.)([0-9]*)',
bygroups(Number.Integer, Operator, Number.Integer)),
(r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
(r'\{', Punctuation, 'inline_module'),
(r'\.', String.Regex),
],
'inline_module': [
(r'\{', Other, 'ruby'),
(r'\}', Punctuation, '#pop'),
(r'[^{}]+', Other),
],
'ruby': [
(r'\{', Other, '#push'),
(r'\}', Other, '#pop'),
(r'[^{}]+', Other),
],
'space': [
(r'[ \t\n\r]+', Whitespace),
(r'#[^\n]*', Comment.Single),
],
'end': [
(r'end\b', Keyword, '#pop'),
],
}
class TreetopLexer(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
.. versionadded:: 1.6
"""
name = 'Treetop'
aliases = ['treetop']
filenames = ['*.treetop', '*.tt']
def __init__(self, **options):
super().__init__(RubyLexer, TreetopBaseLexer, **options)
class EbnfLexer(RegexLexer):
"""
Lexer for `ISO/IEC 14977 EBNF
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
.. versionadded:: 2.0
"""
name = 'EBNF'
aliases = ['ebnf']
filenames = ['*.ebnf']
mimetypes = ['text/x-ebnf']
tokens = {
'root': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'=', Operator, 'production'),
],
'production': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'"[^"]*"', String.Double),
(r"'[^']*'", String.Single),
(r'(\?[^?]*\?)', Name.Entity),
(r'[\[\]{}(),|]', Punctuation),
(r'-', Operator),
(r';', Punctuation, '#pop'),
(r'\.', Punctuation, '#pop'),
],
'whitespace': [
(r'\s+', Text),
],
'comment_start': [
(r'\(\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
include('comment_start'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[*)]', Comment.Multiline),
],
'identifier': [
(r'([a-zA-Z][\w \-]*)', Keyword),
],
}
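# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal example of running one of the lexers defined above through Pygments'
# standard highlight() pipeline. It assumes Pygments is installed; the sample EBNF
# grammar string is invented.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample_ebnf = 'digit = "0" | "1" | "2" ;'
    # EbnfLexer is defined above; tokenize the sample and print ANSI-colored output.
    print(highlight(sample_ebnf, EbnfLexer(), TerminalFormatter()))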
|
[
"joao.a.severgnini@gmail.com"
] |
joao.a.severgnini@gmail.com
|
47165ea473f46e60cbd8a36c9564c7e8d21c27ff
|
4c17b690bab46c7e9439de451cd4913f8b85cacc
|
/NLP-Web-Apps/Summaryzer_Text_Summarization_App/nltk_summarization.py
|
60941867f3cc17acfb752192ee65f811ba165e69
|
[] |
no_license
|
animeshmohanty/A-Document-Redactor-and-Summarizer-Web-Application-Using-NLP-Spacy-and-Flask
|
4be81d4f713e818e7431c8ba95fb766a8bfcbb13
|
e9d99d9dcb69cf86148e176ec266396c27b39c65
|
refs/heads/master
| 2023-05-12T02:32:53.215238
| 2023-05-06T11:03:00
| 2023-05-06T11:03:00
| 260,978,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import heapq
# Frequency-based extractive summarization using NLTK
def nltk_summarizer(raw_text):
stopWords = set(stopwords.words("english"))
word_frequencies = {}
for word in nltk.word_tokenize(raw_text):
if word not in stopWords:
if word not in word_frequencies.keys():
word_frequencies[word] = 1
else:
word_frequencies[word] += 1
maximum_frequency = max(word_frequencies.values())
for word in word_frequencies.keys():
word_frequencies[word] = (word_frequencies[word]/maximum_frequency)
sentence_list = nltk.sent_tokenize(raw_text)
sentence_scores = {}
for sent in sentence_list:
for word in nltk.word_tokenize(sent.lower()):
if word in word_frequencies.keys():
if len(sent.split(' ')) < 30:
if sent not in sentence_scores.keys():
sentence_scores[sent] = word_frequencies[word]
else:
sentence_scores[sent] += word_frequencies[word]
summary_sentences = heapq.nlargest(7, sentence_scores, key=sentence_scores.get)
summary = ' '.join(summary_sentences)
return summary
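# --- Hedged usage sketch (added for illustration; not in the original) ---
# Shows how the frequency-based extractive summarizer above would be called.
# Assumes the NLTK 'punkt' and 'stopwords' data are already downloaded
# (nltk.download('punkt'); nltk.download('stopwords')); the sample text is invented.
if __name__ == '__main__':
    sample_text = (
        "Natural language processing studies how computers handle human language. "
        "Extractive summarization scores each sentence by the frequency of its words. "
        "The highest-scoring sentences are then joined to form the summary."
    )
    print(nltk_summarizer(sample_text))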
|
[
"noreply@github.com"
] |
animeshmohanty.noreply@github.com
|
74dbf0dd140ba0adab154ac88116cad819a14242
|
73f3f8214af99a07b838cc9e2967b127f336bd85
|
/Project/experiments_classes.py
|
28c10fd7f8cc0ec9f1f8f215e5b731f7c5cae0fe
|
[] |
no_license
|
giuliasellitto7/maltesque2021
|
d67f9cf586f16401b56e75f3b63ef1e43f453e64
|
0cc3e5a9755174f2dfa54972d279ab0a05dd09d4
|
refs/heads/main
| 2023-07-11T02:08:17.179736
| 2021-08-18T07:54:41
| 2021-08-18T07:54:41
| 373,078,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,694
|
py
|
from enum import Enum, IntEnum
from datetime import datetime
class Performance:
def __init__(self, fit_time, precision, recall, accuracy, inspection_rate, f1_score, mcc):
self.fit_time = fit_time
self.precision = precision
self.recall = recall
self.accuracy = accuracy
self.inspection_rate = inspection_rate
self.f1_score = f1_score
self.mcc = mcc
class DatasetReleases:
def __init__(self, num_training_set_releases, training_set_releases, test_set_release):
self.num_training_set_releases = num_training_set_releases
self.training_set_releases = training_set_releases
self.test_set_release = test_set_release
def __str__(self):
string = "DatasetReleases: ["
string = string + "num_training_set_releases: " + str(self.num_training_set_releases) + ", "
string = string + "training_set_releases: ["
for s in self.training_set_releases:
string = string + s + " "
string = string + "], "
string = string + "test_set_release: " + self.test_set_release + "]"
return string
@staticmethod
def cross_validation():
return DatasetReleases(-1, ["ALL"], "ALL")
class ExperimentSetting:
def __init__(self, dataset, approach, validation, balancing, classifier):
self.dataset = dataset
self.approach = approach
self.validation = validation
self.balancing = balancing
self.classifier = classifier
def __str__(self):
string = "ExperimentSetting: ["
string = string + "dataset: " + self.dataset + ", "
string = string + "approach: " + self.approach + ", "
string = string + "validation: " + self.validation + ", "
string = string + "balancing: " + self.balancing + ", "
string = string + "classifier: " + self.classifier + "]"
return string
class BagOfWordsExecTime:
def __init__(self, vocabulary_building_time, frequency_vectors_building_time):
self.vocabulary_building_time = vocabulary_building_time
self.frequency_vectors_building_time = frequency_vectors_building_time
class Log:
@staticmethod
def build(experiment_setting, dataset_releases, bow_exec_time, performance):
# dd/mm/YY H:M:S
now_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
return {"date_time": [now_string], "dataset": [experiment_setting.dataset],
"approach": [experiment_setting.approach], "validation": [experiment_setting.validation],
"balancing": [experiment_setting.balancing], "classifier": [experiment_setting.classifier],
"num_training_set_releases": [dataset_releases.num_training_set_releases],
"test_set_release": [dataset_releases.test_set_release],
"vocabulary_building_time": [bow_exec_time.vocabulary_building_time],
"frequency_vectors_building_time": [bow_exec_time.frequency_vectors_building_time],
"fit_time": [performance.fit_time], "precision": [performance.precision],
"recall": [performance.recall], "accuracy": [performance.accuracy],
"inspection_rate": [performance.inspection_rate], "f1_score": [performance.f1_score],
"mcc": [performance.mcc]}
@staticmethod
def header():
return {"date_time": [], "dataset": [], "approach": [], "validation": [], "balancing": [], "classifier": [],
"num_training_set_releases": [], "test_set_release": [],
"vocabulary_building_time": [], "frequency_vectors_building_time": [],
"fit_time": [], "precision": [], "recall": [], "accuracy": [], "inspection_rate": [],
"f1_score": [], "mcc": []}
@staticmethod
def dummy():
return {"date_time": ["x"], "dataset": ["x"], "approach": ["x"], "validation": ["x"], "balancing": ["x"],
"classifier": ["x"], "num_training_set_releases": [0], "test_set_release": ["x"],
"vocabulary_building_time": [0], "frequency_vectors_building_time": [0], "fit_time": [0],
"precision": [0], "recall": [0], "accuracy": [0], "inspection_rate": [0], "f1_score": [0], "mcc": [0]}
class Dataset(IntEnum):
phpmyadmin = 1
class Approach(IntEnum):
metrics = 1
text = 2
class Validation(IntEnum):
cross_validation = 1
release_based = 2
class Classifier(IntEnum):
random_forest = 1
class Balancing(IntEnum):
none = 1
undersampling = 2
oversampling = 3
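# --- Hedged usage sketch (added for illustration; not in the original) ---
# Demonstrates how the value objects above fit together when building one log row;
# all numbers and release labels below are invented placeholders.
if __name__ == '__main__':
    setting = ExperimentSetting("phpmyadmin", "text", "release_based", "none", "random_forest")
    releases = DatasetReleases(2, ["4.0", "4.1"], "4.2")
    bow_time = BagOfWordsExecTime(vocabulary_building_time=1.2, frequency_vectors_building_time=3.4)
    perf = Performance(fit_time=0.5, precision=0.8, recall=0.7, accuracy=0.9,
                       inspection_rate=0.3, f1_score=0.75, mcc=0.6)
    # Log.build returns a dict of single-element lists, ready for a pandas DataFrame.
    print(Log.build(setting, releases, bow_time, perf))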
|
[
"noreply@github.com"
] |
giuliasellitto7.noreply@github.com
|
d9945149172f4f3192e1e6ead17a4b6d44a0bba5
|
1bd14e051251d08393731c03ccfb37a324227e1c
|
/troposphere_mate/mediaconvert.py
|
9838b4483bac9b86f1105f0a848a4b806fd2a952
|
[
"MIT"
] |
permissive
|
tsuttsu305/troposphere_mate-project
|
f04bb6a3d137be3e265652c626008edfbb670b55
|
15ee94cc913efb32bc991979efcad943c992074c
|
refs/heads/master
| 2023-06-07T15:07:47.041944
| 2021-07-05T02:02:00
| 2021-07-05T02:02:00
| 285,152,616
| 0
| 0
|
MIT
| 2020-08-05T02:08:01
| 2020-08-05T02:08:00
| null |
UTF-8
|
Python
| false
| false
| 4,199
|
py
|
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.mediaconvert
from troposphere.mediaconvert import (
AccelerationSettings as _AccelerationSettings,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class AccelerationSettings(troposphere.mediaconvert.AccelerationSettings, Mixin):
def __init__(self,
title=None,
Mode=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Mode=Mode,
**kwargs
)
super(AccelerationSettings, self).__init__(**processed_kwargs)
class JobTemplate(troposphere.mediaconvert.JobTemplate, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
SettingsJson=REQUIRED, # type: dict
AccelerationSettings=NOTHING, # type: _AccelerationSettings
Category=NOTHING, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
Name=NOTHING, # type: Union[str, AWSHelperFn]
Priority=NOTHING, # type: int
Queue=NOTHING, # type: Union[str, AWSHelperFn]
StatusUpdateInterval=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: dict
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
SettingsJson=SettingsJson,
AccelerationSettings=AccelerationSettings,
Category=Category,
Description=Description,
Name=Name,
Priority=Priority,
Queue=Queue,
StatusUpdateInterval=StatusUpdateInterval,
Tags=Tags,
**kwargs
)
super(JobTemplate, self).__init__(**processed_kwargs)
class Preset(troposphere.mediaconvert.Preset, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
SettingsJson=REQUIRED, # type: dict
Category=NOTHING, # type: Union[str, AWSHelperFn]
Description=NOTHING, # type: Union[str, AWSHelperFn]
Name=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: dict
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
SettingsJson=SettingsJson,
Category=Category,
Description=Description,
Name=Name,
Tags=Tags,
**kwargs
)
super(Preset, self).__init__(**processed_kwargs)
class Queue(troposphere.mediaconvert.Queue, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Description=NOTHING, # type: Union[str, AWSHelperFn]
Name=NOTHING, # type: Union[str, AWSHelperFn]
PricingPlan=NOTHING, # type: Union[str, AWSHelperFn]
Status=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: dict
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Description=Description,
Name=Name,
PricingPlan=PricingPlan,
Status=Status,
Tags=Tags,
**kwargs
)
super(Queue, self).__init__(**processed_kwargs)
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
1899821d1c3eaf6004295b733bc194f653d70da4
|
12d0f444452d3b2218cd270756283a0463d3e796
|
/sg/models/load_prediction_wavelet.py
|
b13316b5e37081b7f86f095a821b4fc458790adc
|
[] |
no_license
|
dal3006/load_forecasting-1
|
107ffdbb4648989ba85fa8ba39ecdddb9c24ddd1
|
d324a711a1a0c7ccd9587e0ecf9988a12214a1a3
|
refs/heads/master
| 2023-03-17T07:44:43.487863
| 2015-03-12T15:24:37
| 2015-03-12T15:24:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
"""Evolve a load predictor with BSpline data cleansing and a wavelet predictor."""
import random
from pyevolve import GAllele
import Oger
import sg.utils
import sg.utils.pyevolve_utils as pu
from model import Model
import wavelet
import load_cleansing
import load_prediction
class WaveletModelCreator(load_prediction.ModelCreator):
def _add_transform_genes(self):
"""This is where the models are defined. The models are passed to the
GA engine for evolution of the optimal set of parameters. Afterwards,
the models are tested, and performance is measured."""
self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # Scale
self._alleles.add(pu.make_choice_gene(1, [2])) # Aj, in the paper 2 gives best results.
self._loci_list += ['scale', 'Aj']
def _get_transform(self):
#return wavelet.linear_prediction
#return wavelet.linear_vector
#return wavelet.vector_multiscale_prediction
#return wavelet.iterative_multiscale_prediction
return wavelet.multiscale_prediction
if __name__ == "__main__":
load_prediction.run(WaveletModelCreator)
|
[
"axel.tidemann@gmail.com"
] |
axel.tidemann@gmail.com
|
13dab674d4c2fbd9022814b00023b34586aae4a1
|
42baf2da51e09248c8c31f69a69519364478dea3
|
/pdfextractor.py
|
5238709997df2bd5cca0b597322c49deef68a896
|
[] |
no_license
|
imkrishna588/Pdftotext
|
b36966b2fe568520a2ace00cc0852ac8b6852d67
|
632de0cfc53bc70c34d8bd8aff3f50866c1eb369
|
refs/heads/master
| 2022-12-16T01:02:13.337109
| 2020-09-15T12:34:31
| 2020-09-15T12:34:31
| 286,347,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,147
|
py
|
#! /usr/bin/python3
import os,distro
import sys
import re
import cv2
import json
import requests
import pytesseract
import numpy as np
import pandas as pd
import mysql.connector
import pymssql
import shutil
import urllib
import pdftotext
from time import sleep
from PIL import Image, ImageEnhance, ImageOps
from io import BytesIO
from distutils.dir_util import copy_tree
from smb.SMBConnection import SMBConnection
from xvfbwrapper import Xvfb
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.common import exceptions as E
from datetime import datetime,timedelta
from dateutil.parser import parse
from tabula import read_pdf
import tabula
header = ['discom_name','bill_for','gst_number','bill_number','bill_date','bill_period','cin','website','email']
dict1 = {}
dict1 = { 'header': { },
'client': { }}
file1 = open("abc.txt","r")
a = file1.readlines()
txt1 = ''
for n,i in enumerate(a):
txt1+=i.replace(' ','</tr>')+'<tr={}>'.format(n)
# txt1+=i
# print (txt1)
print ('****************************************************************************************')
c4 = re.compile('FOR(.*?)GST:', re.DOTALL)
v1 = c4.search(str(txt1), re.IGNORECASE).group(1).strip()
c5 = re.compile('GST:\s+(.*?)Bill', re.DOTALL)
v2 = c5.search(str(txt1), re.IGNORECASE).group(1).strip()
c6 = re.compile('Bill No.: (.*?)Bill Date', re.DOTALL)
v3 = c6.search(str(txt1), re.IGNORECASE).group(1).strip()
c7 = re.compile('Bill Date:(.*?)Bill Period', re.DOTALL)
v4 = c7.search(str(txt1), re.IGNORECASE).group(1).strip()
c8 = re.compile('Bill Period:(.*?)CIN:', re.DOTALL)
v5 = c8.search(str(txt1), re.IGNORECASE).group(1).strip()
c9 = re.compile('CIN:(.*?)Website:', re.DOTALL)
v6 = c9.search(str(txt1), re.IGNORECASE).group(1).strip()
c10 = re.compile('Website:(.*?)E-mail:', re.DOTALL)
v7 = c10.search(str(txt1), re.IGNORECASE).group(1).strip()
c11 = re.compile('E-mail:(.*?)\s{4}', re.DOTALL)
v8 = c11.search(str(txt1), re.IGNORECASE).group(1).strip()
######################## client #############################
comp_name = str(a[7]).strip()
d1 = re.compile('<tr=8>.*?</tr>.*?</tr></tr>.*?<tr=9>(.*?)</tr>', re.DOTALL)
a1 = d1.search(str(txt1), re.IGNORECASE).group(1).strip()
d2 = re.compile('<tr=12>.*?Phone -(.*?)</tr>', re.DOTALL)
a2 = d2.search(str(txt1), re.IGNORECASE).group(1).strip()
d3 = re.compile('<tr=12>.*?E-mail -(.*?)</tr>', re.DOTALL)
a3 = d3.search(str(txt1), re.IGNORECASE).group(1).strip()
################################ subdivision ########################
d4 = re.compile('<tr=7></tr>.*?</tr>(.*?)<tr=8>', re.DOTALL)
a4 = d4.search(str(txt1), re.IGNORECASE).group(1).strip()
d5 = re.compile('<tr=10></tr></tr>(.*?)<tr=11>', re.DOTALL)
a5 = d5.search(str(txt1), re.IGNORECASE).group(1).strip().replace('\n','')
d6 = re.compile('<tr=11></tr></tr>(.*?)</tr></tr>', re.DOTALL)
a6 = d6.search(str(txt1), re.IGNORECASE).group(1).strip()
combo = a5 +' '+a6
d7 = re.compile('Office Code:(.*?)<tr=13>', re.DOTALL)
a7 = d7.search(str(txt1), re.IGNORECASE).group(1).strip()
########################## complaint_center ################################
d8 = re.compile('<tr=11></tr></tr>.*?</tr></tr>(.*?)<tr=12>', re.DOTALL)
a8 = d8.search(str(txt1), re.IGNORECASE).group(1).strip()
d9 = re.compile('<tr=13>.*?</tr></tr></tr></tr>(.*?)<tr=14>', re.DOTALL)
a9 = d9.search(str(txt1), re.IGNORECASE).group(1).strip()
combo2 = a8+','+a9
########################### due_details ######################################
d10 = re.compile('<tr=17>(.*?)</tr>', re.DOTALL)
a10 = d10.search(str(txt1), re.IGNORECASE).group(1).strip()
d11 = re.compile('<tr=21>(.*?)<tr=22>', re.DOTALL)
a11 = d11.search(str(txt1), re.IGNORECASE).group(1).strip()
d12 = re.compile('Due Date Amount:(.*?)<tr=47>', re.DOTALL)
a12 = d12.search(str(txt1), re.IGNORECASE).group(1).strip()
d13 = re.compile('Amt after Due Date:(.*?)<tr=48>', re.DOTALL)
a13 = d13.search(str(txt1), re.IGNORECASE).group(1).strip()
########################### plan #######################################
d14 = re.compile('Due Date</tr></tr>(.*?)<tr=20>', re.DOTALL)
a14 = d14.search(str(txt1), re.IGNORECASE).group(1).strip()
a14 = a14.split(' ')
d15 = re.compile('<tr=22></tr></tr></tr>(.*?)Consumer Status', re.DOTALL)
a15 = d15.search(str(txt1), re.IGNORECASE).group(1).strip()
d16 = re.compile('Total Amt. after Due Date:</tr>.*?</tr>(.*?)<tr=25>', re.DOTALL)
a16 = d16.search(str(txt1), re.IGNORECASE).group(1).strip()
a16 = a16.split(' ')
############################### meter_reading ##############################################
d17 = re.compile('Current Reading Date.*?:(.*?)<tr=16>', re.DOTALL)
a17 = d17.search(str(txt1), re.IGNORECASE).group(1).strip()
d18 = re.compile('Previous Reading Date.*?:(.*?)<tr=19>', re.DOTALL)
a18 = d18.search(str(txt1), re.IGNORECASE).group(1).strip()
d19 = re.compile('Consumption.*?:(.*?)<tr=21>', re.DOTALL)
a19 = d19.search(str(txt1), re.IGNORECASE).group(1).strip()
d20 = re.compile('Trans. Loss.*?:(.*?)<tr=23>', re.DOTALL)
a20 = d20.search(str(txt1), re.IGNORECASE).group(1).strip()
d21 = re.compile('<tr=25></tr></tr></tr></tr></tr></tr>.*?:(.*?)<tr=26>', re.DOTALL)
a21 = d21.search(str(txt1), re.IGNORECASE).group(1).strip()
################################### account ##################################################
d22 = re.compile('<tr=29></tr></tr></tr></tr>(.*?)<tr=30>', re.DOTALL)
a22 = d22.search(str(txt1), re.IGNORECASE).group(1).strip()
d23 = re.compile('<tr=29></tr></tr></tr></tr>(.*?)<tr=30>', re.DOTALL)
a23 = d23.search(str(txt1), re.IGNORECASE).group(1).strip()
################################# last_payment_detail #######################################
d24 = re.compile('<tr=32></tr></tr></tr></tr>(.*?)<tr=33>', re.DOTALL)
a24 = d24.search(str(txt1), re.IGNORECASE).group(1).strip()
a24 = str(a24).split(' ')
############################## discom_payment #######################################
d25 = re.compile(' BENEFICIARY NAME(.*?)<tr=36>', re.DOTALL)
a25 = d25.search(str(txt1), re.IGNORECASE).group(1).strip()
d26 = re.compile('A/C NO.</tr>(.*?)<tr=37>', re.DOTALL)
a26 = d26.search(str(txt1), re.IGNORECASE).group(1).strip()
d27 = re.compile('IFSC CODE</tr>(.*?)<tr=38>', re.DOTALL)
a27 = d27.search(str(txt1), re.IGNORECASE).group(1).strip()
d28 = re.compile('BANK BRANCH(.*?)<tr=39>', re.DOTALL)
a28 = d28.search(str(txt1), re.IGNORECASE).group(1).strip()
############################## meter_reading_detail part 1 ##################################
d29 = re.compile('<tr=53>(.*?)<tr=54>', re.DOTALL)
a29 = d29.search(str(txt1), re.IGNORECASE).group(1).strip()
a29 = a29.split(' ')
############################## meter_reading_detail part 2 ##################################
d30 = re.compile('<tr=54>(.*?)<tr=55>', re.DOTALL)
a30 = d30.search(str(txt1), re.IGNORECASE).group(1).strip()
a30 = a30.split(' ')
############################## meter_reading_detail part 3 ##################################
d31 = re.compile('<tr=55>(.*?)<tr=56>', re.DOTALL)
a31 = d31.search(str(txt1), re.IGNORECASE).group(1).strip()
a31 = a31.split(' ')
############################ allowed_service ##############################
d32 = re.compile('<tr=59>(.*?)<tr=65>', re.DOTALL)
a32 = d32.search(str(txt1), re.IGNORECASE).group(1).strip()
a32 = a32.split(' ')
################################ particulars ###########################################
d33 = re.compile('</tr> Bill Amount(.*?)<tr=82>', re.DOTALL)
a33 = d33.search(str(txt1), re.IGNORECASE).group(1).strip()
# print (a33)
a33 = a33.split('. ')
part = []
for i in a33:
part2=[]
for p,j in enumerate(i.split('\n')):
part2.append(j)
part.append(part2)
part3=[]
for t1 in part:
if len(t1)>=1:
for t2 in t1:
t2 = re.sub('<tr.*?>|</tr>| ','',str(t2)).strip()
part3.append(t2)
else:
pass
last=[]
for t4 in part3:
t4 = re.sub('^\d{1,2}.','',str(t4)).strip()
t4 = re.sub(r'\s{1,}\d+$', '', t4)
if t4.startswith('Power Factor') or t4.startswith('Shunt Capacitor') or t4.startswith('Unauthorized Consumption'):
# t4=re.split("\d{2,}\s*\(....", t4)
t4=re.split("\d{2,}\s*\(....", t4)
for k2 in t4:
last.append(k2)
last.append(t4)
print (last)
print (len(last))
# for v,t in enumerate(j):
# print (v,t)
# if len(j) >1:
# for t in j:
# part2.append(t)
# else:
# for t in j
# part.append(part2)
# print ('final data = = = = = =',len(part))
# d34 = re.compile('<tr=.*?>.*?\d{1,2}.(.*?)\s{11}\d{1,5}.', re.DOTALL)
# a34 = d34.findall(str(a33), re.IGNORECASE)
# # print (a34)
# for i in a34:
# print (i)
# dict1['header']={'discom_name':a[0],'bill_for':v1}
# # c4 = re.compile('<tbody>(.*?)</tbody>', re.DOTALL)
# # v1 = c4.search(str(i), re.IGNORECASE).group(1)
# print (dict1)
file1.close()
# df = read_pdf('KOT074.pdf')
# print (df[])
# df[1].to_csv('file2.csv', header=True, index=True)
# path = 'KOT074.pdf'
# df = tabula.read_pdf(path, pages = '2', multiple_tables = True, output_format='json')
# for i in df:
# print (i)
# # for j in i:
# # print (j)
# dict1 = {}
# with open('KOT074.pdf','rb') as f:
# pdf=pdftotext.PDF(f)
# for j in pdf:
# print (j)
# json_data = json.dumps(j, indent=10)
# l2=str(j).split('\n')
# print (l2)
# l1=[]
# for k in l2:
# name = re.sub('\s{2,5}','<tr>',k)
# name = re.sub('<tr>{1,}','>',name)
# name = re.sub('>>','',name)
# if name.startswith('>'):
# l1.append(name)
# print (l1)
# # l3 = k.split('\n')
# for l in k:
# print (l)
# # name = re.sub('^\s*','',l)
# # print (name)
# # print ('*****************************************')
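# --- Hedged illustration (added; not part of the original script) ---
# Shows the capture-group style used by the field-extraction regexes above on a
# tiny invented sample string, so the logic can be checked without a real PDF dump.
def _demo_field_extraction():
    sample = "Bill No.: 12345 Bill Date: 01-01-2020 Bill Period: JAN-2020 CIN: U99999XYZ"
    bill_no = re.compile('Bill No.: (.*?)Bill Date', re.DOTALL).search(sample).group(1).strip()
    bill_date = re.compile('Bill Date:(.*?)Bill Period', re.DOTALL).search(sample).group(1).strip()
    print(bill_no, bill_date)  # -> 12345 01-01-2020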
|
[
"noreply@github.com"
] |
imkrishna588.noreply@github.com
|
731fca2dbba25741f97aec378171645ad494bf50
|
f85cfed4ae3c54b5d31b43e10435bb4fc4875d7e
|
/attacking_anti_tamper/taint_cpp/extern/Triton/src/testers/unittests/test_ast_deep.py
|
6b14f85a22f4759e7f7d54b0d8fc167d7a44384e
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
archercreat/dta-vs-osc
|
2f495f74e0a67d3672c1fc11ecb812d3bc116210
|
b39f4d4eb6ffea501025fc3e07622251c2118fe0
|
refs/heads/main
| 2023-08-01T01:54:05.925289
| 2021-09-05T21:00:35
| 2021-09-05T21:00:35
| 438,047,267
| 1
| 1
|
MIT
| 2021-12-13T22:45:20
| 2021-12-13T22:45:19
| null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
#!/usr/bin/env python2
# coding: utf-8
"""Test deep AST."""
import unittest
from triton import *
DEPTH = 1000
class TestDeep(unittest.TestCase):
"""Test deep AST."""
def setUp(self):
"""Define the arch."""
self.triton = TritonContext()
self.triton.setArchitecture(ARCH.X86_64)
self.ctx = self.triton.getAstContext()
self.sym_var = self.ctx.variable(self.triton.symbolizeRegister(self.triton.registers.rax))
self.triton.setConcreteRegisterValue(self.triton.registers.rbx, 0)
add_inst = Instruction()
add_inst.setAddress(0x100)
add_inst.setOpcode(b"\x48\x01\xC3") # add rbx, rax
sub_inst = Instruction()
sub_inst.setOpcode(b"\x48\x29\xC3") # sub rbx, rax
# We subtract and add the same symbolic value from rbx N times
for _ in range(DEPTH):
self.triton.processing(add_inst)
sub_inst.setAddress(add_inst.getAddress() + add_inst.getSize())
self.triton.processing(sub_inst)
add_inst.setAddress(sub_inst.getAddress() + sub_inst.getSize())
# And finally add the symbolic variable once more
add_inst.setAddress(add_inst.getAddress() + add_inst.getSize())
self.triton.processing(add_inst)
# Now rbx has `SymVar_0` value
self.complex_ast_tree = self.triton.getSymbolicRegister(self.triton.registers.rbx).getAst()
def test_z3_conversion(self):
result = self.triton.simplify(self.complex_ast_tree, True)
self.assertEqual(str(result), str(self.sym_var))
def test_duplication(self):
s = self.ctx.duplicate(self.complex_ast_tree)
def test_symbolic_variable_update(self):
self.triton.setConcreteVariableValue(self.sym_var.getSymbolicVariable(), 0xdeadbeaf)
self.assertEqual(self.complex_ast_tree.evaluate(), 0xdeadbeaf)
|
[
"sebi@quantstamp.com"
] |
sebi@quantstamp.com
|
04ec8bfa3575146bc845b1817a97b4dead5d07ab
|
2cf9e71865128b92e8c8dc0eea15d577786908de
|
/generate_markdown.py
|
45372e4aa07252629bbc35d9144684e0beca26c3
|
[] |
no_license
|
weaming/portfolio
|
9f843cef656ce94c179bdac49f60d5a5b1ec8252
|
dc95a7748dc0821c3bc081897dcd7cc5f3b1c4ab
|
refs/heads/master
| 2020-03-14T14:12:33.347532
| 2019-07-11T12:38:42
| 2019-07-11T12:38:57
| 131,648,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
#!/usr/bin/env python
# coding: utf-8
import json
with open('repos.json') as f:
repos = json.load(f)
with open('projects-show.txt') as f:
included = [l.strip() for l in f if l.strip() and l.strip()[0] not in '# \n']  # skip blank and comment lines
print(included)
with open('config.json') as f:
cfg = json.load(f)
repos_map = {x['name'].lower(): x for x in repos if x['owner']['login'] == cfg['username']}
repos_filtered = [repos_map[x.lower()] for x in included if x.lower() in repos_map]
md_repo_template = u"""## {name} [source]({html_url})
- Star: {stargazers_count} Watch: {watchers_count} Fork: {forks_count}
- Created: {created_at}
- Updated: {pushed_at}
{description}
"""
md_template = u"""# Portfolio
These are my projects on github.
{}
"""
repo_text_list = [md_repo_template.format(**repo) for repo in sorted(
repos_filtered,
key=lambda x: sum(x[k] for k in [
'stargazers_count',
'watchers_count',
'forks_count',
]),
reverse=True)]
with open('Portfolio.md', 'wb') as f:
f.write(md_template.format('\n'.join(repo_text_list)).encode('utf8'))
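# --- Hedged illustration (added; not part of the original script) ---
# The sort above orders repos by total engagement (stars + watchers + forks),
# highest first. A tiny self-contained example with invented repo dicts:
_demo_repos = [
    {'name': 'a', 'stargazers_count': 1, 'watchers_count': 1, 'forks_count': 0},
    {'name': 'b', 'stargazers_count': 5, 'watchers_count': 2, 'forks_count': 1},
]
_demo_sorted = sorted(
    _demo_repos,
    key=lambda x: sum(x[k] for k in ['stargazers_count', 'watchers_count', 'forks_count']),
    reverse=True)
# _demo_sorted[0]['name'] == 'b'  (total 8 beats total 2)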
|
[
"garden.yuen@gmail.com"
] |
garden.yuen@gmail.com
|
a7c2bab8989b59f48542db0530b5956dd956d663
|
c1538bb7e6b5f985dbcfe662a034cd86f33621d7
|
/bots/FR/Inmoov_AI_deprecated/deprecated/PYTHON/INMOOV-AI_opencv.py
|
94c6c8fd7afc0742c0af5e0304a54d7a84cc2e81
|
[
"Apache-2.0"
] |
permissive
|
lecagnois/aiml
|
0e28fe113bf719d7c012c42fadebd87f65e96edc
|
10c760e0260af59cd0a39bc59655f6e2fceb9642
|
refs/heads/master
| 2021-01-11T11:02:47.655458
| 2016-12-22T21:29:47
| 2016-12-22T21:29:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,227
|
py
|
# -*- coding: utf-8 -*-
from decimal import Decimal
global FaceDetectedCounter
FaceDetectedCounter=0
global FaceDetected
FaceDetected=1
global startTimerFunction
global posxSquare
global posySquare
global WidthSquare
WidthSquare=Decimal(0)
global FaceHadMoved
global DistanceOfTheFace
global ArtefactFace
###############################################################
# SETUP
###############################################################
# The face filter tries to detect a face; only after this many frames are we sure it's a face:
ArtefactFace=15 #frames
###############################################################
DistanceOfTheFace=10
FaceHadMoved=[0,0,0,0,0,0] # MoveLeftRight,MoveTopBottom,MoveFrontBack,MoveX,MoveY,MoveZ
posxSquare=Decimal(0)
posySquare=Decimal(0)
python.subscribe(opencv.getName(),"publishOpenCVData")
NoFaceDetectedTimer = Runtime.start("NoFaceDetectedTimer","Clock")
NoFaceDetectedTimer.setInterval(20000)
openCvModule="nothing"
def NoFaceDetectedTimerFunction(timedata):
global startTimerFunction
startTimerFunction+=1
if startTimerFunction==2:
FaceDetected=1
chatBot.getResponse("SYSTEM FACENOTDETECTED")
NoFaceDetectedTimer.addListener("pulse", python.name, "NoFaceDetectedTimerFunction")
# start the clock
#####################################################
# EYETRACKING
#####################################################
def trackHumans():
#i01.headTracking.findFace()
#i01.opencv.SetDisplayFilter
openCvInit()
i01.startEyesTracking(leftPort,22,24)
i01.startHeadTracking(leftPort)
i01.eyesTracking.pid.setPID("eyeX",12.0,1.0,0.1)
i01.eyesTracking.pid.setPID("eyeY",12.0,1.0,0.1)
i01.headTracking.faceDetect()
i01.eyesTracking.faceDetect()
head.eyeX.setVelocity(0)
head.eyeY.setVelocity(0)
head.rothead.setVelocity(0)
head.neck.setVelocity(0)
head.eyeX.setSpeed(1)
head.eyeY.setSpeed(1)
head.rothead.setSpeed(1)
head.neck.setSpeed(1)
def StoptrackHumans():
#i01.headTracking.findFace()
#i01.opencv.SetDisplayFilter
i01.eyesTracking.stopTracking()
opencv.removeFilters()
opencv.stopCapture()
#####################################################
# TAKE A PHOTO
#####################################################
def TakePhoto(messagePhoto):
openCvInit()
try:
i01.startEyesTracking(leftPort,22,24)
i01.eyesTracking.faceDetect()
except:
print "opencv error"
talkBlocking(messagePhoto)
global openCvModule
openCvModule = "photo"
global FaceDetected
global FaceDetectedCounter
global startTimerFunction
FaceDetectedCounter=0
FaceDetected=0
Light(0,0,0)
startTimerFunction=0
NoFaceDetectedTimer.startClock()
def PhotoProcess(messagePhoto):
global FaceDetected
Light(1,1,1)
FaceDetectedCounter=0
FaceDetected=1
NoFaceDetectedTimer.stopClock()
talkBlocking(messagePhoto)
Light(1,1,1)
talkBlocking("chi i i i i i i i i ize")
sleep(0.5)
Light(0,0,0)
sleep(0.1)
Light(1,1,1)
sleep(0.1)
Light(0,0,0)
sleep(0.1)
Light(1,1,1)
sleep(0.1)
try:
i01.stopTracking()
except:
print "opencv error"
opencv.removeFilters()
opencv.stopCapture()
sleep(1)
opencv.setInputSource("camera")
opencv.setCameraIndex(0)
opencv.capture()
sleep(0.5)
Light(0,0,0)
photoFileName = opencv.recordSingleFrame()
#print "name file is" , os.getcwd()+'\\'+str(photoFileName)
Light(1,1,1)
DisplayPic(os.getcwd()+'\\'+str(photoFileName))
opencv.removeFilters()
opencv.stopCapture()
#i01.startEyesTracking(leftPort,22,24)
#i01.startHeadTracking(leftPort)
#####################################################
# OPENCVINIT
#####################################################
def openCvInit():
opencv.setCameraIndex(CameraIndex)
opencv.removeFilters()
opencv.addFilter("PyramidDown")
opencv.addFilter("Gray")
opencv.addFilter("FaceDetect")
opencv.setDisplayFilter("FaceDetect")
opencv.capture()
def onOpenCVData(data):
#####################################################
# This is opencv functions that do jobs
#####################################################
global FaceDetected
global posxSquare
global posySquare
global openCvModule
global WidthSquare
global FaceHadMoved
global FaceDetectedCounter
global DistanceOfTheFace
global MoveEyesRandom
global ArtefactFace
#####################################################
# openCvModule=="photo" : just detect one face
#####################################################
if openCvModule=="photo":
if data.getBoundingBoxArray() != None:
if not data.getBoundingBoxArray():
FaceDetectedCounter=0
else:
FaceDetectedCounter+=1
if FaceDetectedCounter>ArtefactFace and FaceDetected==0:
NoFaceDetectedTimer.stopClock()
FaceDetected=1
chatBot.getResponse("SYSTEM FACEDETECTED")
#####################################################
# openCvModule=="CalcDistance" : how far is the face
#####################################################
if openCvModule=="CalcDistance":
if data.getBoundingBoxArray() != None:
if not data.getBoundingBoxArray():
FaceDetectedCounter=0
FaceDetected=0
else:
FaceDetectedCounter+=1
if FaceDetectedCounter>ArtefactFace:
FaceDetected=1
rect = data.getBoundingBoxArray().get(0)
print rect.width
DistanceOfTheFace=rect.width
else:
FaceDetected=0
FaceDetectedCounter=0
#####################################################
# openCvModule=="123" : just detect if the detected face is moving in space. 1.2.3 soleil :)
#####################################################
if openCvModule=="123":
#Tweak speed movement of the head
openCvModulesensibilityLeftRightMin=0.05
openCvModulesensibilityLeftRightMax=0.2
openCvModulesensibilityFrontBackMin=0.01
openCvModulesensibilityFrontBackMax=0.1
#if something is detected
if data.getBoundingBoxArray() != None:
if data.getBoundingBoxArray():
#get the first face detected
rect = data.getBoundingBoxArray().get(0)
MoveLeftRight=abs(posxSquare-Decimal(rect.x))
#just to tune the demo detect if it's a left or right move
MoveTopBottom=abs(posySquare-Decimal(rect.y))
MoveFrontBack=abs(WidthSquare-Decimal(rect.width))
#We wait to be sure it is not an artefact
FaceDetectedCounter+=1
if FaceDetectedCounter>ArtefactFace:
#ok, detect whether the face moved left/right/front/back
if posxSquare != 0 and Decimal(rect.x) != 0 and Decimal(rect.y) != 0 and Decimal(rect.width) != 0 and MoveFrontBack < openCvModulesensibilityFrontBackMax and MoveTopBottom < openCvModulesensibilityLeftRightMax and MoveLeftRight < openCvModulesensibilityLeftRightMax and posySquare != 0 and MoveLeftRight !=0 and MoveTopBottom !=0:
print MoveFrontBack
#left/right move
#tune demo
MoveX=0
MoveY=0
MoveZ=0
if ((MoveLeftRight >= openCvModulesensibilityLeftRightMin ) or (MoveTopBottom >= openCvModulesensibilityLeftRightMin ) or (MoveFrontBack>=openCvModulesensibilityFrontBackMin )):
if MoveLeftRight >= openCvModulesensibilityLeftRightMin:
if posxSquare-Decimal(rect.x)>0:
MoveX="Left"
else:
MoveX="Right"
if MoveTopBottom >= openCvModulesensibilityLeftRightMin:
if posySquare-Decimal(rect.y)>0:
MoveY="Top"
else:
MoveY="Bottom"
if MoveFrontBack>=openCvModulesensibilityFrontBackMin:
if WidthSquare-Decimal(rect.width) >0:
MoveZ="Back"
else:
MoveZ="Front"
print "MOVE DETECTED :",MoveLeftRight,MoveTopBottom,MoveFrontBack,MoveX,MoveY,MoveZ
FaceHadMoved=[MoveLeftRight,MoveTopBottom,MoveFrontBack,MoveX,MoveY,MoveZ]
#talk("Tu as bougé!")  # i.e. "You moved!"
FaceDetectedCounter=0
# Store the information in rect
else:
FaceDetectedCounter=0
posxSquare = Decimal(rect.x) # Get the x position of the corner
posySquare = Decimal(rect.y) # Get the y position of the corner
WidthSquare = Decimal(rect.width)
DistanceOfTheFace = WidthSquare
# Get the width
h = rect.height
else:
FaceDetectedCounter=0
|
[
"moz4r@free.fr"
] |
moz4r@free.fr
|
4ca1975ac9903928546e0c425d38b5e9ab0172b9
|
014023477bf619b4b3dd4deb2730f3dcb8621178
|
/bidaf/my/tensorflow/nn.py
|
26ebde7af1b6c1273e40d58c83d9167441504bc5
|
[
"Apache-2.0"
] |
permissive
|
killa1218/BiDAF-Keyword-Extraction
|
88a8e86ebcdd9f706c39556a362ec3e09ff03455
|
c1986508da871184881bf1c0f902ceaaf1936c8f
|
refs/heads/master
| 2021-03-24T09:11:45.866107
| 2017-08-06T14:03:52
| 2017-08-06T14:03:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,840
|
py
|
from tensorflow.contrib.rnn.python.ops.rnn_cell import _linear
from tensorflow.python.util import nest
import tensorflow as tf
from my.tensorflow import flatten, reconstruct, add_wd, exp_mask
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
flat_args = [flatten(arg, 1) for arg in args]
if input_keep_prob < 1.0:
assert is_train is not None
flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
for arg in flat_args]
with tf.variable_scope(scope or "linear"):
flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
with tf.name_scope(name or "dropout"):
if keep_prob < 1.0:
d = tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
out = tf.cond(is_train, lambda: d, lambda: x)
return out
return x
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask=mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
def double_linear_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "Double_Linear_Logits"):
first = tf.tanh(linear(args, size, bias, bias_start=bias_start, scope='first',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train))
second = linear(first, 1, bias, bias_start=bias_start, squeeze=True, scope='second',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
if mask is not None:
second = exp_mask(second, mask)
return second
def linear_logits(args, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "Linear_Logits"):
logits = linear(args, 1, bias, bias_start=bias_start, squeeze=True, scope='first',
wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
if mask is not None:
logits = exp_mask(logits, mask)
return logits
def sum_logits(args, mask=None, name=None):
with tf.name_scope(name or "sum_logits"):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
rank = len(args[0].get_shape())
logits = sum(tf.reduce_sum(arg, rank-1) for arg in args)
if mask is not None:
logits = exp_mask(logits, mask)
return logits
def get_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None, func=None):
if func is None:
func = "sum"
if func == 'sum':
return sum_logits(args, mask=mask, name=scope)
elif func == 'linear':
return linear_logits(args, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'double':
return double_linear_logits(args, size, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'dot':
assert len(args) == 2
arg = args[0] * args[1]
return sum_logits([arg], mask=mask, name=scope)
elif func == 'mul_linear':
assert len(args) == 2
arg = args[0] * args[1]
return linear_logits([arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
elif func == 'proj':
assert len(args) == 2
d = args[1].get_shape()[-1]
proj = linear([args[0]], d, False, bias_start=bias_start, scope=scope, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
return sum_logits([proj * args[1]], mask=mask)
elif func == 'tri_linear':
assert len(args) == 2
new_arg = args[0] * args[1]
return linear_logits([args[0], args[1], new_arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob,
is_train=is_train)
else:
raise Exception()
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob, is_train=is_train)
prev = cur
return cur
def conv1d(in_, filter_size, height, padding, is_train=None, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
if is_train is not None and keep_prob < 1.0:
in_ = dropout(in_, keep_prob, is_train)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, is_train=None, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, is_train=is_train, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, 2)
return concat_out
|
[
"jaytin1218@hotmail.com"
] |
jaytin1218@hotmail.com
|
2f522513ddaae907e17cb286ee18979090f0a08e
|
4996b3152c10ff0f4b3cd9f08e085b6c9c8ba883
|
/SnakeAI/QLearning.py
|
1da9d0ecb2d27dc8f9d1506210c6b7f2dfc7c202
|
[] |
no_license
|
why116600/AIResearch
|
eb2f23dafa57ce8324b3b3706845dd89b19dd90f
|
59b809b4ee4e44cdd0af091c23f580a288b93dbb
|
refs/heads/master
| 2023-01-22T18:08:35.789329
| 2020-11-25T12:36:21
| 2020-11-25T12:36:21
| 315,596,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,597
|
py
|
import numpy as np
import random
import sys
import os
from SnakeAI import SnakeEnv
from SnakeAI import Network
class DoubleQLearningAgent:
def __init__(self,env,action_n=4,gamma=0.9,learning_rate=0.1):
self.env=env
self.gamma=gamma
self.learning_rate=learning_rate
self.q0={}
self.q1={}
self.action_n=action_n
def Save(self,filename):
with open(filename,'w') as fp:
fp.write('%d,%d,%d\n'%(self.env.grid_size,len(self.q0.keys()),len(self.q1.keys())))
for state,action in self.q0.keys():
for i in range(len(state)):
fp.write('%d,'%state[i])
fp.write('%d,'%action)
fp.write(str(self.q0[(state,action)]))
fp.write('\n')
for state,action in self.q1.keys():
for i in range(len(state)):
fp.write('%d,'%state[i])
fp.write('%d,'%action)
fp.write(str(self.q1[(state,action)]))
fp.write('\n')
def Open(self,filename):
with open(filename,'r') as fp:
head=fp.readline()
gridsize,nq0,nq1=[int(s) for s in head.split(',')]
self.q0={}
self.q1={}
for i in range(nq0):
line=fp.readline()
data=line.split(',')
if len(data)<=2:
continue
state=tuple([int(s) for s in data[:-2]])
action=int(data[-2])
value=float(data[-1])
self.q0[(state,action)]=value
for i in range(nq1):
line=fp.readline()
data=line.split(',')
if len(data)<=2:
continue
state=tuple([int(s) for s in data[:-2]])
action=int(data[-2])
value=float(data[-1])
self.q1[(state,action)]=value
def GetQ0Value(self,state,action):
value=0.0
if (state,action) in self.q0.keys():  # keys are (state, action) tuples
value=self.q0[(state,action)]
return value
def GetQ1Value(self,state,action):
value=0.0
if (state,action) in self.q1.keys():  # keys are (state, action) tuples
value=self.q1[(state,action)]
return value
def GetActionValue(self,state,action):
q0=0.0
q1=0.0
if (state,action) in self.q0.keys():
q0=self.q0[(state,action)]
if (state,action) in self.q1.keys():
q1=self.q1[state,action]
return q0+q1
def ActionFromPolicy(self,state,epsilon=0.1):
if random.random()>epsilon:
action=np.argmax([self.GetActionValue(state,a) for a in range(self.action_n)])
else:
action=random.randint(0,self.action_n-1)
return action
def Learn(self,state,action,reward,next_state,done):
if np.random.randint(2):
self.q0,self.q1=self.q1,self.q0
A=np.argmax([self.GetQ0Value(next_state,i) for i in range(self.action_n)])
U=reward+self.gamma*self.GetQ1Value(next_state,A)*(1-done)
td_error=U-self.GetQ0Value(state,action)
if (state,action) in self.q0.keys():
self.q0[(state,action)]+=self.learning_rate*td_error
else:
self.q0[(state,action)]=self.learning_rate*td_error
def Train(self,state):
state=tuple(state.reshape(self.env.grid_size))
A=self.ActionFromPolicy(state)
S,R,finish,remark=self.env.step(A)
S=tuple(S.reshape(self.env.grid_size))
self.Learn(state,A,R,S,finish)
self.env.render()
if R!=0.0:
print('got %f score'%(R,))
return R,finish
def main():
if len(sys.argv)<3:
return
envSize=int(sys.argv[1])
trainCount=int(sys.argv[2])
roundLimit=envSize*envSize*envSize*envSize
env=SnakeEnv((envSize,envSize))
agent=DoubleQLearningAgent(env)
if len(sys.argv)>=4 and os.path.exists(sys.argv[3]):
agent.Open(sys.argv[3])
step=0
score=0
print('first:')
env.render()
for i in range(trainCount):
print('train',i)
R,finish=agent.Train(env.state)
score+=R
step+=1
if finish or step>=roundLimit:
env.reset()
print('score:',score)
print('first:')
env.render()
step=0
score=0
print('Training complete!')
if len(sys.argv)>=4:
agent.Save(sys.argv[3])
if __name__=='__main__':
main()
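# --- Hedged usage sketch (added; not part of the original) ---
# Exercises the tabular double Q-learning update in isolation: env is only needed
# by Train()/main(), so Learn() can be driven with an invented toy transition.
# Assumes the SnakeAI imports at the top of this file resolve.
def _demo_learn():
    agent = DoubleQLearningAgent(env=None, action_n=2)
    # One terminal transition: state (0,) --action 1--> state (1,) with reward 1.0.
    for _ in range(100):
        agent.Learn((0,), 1, 1.0, (1,), True)
    # Each of the two Q tables drifts toward 1.0, so the summed action value
    # printed below approaches 2.0.
    print(agent.GetActionValue((0,), 1))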
|
[
"wanghy286@mail2.sysu.edu.cn"
] |
wanghy286@mail2.sysu.edu.cn
|
a1606a2e4c7a2c8c0fd5cf6435a2d2747f724c35
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03711/s743526955.py
|
10387c576bd33af4e81220a5154c1c3b05e219ef
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
#Between Two Integers
def ABC_61_A():
x,y = map(int, input().split())
list1 = [1,3,5,7,8,10,12]
list2 = [4,6,9,11]
list3 = [2]
if list1.count(x) == 1 and list1.count(y) == 1:
print('Yes')
elif list2.count(x) == 1 and list2.count(y) == 1:
print('Yes')
else:
print('No')
if __name__ == '__main__':
ABC_61_A()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
24cd4ac77e3b3bb52798159e17ae4b3659e9cfce
|
17a17badd816665053fb69eb3c498d5281e6a0a6
|
/Bishop.py
|
315a8080cfed5f9913b6c7284dddac6c7be906a5
|
[] |
no_license
|
DimitarYordanov17/Chess
|
dc4de78a4ad975251a7ebf5c49552ce2abee6646
|
49ff6826fc2b6bcc12ca903f71c5ef017a737ea4
|
refs/heads/master
| 2022-06-21T07:47:19.228131
| 2020-05-05T12:38:30
| 2020-05-05T12:38:30
| 261,462,075
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
from Figure import Figure
class Bishop(Figure):
def __init__(self, y, x):
super().__init__(y, x)
self.y = y
self.x = x
self.coordinates_list = []
for y in range(8):
for x in range(8):
if y - x == self.y - self.x or y + x == self.y + self.x:
self.coordinates_list.append((y, x))
def __repr__(self):
return "B"
|
[
"hallerra17@gmail.com"
] |
hallerra17@gmail.com
|
15fecaf3a5f1a30cd2e9437b54048f6c095d8840
|
d08e23e19ac81bb3538584b5a22e92d377cd197e
|
/week8/tasks/c2.py
|
443308dee700892df189d553c03f40b35f66efb0
|
[] |
no_license
|
KimSeilkhann/Web-Development-2020
|
971480c25ad2f0781168c8332499fcf24e248375
|
cba7066c83a49ea1cd0edb2c9ea4e8d3c71dbc94
|
refs/heads/master
| 2023-01-18T16:46:40.998146
| 2020-04-22T23:15:31
| 2020-04-22T23:15:31
| 240,965,184
| 0
| 1
| null | 2023-01-07T16:47:09
| 2020-02-16T21:01:12
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 124
|
py
|
a = int(input())
b = int(input())
c = int(input())
d = int(input())
for i in range(a, b+1):
if i%d==c:
print(i)
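# --- Hedged worked example (added; not part of the original) ---
# For inputs a=1, b=10, c=2, d=3 the loop prints every i in [1, 10]
# with i % 3 == 2, i.e. 2, 5 and 8.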
|
[
"kimseilkhan@mail.ru"
] |
kimseilkhan@mail.ru
|
08710244c3016d2dc1aaa3a00d3524bcfd19531e
|
ffd35dbad0669348b9151220557900de85ccd552
|
/alerta/alerta-src/settings.py
|
a87dc6d8f13bf12636baa7a217a9a224a1b52a7b
|
[] |
no_license
|
xiaotech/deploy-elastalert
|
0f11b55cdb7866bbd7d2c985ede8d7002318c07d
|
38066bf98cee171d73a55e1e65408084c6eb5ae8
|
refs/heads/master
| 2020-07-19T21:33:34.472065
| 2019-09-05T08:51:05
| 2019-09-05T08:51:05
| 206,517,979
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,503
|
py
|
#
# ***** ALERTA SERVER DEFAULT SETTINGS -- DO NOT MODIFY THIS FILE *****
#
# To override these settings use /etc/alertad.conf or the contents of the
# configuration file set by the environment variable ALERTA_SVR_CONF_FILE.
#
# Further information on settings can be found at https://docs.alerta.io
from typing import Any, Dict, List # noqa
DEBUG = False
BASE_URL = ''
USE_PROXYFIX = False
SECRET_KEY = 'changeme'
# Logging configuration
LOG_CONFIG_FILE = ''
LOG_HANDLERS = ['console'] # ['console', 'file', 'wsgi']
LOG_FILE = 'alertad.log' # NOTE: 'file' must be added to LOG_HANDLERS for logging to work
LOG_MAX_BYTES = 10 * 1024 * 1024 # 10 MB
LOG_BACKUP_COUNT = 2
LOG_FORMAT = 'default' # ['default', 'simple', 'verbose', 'json'] or any valid logging format
LOG_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']
# API settings
ALARM_MODEL = 'ALERTA' # 'ALERTA' (default) or 'ISA_18_2'
QUERY_LIMIT = 10000
DEFAULT_PAGE_SIZE = QUERY_LIMIT # maximum number of alerts returned by a single query
HISTORY_LIMIT = 100 # cap the number of alert history entries
HISTORY_ON_VALUE_CHANGE = True # history entry for duplicate alerts if value changes
# MongoDB (deprecated, use DATABASE_URL setting)
MONGO_URI = 'mongodb://localhost:27017/monitoring'
MONGO_DATABASE = None # can be used to override default database, above
MONGO_RAISE_ON_ERROR = True
# PostgreSQL (deprecated, use DATABASE_URL setting)
POSTGRES_URI = 'postgres://localhost:5432/monitoring' # not used (use DATABASE_URL)
POSTGRES_DB = None
# Database
DATABASE_URL = MONGO_URI # default: MongoDB
DATABASE_NAME = MONGO_DATABASE or POSTGRES_DB
DATABASE_RAISE_ON_ERROR = MONGO_RAISE_ON_ERROR # True - terminate, False - ignore and continue
# Search
DEFAULT_FIELD = 'text' # default field if no search prefix specified (Postgres only)
# Bulk API
BULK_QUERY_LIMIT = 100000 # max number of alerts for bulk endpoints
CELERY_BROKER_URL = None
CELERY_RESULT_BACKEND = None
CELERY_ACCEPT_CONTENT = ['customjson']
CELERY_TASK_SERIALIZER = 'customjson'
CELERY_RESULT_SERIALIZER = 'customjson'
# Authentication settings
AUTH_REQUIRED = False
AUTH_PROVIDER = 'basic' # basic (default), ldap, github, openid, saml2, azure, cognito, gitlab, google, keycloak
ADMIN_USERS = [] # type: List[str]
USER_DEFAULT_SCOPES = ['read', 'write'] # Note: 'write' scope implicitly includes 'read'
CUSTOMER_VIEWS = False
BASIC_AUTH_REALM = 'Alerta'
SIGNUP_ENABLED = True
OAUTH2_CLIENT_ID = None # OAuth2 client ID and secret
OAUTH2_CLIENT_SECRET = None
ALLOWED_EMAIL_DOMAINS = ['*']
# Amazon Cognito
AWS_REGION = 'us-east-1' # US East - N. Virginia (default)
COGNITO_USER_POOL_ID = None
COGNITO_DOMAIN = None
# GitHub OAuth2
GITHUB_URL = 'https://github.com'
ALLOWED_GITHUB_ORGS = ['*']
# GitLab OAuth2
GITLAB_URL = 'https://gitlab.com'
ALLOWED_GITLAB_GROUPS = None
# BasicAuth using LDAP
LDAP_URL = 'ldap://oa.caijj.net:389' # eg. ldap://localhost:389
LDAP_DOMAINS = {'lattebank.com':''} # type: Dict[str, str]
LDAP_DOMAINS_GROUP = {} # type: Dict[str, str]
LDAP_DOMAINS_BASEDN = {} # type: Dict[str, str]
LDAP_ALLOW_SELF_SIGNED_CERT = False
# Microsoft Identity Platform (v2.0)
AZURE_TENANT = 'common' # "common", "organizations", "consumers" or tenant ID
# Keycloak
KEYCLOAK_URL = None
KEYCLOAK_REALM = None
ALLOWED_KEYCLOAK_ROLES = None
# OpenID Connect
OIDC_ISSUER_URL = None
OIDC_AUTH_URL = None
OIDC_VERIFY_TOKEN = False
OIDC_ROLE_CLAIM = OIDC_CUSTOM_CLAIM = 'roles' # JWT claim name whose value is used in role mapping
OIDC_GROUP_CLAIM = 'groups' # JWT claim name whose value is used in customer mapping
ALLOWED_OIDC_ROLES = ALLOWED_GITLAB_GROUPS or ALLOWED_KEYCLOAK_ROLES or ['*']
# SAML 2.0
SAML2_ENTITY_ID = None
SAML2_METADATA_URL = None
SAML2_USER_NAME_FORMAT = '{givenName} {surname}'
SAML2_EMAIL_ATTRIBUTE = 'emailAddress'
SAML2_CONFIG = {} # type: Dict[str, Any]
ALLOWED_SAML2_GROUPS = ['*']
TOKEN_EXPIRE_DAYS = 14
API_KEY_EXPIRE_DAYS = 365 # 1 year
# Audit Log
AUDIT_TRAIL = ['admin'] # possible categories are 'admin', 'write', and 'auth'
AUDIT_LOG = None # set to True to log to application logger
AUDIT_URL = None # send audit log events via webhook URL
# CORS settings
CORS_ALLOW_HEADERS = ['Content-Type', 'Authorization', 'Access-Control-Allow-Origin']
CORS_ORIGINS = [
# 'http://try.alerta.io',
# 'http://explorer.alerta.io',
'http://localhost',
'http://localhost:8000',
r'https?://\w*\.?local\.alerta\.io:?\d*/?.*' # => http(s)://*.local.alerta.io:<port>
]
CORS_SUPPORTS_CREDENTIALS = AUTH_REQUIRED
# Severity settings
SEVERITY_MAP = {} # type: Dict[str, Any]
DEFAULT_NORMAL_SEVERITY = None
DEFAULT_PREVIOUS_SEVERITY = None
COLOR_MAP = {} # type: Dict[str, Any]
# Timeout settings
DEFAULT_TIMEOUT = 86400
ALERT_TIMEOUT = DEFAULT_TIMEOUT
HEARTBEAT_TIMEOUT = DEFAULT_TIMEOUT
# Housekeeping settings
DEFAULT_EXPIRED_DELETE_HRS = 2 # hours (0 hours = do not delete)
DEFAULT_INFO_DELETE_HRS = 12 # hours (0 hours = do not delete)
# Send verification emails to new BasicAuth users
EMAIL_VERIFICATION = False
SMTP_HOST = 'smtp.gmail.com'
SMTP_PORT = 587
MAIL_LOCALHOST = 'localhost' # mail server to use in HELO/EHLO command
SMTP_STARTTLS = True
SMTP_USE_SSL = False
SSL_KEY_FILE = None
SSL_CERT_FILE = None
MAIL_FROM = '' # replace with valid sender address eg you@gmail.com
SMTP_USERNAME = '' # application-specific username if different to MAIL_FROM user
SMTP_PASSWORD = '' # password for MAIL_FROM (or SMTP_USERNAME if used)
# Web console settings
SITE_LOGO_URL = '' # URL to company logo
DATE_FORMAT_SHORT_TIME = 'HH:mm' # eg. 09:24
DATE_FORMAT_MEDIUM_DATE = 'EEE d MMM HH:mm' # eg. Tue 9 Oct 09:24
DATE_FORMAT_LONG_DATE = 'd/M/yyyy h:mm:ss.sss a' # eg. 9/10/2018 9:24:03.036 AM
DEFAULT_AUDIO_FILE = None # must exist on client at relative path eg. '/audio/alert_high-intensity.ogg' or URL
COLUMNS = [
'severity', 'status', 'lastReceiveTime', 'timeoutLeft', 'duplicateCount',
# 'customer', 'environment', 'service', 'resource', 'event', 'value'
'environment', 'service', 'resource', 'event'
]
SORT_LIST_BY = 'lastReceiveTime' # newest='lastReceiveTime' or oldest='-createTime' (Note: minus means reverse)
# Alert Status Indicators
ASI_SEVERITY = [
'critical', 'error', 'minor', 'warning', 'indeterminate', 'informational'
]
ASI_QUERIES = [
{'text': 'Production', 'query': [['environment', 'Production']]},
{'text': 'Development', 'query': [['environment', 'Development']]},
{'text': 'Heartbeats', 'query': {'q': 'event:Heartbeat'}},
{'text': 'Misc.', 'query': 'group=Misc'},
]
# List of custom actions
ACTIONS = [] # type: List[str]
GOOGLE_TRACKING_ID = None
AUTO_REFRESH_INTERVAL = 5000 # ms
# Plugins
#PLUGINS = ['remote_ip', 'reject', 'heartbeat', 'blackout']
PLUGINS = ['remote_ip', 'heartbeat', 'blackout']
PLUGINS_RAISE_ON_ERROR = True # raise RuntimeError exception on first failure
# reject plugin settings
ORIGIN_BLACKLIST = [] # type: List[str]
# ORIGIN_BLACKLIST = ['foo/bar$', '.*/qux'] # reject all foo alerts from bar, and everything from qux
ALLOWED_ENVIRONMENTS = ['Production', 'Development'] # reject alerts without allowed environments
# blackout settings
BLACKOUT_DURATION = 3600 # default period = 1 hour
NOTIFICATION_BLACKOUT = False # True - set alert status=blackout, False - do not process alert (default)
BLACKOUT_ACCEPT = [] # type: List[str]
# BLACKOUT_ACCEPT = ['normal', 'ok', 'cleared'] # list of severities accepted during blackout period
|
[
"root@ip-172-0-32-156.cn-north-1.compute.internal"
] |
root@ip-172-0-32-156.cn-north-1.compute.internal
|
99ec437f0a447a861ff9eb9bac7541ee99e076d0
|
eac57d476082128b77884d03eefe1212de48ff64
|
/data_preprocessing/categoriesdataset.py
|
ecd0ea0f879b479d977a2ea7e7c5d5dd88398bc8
|
[
"Apache-2.0"
] |
permissive
|
amanalex1804/ML_using_python
|
f22cebb8da72481e89a83673d43a90b700aceded
|
d067a67f3693e5bed288d7f496545880d4b5a0ba
|
refs/heads/master
| 2021-05-06T15:36:40.417349
| 2017-12-23T12:28:37
| 2017-12-23T12:28:37
| 113,608,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
import pandas as pd
import matplotlib.pyplot as plot
import numpy as np
dataset=pd.read_csv('Data.csv') # Data.csv must be saved in the working directory
# indexing in Python starts at 0
X=dataset.iloc[:,:-1].values # independent variables: every column except the last,
# since the last column holds the dependent variable
Y=dataset.iloc[:,3].values # Y is in the 4th column, i.e. index 3
# missing data: replace missing values with the mean of their column
from sklearn.preprocessing import Imputer
imputer =Imputer(missing_values="NaN",strategy="mean",axis=0)
imputer=imputer.fit(X[:,1:3]) #col is 1 and 2 but syntax is [)
X[:,1:3]=imputer.transform(X[:,1:3])
#encoding the dataset into categories
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder_X=LabelEncoder()
X[:,0]=labelencoder_X.fit_transform(X[:,0])
onehotencoder=OneHotEncoder(categorical_features=[0]) #making objects
X=onehotencoder.fit_transform(X).toarray()
labelencoder_Y=LabelEncoder()
Y=labelencoder_Y.fit_transform(Y)
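# Note added for clarity (not in the original script): recent scikit-learn releases removed
# the Imputer class used above in favour of SimpleImputer. A roughly equivalent, untested
# sketch with the newer API would be:
#     from sklearn.impute import SimpleImputer
#     imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
#     X[:, 1:3] = imputer.fit_transform(X[:, 1:3])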
|
[
"noreply@github.com"
] |
amanalex1804.noreply@github.com
|
03ad41e2cab35c6138ec0f478fd9bed8a27e666b
|
6bd7eb96edc3224b1088f5479069bdb69f37d8f9
|
/ReadFile.py
|
3d4c4751668317e1ab3ec8af514386e96c7f9dc1
|
[] |
no_license
|
kourav1991/pythonProg
|
4d506b1bdf57b19e00603045fcd8925c83a69ccf
|
8e4cbb3db09209457eb4aa6189fcead4068db243
|
refs/heads/master
| 2020-04-19T09:00:52.285042
| 2019-01-31T07:32:11
| 2019-01-31T07:32:11
| 168,097,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 11:58:48 2019
@author: SKourav
"""
# By default the file will be opened in read mode
fobj = open("300119.log","r")
for line in fobj:
line = line.strip()
print(line)
fobj.close();
|
[
"SKourav@slb.com"
] |
SKourav@slb.com
|
8e2a94350253582393fd0a0a0b96bc37328f4a26
|
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
|
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/EmailVerifiedRequest.py
|
58267b80a7aaa81c7e16519a64b7335af6e77b88
|
[
"Apache-2.0"
] |
permissive
|
P79N6A/dysms_python
|
44b634ffb2856b81d5f79f65889bfd5232a9b546
|
f44877b35817e103eed469a637813efffa1be3e4
|
refs/heads/master
| 2020-04-28T15:25:00.368913
| 2019-03-13T07:52:34
| 2019-03-13T07:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class EmailVerifiedRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'EmailVerified')
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_Email(self):
return self.get_query_params().get('Email')
def set_Email(self,Email):
self.add_query_param('Email',Email)
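# Illustrative usage (added; not part of the original SDK file). The client construction
# below follows the usual aliyun-python-sdk-core pattern; the credentials and region are
# placeholders:
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = EmailVerifiedRequest()
#     request.set_Email('user@example.com')
#     print(client.do_action_with_exception(request))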
|
[
"1478458905@qq.com"
] |
1478458905@qq.com
|
9c25a7375860bd12235eac7d7d377971fada9ed2
|
85ae39c35f1f470f7dd207e3cb602775f1b09401
|
/torhello1.py
|
af57fb38493848aea1f967a1484f8cebaadf3445
|
[] |
no_license
|
JonaFly/projectident
|
9a9db826c321d9d157cdcae8febc11009d7c49ba
|
ddcffdfc4a82883d3c9813d9bab093330b5bb816
|
refs/heads/master
| 2022-10-30T02:55:26.004753
| 2018-09-24T00:45:01
| 2018-09-24T00:45:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8081, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
greeting = self.get_argument('greeting', 'Hello')
self.write(greeting + ', friendly user!')
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=[(r"/", IndexHandler)])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
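# Example run (added; not part of the original file):
#     python torhello1.py --port=8081
# then request http://localhost:8081/?greeting=Hi to get "Hi, friendly user!".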
|
[
"997846481@qq.com"
] |
997846481@qq.com
|
4fcd87e6021b3183636ffa6f12d3cef8fb97f3ac
|
f3553f36a248d5e2a30713af68dd714df90953d7
|
/leetecode/2.py
|
abd354b28f96351ff7eaab2c3408f4b7ec8139bb
|
[] |
no_license
|
Mrzhouqifei/offfer
|
8a699653850cf6cc91ed5a622ad166fd61b8e294
|
4c73e7a591e79348471e00272dcb8e1b5cc6d7cb
|
refs/heads/master
| 2023-04-09T05:58:49.858037
| 2020-12-30T06:13:52
| 2020-12-30T06:13:52
| 298,285,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
x1, x2 = 0, 0
i, j = 1, 1
while l1:
x1 += l1.val * i
l1 = l1.next
i*=10
while l2:
x2 += l2.val * j
l2 = l2.next
j *= 10
        x = x1 + x2
        head = l3 = ListNode(0)
        if x == 0:  # edge case: both inputs are zero; the loop below would build nothing
            return ListNode(0)
        while x > 0:
            l3.next = ListNode(x % 10)
            x = x // 10
            l3 = l3.next
        return head.next
|
[
"18401620071@163.com"
] |
18401620071@163.com
|
c8d16f0e8a9133965cdfbabe3247ef6e64889008
|
675af57627be90e7517017f0f332d8a0b5c20e97
|
/mnist/part1/main.py
|
158e336284564e988aa16043242477ae4163d4d8
|
[] |
no_license
|
PierrotAWB/6.86x
|
b33dadf44087c58d149575be25ecd52a36f1d1d2
|
f534f0a9373d7039f14849f59b7ab22d0480d5a1
|
refs/heads/master
| 2020-06-04T13:06:54.376705
| 2019-07-18T03:45:07
| 2019-07-18T03:45:07
| 192,033,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,856
|
py
|
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("..")
from utils import *
from linear_regression import *
from svm import *
from softmax import *
from features import *
from kernel import *
#######################################################################
# 1. Introduction
#######################################################################
# Load MNIST data:
train_x, train_y, test_x, test_y = get_MNIST_data()
# Plot the first 20 images of the training set.
# plot_images(train_x[0:20, :])
#######################################################################
# 2. Linear Regression with Closed Form Solution
#######################################################################
# TODO: first fill out functions in linear_regression.py, otherwise the functions below will not work
def run_linear_regression_on_MNIST(lambda_factor=1):
"""
Trains linear regression, classifies test data, computes test error on test set
Returns:
Final test error
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
train_x_bias = np.hstack([np.ones([train_x.shape[0], 1]), train_x])
test_x_bias = np.hstack([np.ones([test_x.shape[0], 1]), test_x])
theta = closed_form(train_x_bias, train_y, lambda_factor)
test_error = compute_test_error_linear(test_x_bias, test_y, theta)
return test_error
# Don't run this until the relevant functions in linear_regression.py have been fully implemented.
# print('Linear Regression test_error =', run_linear_regression_on_MNIST(lambda_factor=1))
#######################################################################
# 3. Support Vector Machine
#######################################################################
# TODO: first fill out functions in svm.py, or the functions below will not work
def run_svm_one_vs_rest_on_MNIST():
"""
Trains svm, classifies test data, computes test error on test set
Returns:
Test error for the binary svm
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
train_y[train_y != 0] = 1
test_y[test_y != 0] = 1
pred_test_y = one_vs_rest_svm(train_x, train_y, test_x)
test_error = compute_test_error_svm(test_y, pred_test_y)
return test_error
# print('SVM one vs. rest test_error:', run_svm_one_vs_rest_on_MNIST())
def run_multiclass_svm_on_MNIST():
"""
Trains svm, classifies test data, computes test error on test set
Returns:
Test error for the binary svm
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
pred_test_y = multi_class_svm(train_x, train_y, test_x)
test_error = compute_test_error_svm(test_y, pred_test_y)
return test_error
# print('Multiclass SVM test_error:', run_multiclass_svm_on_MNIST())
#######################################################################
# 4. Multinomial (Softmax) Regression and Gradient Descent
#######################################################################
# TODO: first fill out functions in softmax.py, or run_softmax_on_MNIST will not work
def run_softmax_on_MNIST(temp_parameter=1):
"""
Trains softmax, classifies test data, computes test error, and plots cost function
Runs softmax_regression on the MNIST training set and computes the test error using
the test set. It uses the following values for parameters:
alpha = 0.3
lambda = 1e-4
num_iterations = 150
Saves the final theta to ./theta.pkl.gz
Returns:
Final test error
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
theta, cost_function_history = softmax_regression(train_x, train_y, temp_parameter, alpha=0.3, lambda_factor=1.0e-4, k=10, num_iterations=150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_x, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
write_pickle_data(theta, "./theta.pkl.gz")
# TODO: add your code here for the "Using the Current Model" question in tab 4.
train_y, test_y = update_y(train_y, test_y)
print ("Test error mod 3: ", compute_test_error_mod3(test_x, test_y, theta, temp_parameter))
return test_error
# print('softmax test_error=', run_softmax_on_MNIST(temp_parameter=1))
# TODO: Find the error rate for temp_parameter = [.5, 1.0, 2.0]
# Remember to return the tempParameter to 1, and re-run run_softmax_on_MNIST
#######################################################################
# 6. Changing Labels
#######################################################################
def run_softmax_on_MNIST_mod3(temp_parameter=1):
"""
Trains Softmax regression on digit (mod 3) classifications.
See run_softmax_on_MNIST for more info.
"""
train_x, train_y, test_x, test_y = get_MNIST_data()
train_y, test_y = update_y(train_y, test_y)
theta, cost_function_history = softmax_regression(train_x, train_y, temp_parameter, alpha=0.3, lambda_factor=1.0e-4, k=10, num_iterations=150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error_mod3(test_x, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
write_pickle_data(theta, "./theta.pkl.gz")
return test_error
# print('softmax mod3 test_error=', run_softmax_on_MNIST_mod3(temp_parameter=1))
# TODO: Run run_softmax_on_MNIST_mod3(), report the error rate
#######################################################################
# 7. Classification Using Manually Crafted Features
#######################################################################
## Dimensionality reduction via PCA ##
# TODO: First fill out the PCA functions in features.py as the below code depends on them.
# n_components = 10
# pcs = principal_components(train_x)
# train_pca = project_onto_PC(train_x, pcs, n_components)
# test_pca = project_onto_PC(test_x, pcs, n_components)
# train_pca (and test_pca) is a representation of our training (and test) data
# after projecting each example onto the first 18 principal components.
# TODO: Train your softmax regression model using (train_pca, train_y)
# and evaluate its accuracy on (test_pca, test_y).
# temp_parameter = 1
# theta, cost_function_history = softmax_regression(train_pca, train_y, temp_parameter, alpha=0.3, lambda_factor=1.0e-4, k=10, num_iterations=150)
# plot_cost_function_over_time(cost_function_history)
# test_error = compute_test_error(test_pca, test_y, theta, temp_parameter)
# # Save the model parameters theta obtained from calling softmax_regression to disk.
# write_pickle_data(theta, "./theta.pkl.gz")
# print("PCA (10 components) error: ", test_error)
# # TODO: Use the plot_PC function in features.py to produce scatterplot
# # of the first 100 MNIST images, as represented in the space spanned by the
# # first 2 principal components found above.
# plot_PC(train_x[range(100), ], pcs, train_y[range(100)])
# # TODO: Use the reconstruct_PC function in features.py to show
# # the first and second MNIST images as reconstructed solely from
# # their 18-dimensional principal component representation.
# # Compare the reconstructed images with the originals.
# firstimage_reconstructed = reconstruct_PC(train_pca[0, ], pcs, n_components, train_x)
# plot_images(firstimage_reconstructed)
# plot_images(train_x[0, ])
# secondimage_reconstructed = reconstruct_PC(train_pca[1, ], pcs, n_components, train_x)
# plot_images(secondimage_reconstructed)
# plot_images(train_x[1, ])
## Cubic Kernel ##
# TODO: Find the 10-dimensional PCA representation of the training and test set
n_components = 10
pcs = principal_components(train_x)
train_pca10 = project_onto_PC(train_x, pcs, n_components)
test_pca10 = project_onto_PC(test_x, pcs, n_components)
# TODO: First fill out cubicFeatures() function in features.py as the below code requires it.
train_cube = cubic_features(train_pca10)
test_cube = cubic_features(test_pca10)
# train_cube (and test_cube) is a representation of our training (and test) data
# after applying the cubic kernel feature mapping to the 10-dimensional PCA representations.
temp_parameter = 1
theta, cost_function_history = softmax_regression(train_cube, train_y, temp_parameter, alpha=0.3, lambda_factor=1.0e-4, k=10, num_iterations=150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_cube, test_y, theta, temp_parameter)
# Save the model parameters theta obtained from calling softmax_regression to disk.
write_pickle_data(theta, "./theta.pkl.gz")
print("PCA (10 components) error: ", test_error)
# TODO: Train your softmax regression model using (train_cube, train_y)
# and evaluate its accuracy on (test_cube, test_y).
|
[
"andrewwang298@gmail.com"
] |
andrewwang298@gmail.com
|
4ecfaa8c3a849fe254c93406f473ba9714c3d480
|
379131ade0864825d11c4c95cad95811bb21a9d6
|
/users/urls.py
|
3774ab8e860b0932c245715f11dce46f4aba6845
|
[] |
no_license
|
AISavvyToday/InvoiceManagementSystem
|
7187407e3df11a2251f0ad2c3548980b4d21bad1
|
3ab0da6e25fc8404189e69bc8512756b1557b185
|
refs/heads/master
| 2023-02-17T08:42:44.925285
| 2020-11-18T08:57:05
| 2020-11-18T08:57:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
from django.conf.urls import url
from users.views.login_view import sign_in
from users.views.logout_view import sign_out
from users.views.registration_view import registration
app_name = 'users'
urlpatterns = [
url(r'^registration/$', registration, name="registration"),
url(r'^$', sign_in, name="login"),
url(r'^logout/$', sign_out, name="logout"),
]
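# Illustrative wiring (added; not part of the original file): in the project's root
# urls.py these routes would typically be mounted with something like
#     url(r'^users/', include('users.urls', namespace='users'))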
|
[
"muhuri.json@gmail.com"
] |
muhuri.json@gmail.com
|
0e54acb8a5fbcbba385c48fb8b10904c9a43d4ab
|
c11993c5da54e38cd4ed20d649ad6a43fa2ede1a
|
/flaskapp/web/app/item/forms.py
|
551c9efadb1eb42e85227a816922abbf55f2df5b
|
[] |
no_license
|
enderst3/adv_portfolio
|
c786ac456e871ac13ee80618befcc5097145f7d4
|
4e312f8beee4951b62459260f4d5619f729b9cc4
|
refs/heads/master
| 2021-05-13T21:11:50.074289
| 2018-10-27T03:12:46
| 2018-10-27T03:12:46
| 116,456,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
import re
from flask_wtf import FlaskForm
from wtforms import BooleanField, DecimalField, FloatField, IntegerField, \
DateTimeField, DateField, \
FileField, PasswordField, StringField, TextAreaField, \
RadioField, SelectField, SelectMultipleField, \
HiddenField, SubmitField
from wtforms.validators import InputRequired, Length
from wtforms import ValidationError
from .models import ItemModel
def filter_keyname(data):
return re.sub('[^a-z0-9_-]', '', str(data).lower())
def validate_keyname(self, field):
if field.data != self.item.keyname and \
ItemModel.query.filter_by(keyname=field.data).first():
raise ValidationError('Keyname already in use.')
class CreatItemForm(FlaskForm):
keyname = StringField('Keyname', validators=[InputRequired(),Length(2,63),validate_keyname], filters=[filter_keyname])
item_title = StringField('Title', validators=[InputRequired(),Length(1,255)])
submit = SubmitField('Create Item')
def __init__(self, item, *args, **kwargs):
super(CreatItemForm, self).__init__(*args, **kwargs)
self.item = item
class EditItemForm(FlaskForm):
id = HiddenField('id')
keyname = StringField('Keyname', validators=[InputRequired(),Length(2,63),validate_keyname], filters=[filter_keyname])
active = BooleanField('Active')
item_title = StringField('Title', validators=[InputRequired(),Length(1,255)])
item_text = TextAreaField('Text')
mod_create = DateTimeField('Item Created')
mod_update = DateTimeField('Item Updated')
owner_id = SelectField('Item Owner', choices=[], coerce=int)
users_id = SelectMultipleField('Editors', choices=[], coerce=int)
submit = SubmitField('Update Item')
def __init__(self, item, *args, **kwargs):
super(EditItemForm, self).__init__(*args, **kwargs)
self.item = item
|
[
"enderst3@gmail.com"
] |
enderst3@gmail.com
|
91d548f7f5194f56b98f906d4709fa3c53ecb616
|
6bef221e0ac6147b67e9aaeb39daf09cbec2cdb1
|
/leetcode/38.CountAndSay/main.py
|
b04e224a5f9fad09cbf6cb2a9d771ea6d67ece02
|
[] |
no_license
|
lee3164/newcoder
|
470798af7d25418e9936c71acb049d0b4032abab
|
f563dbf35878808491f03281889c9a0800be7d90
|
refs/heads/master
| 2021-06-28T08:51:22.186836
| 2020-09-15T14:48:05
| 2020-09-15T14:48:05
| 138,613,287
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
# coding=utf-8
class Solution(object):
"""
    The count-and-say sequence is a sequence of integers in which each term is obtained
    by reading the previous term aloud. Its first five terms are:
    1.     1
    2.     11
    3.     21
    4.     1211
    5.     111221
    1 is read as "one 1", which gives 11.
    11 is read as "two 1s", which gives 21.
    21 is read as "one 2, one 1", which gives 1211.
    Given a positive integer n (1 ≤ n ≤ 30), output the n-th term of the sequence.
    Note: each term of the sequence is represented as a string.
    Example 1:
    Input: 1
    Output: "1"
    Example 2:
    Input: 4
    Output: "1211"
    Each term is derived from the previous one, so recursion is the simplest approach;
    an iterative version is not hard either.
"""
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
if n == 1:
return "1"
arr = []
s = self.countAndSay(n - 1)
i = 0
while i < len(s):
j = i + 1
while j < len(s) and s[i] == s[j]:
j += 1
continue
arr.append(str(j - i))
arr.append(s[i])
i = j
return "".join(arr)
def countAndSay2(self, n):
s = "1"
for i in range(0, n - 1):
arr = []
i = 0
while i < len(s):
j = i + 1
while j < len(s) and s[i] == s[j]:
j += 1
continue
arr.append(str(j - i))
arr.append(s[i])
i = j
s = "".join(arr)
return s
if __name__ == '__main__':
    print(Solution().countAndSay2(4))
|
[
"lee3164@qq.com"
] |
lee3164@qq.com
|
19283c42a513ae9d4e47da3321796bdf3ed6eb02
|
440736a5c39b10d2aa3e038d6c383760f3b22c2e
|
/train_c3d.py
|
9662dfe08db5e66d23dd221d2f96ed6ed6ef0137
|
[] |
no_license
|
breadbread1984/c3d
|
6f34ddeb743e7b18f6afeebc4f31841c8c5e3e28
|
a94ea01099d74daede81ec0a051da3d63b66a833
|
refs/heads/master
| 2020-03-22T13:46:42.319948
| 2019-05-14T01:07:26
| 2019-05-14T01:07:26
| 140,131,197
| 18
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,973
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import;
from __future__ import division;
from __future__ import print_function;
import os;
import numpy as np;
import tensorflow as tf;
batch_size = 8;
class_num = 101;
def main(unused_argv):
action_classifier = tf.estimator.Estimator(model_fn = action_model_fn, model_dir = "action_classifier_model");
tf.logging.set_verbosity(tf.logging.DEBUG);
logging_hook = tf.train.LoggingTensorHook(tensors = {"loss":"loss"}, every_n_iter = 1);
action_classifier.train(input_fn = train_input_fn,steps = 200000,hooks = [logging_hook]);
eval_results = action_classifier.evaluate(input_fn = eval_input_fn);
print(eval_results);
def parse_function(serialized_example):
feature = tf.parse_single_example(
serialized_example,
features = {
'clips': tf.FixedLenFeature((),dtype = tf.string, default_value = ''),
'label': tf.FixedLenFeature((),dtype = tf.int64, default_value = 0)
}
);
clips = tf.decode_raw(feature['clips'],out_type = tf.uint8);
clips = tf.reshape(clips,[16,112,112,3]);
clips = tf.cast(clips, dtype = tf.float32);
label = tf.cast(feature['label'], dtype = tf.int32);
return clips,label;
def train_input_fn():
dataset = tf.data.TFRecordDataset(['trainset.tfrecord']);
dataset = dataset.map(parse_function);
dataset = dataset.shuffle(buffer_size = 512);
dataset = dataset.batch(batch_size);
dataset = dataset.repeat(200);
iterator = dataset.make_one_shot_iterator();
features, labels = iterator.get_next();
return {"features": features}, labels;
def eval_input_fn():
dataset = tf.data.TFRecordDataset(['testset.tfrecord']);
dataset = dataset.map(parse_function);
dataset = dataset.shuffle(buffer_size = 512);
dataset = dataset.batch(batch_size);
dataset = dataset.repeat(1);
iterator = dataset.make_one_shot_iterator();
features, labels = iterator.get_next();
return {"features": features}, labels;
def action_model_fn(features, labels, mode):
# with tf.device('/device:GPU:1'):
features = features["features"];
#layer 1
c1 = tf.layers.conv3d(features,filters = 64, kernel_size = [3,3,3], padding = "same");
b1 = tf.contrib.layers.layer_norm(c1,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
p1 = tf.layers.max_pooling3d(b1,pool_size = [1,2,2], strides = [1,2,2], padding = "same");
#layer 2
c2 = tf.layers.conv3d(p1,filters = 128, kernel_size = [3,3,3], padding = "same");
b2 = tf.contrib.layers.layer_norm(c2,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
p2 = tf.layers.max_pooling3d(b2,pool_size = [2,2,2], strides = [2,2,2], padding = "same");
#layer 3
c3a = tf.layers.conv3d(p2,filters = 256, kernel_size = [3,3,3], padding = "same");
b3a = tf.contrib.layers.layer_norm(c3a,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
c3b = tf.layers.conv3d(b3a,filters = 256, kernel_size = [3,3,3], padding = "same");
b3b = tf.contrib.layers.layer_norm(c3b,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
p3 = tf.layers.max_pooling3d(b3b,pool_size = [2,2,2], strides = [2,2,2], padding = "same");
#layer 4
c4a = tf.layers.conv3d(p3,filters = 512, kernel_size = [3,3,3], padding = "same");
b4a = tf.contrib.layers.layer_norm(c4a,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
c4b = tf.layers.conv3d(b4a,filters = 512, kernel_size = [3,3,3], padding = "same");
b4b = tf.contrib.layers.layer_norm(c4b,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
p4 = tf.layers.max_pooling3d(b4b,pool_size = [2,2,2], strides = [2,2,2], padding = "same");
# with tf.device('/device:GPU:2'):
#layer 5
c5a = tf.layers.conv3d(p4,filters = 512, kernel_size = [3,3,3], padding = "same");
b5a = tf.contrib.layers.layer_norm(c5a,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
c5b = tf.layers.conv3d(b5a,filters = 512, kernel_size = [3,3,3], padding = "same");
b5b = tf.contrib.layers.layer_norm(c5b,activation_fn = tf.nn.relu, trainable = mode == tf.estimator.ModeKeys.TRAIN);
p5 = tf.layers.max_pooling3d(b5b,pool_size = [2,2,2], strides = [2,2,2], padding = "same");
#flatten
f = tf.layers.flatten(p5);
d1 = tf.layers.dense(f,units = 4096, activation = tf.nn.relu);
dp1 = tf.layers.dropout(d1,training = mode == tf.estimator.ModeKeys.TRAIN);
d2 = tf.layers.dense(dp1,units = 4096, activation = tf.nn.relu);
dp2 = tf.layers.dropout(d2,training = mode == tf.estimator.ModeKeys.TRAIN);
logits = tf.layers.dense(dp2,units = class_num);
#predict mode
if mode == tf.estimator.ModeKeys.PREDICT:
action = tf.argmax(logits,axis = 1);
return tf.estimator.EstimatorSpec(mode = mode,predictions = action);
if mode == tf.estimator.ModeKeys.TRAIN:
onehot_labels = tf.one_hot(labels,class_num);
loss = tf.losses.softmax_cross_entropy(onehot_labels,logits);
loss = tf.identity(loss,name = "loss");
optimizer = tf.train.AdamOptimizer(1e-4);
train_op = optimizer.minimize(loss = loss, global_step = tf.train.get_global_step());
return tf.estimator.EstimatorSpec(mode = mode, loss = loss, train_op = train_op);
if mode == tf.estimator.ModeKeys.EVAL:
onehot_labels = tf.one_hot(labels,class_num);
loss = tf.losses.softmax_cross_entropy(onehot_labels,logits);
loss = tf.identity(loss,name = "loss");
eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels = labels,predictions = tf.argmax(logits,axis = 1))};
return tf.estimator.EstimatorSpec(mode = mode, loss = loss, eval_metric_ops = eval_metric_ops);
raise Exception('Unknown mode of estimator!');
if __name__ == "__main__":
tf.app.run();
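# Example run (added; not part of the original script). Assumes a TensorFlow 1.x
# environment and that trainset.tfrecord / testset.tfrecord have already been generated:
#     python train_c3d.py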
|
[
"breadbread1984@163.com"
] |
breadbread1984@163.com
|
a0106cef67d0e1931ba87fd161fe51ea3b324dd6
|
8120975953f5ed704894284048efb25ac5acabfd
|
/server/apps/education/libs/asgn.py
|
6d5d337a03e4630f0a9357f63c0241caf7139eab
|
[] |
no_license
|
DICKQI/WeJudge-2-Dev
|
9815b69a56dab13a9aab52bcf75ebfabfc05a8e1
|
ded211428adc9506e7a7b9bbaa5d38c4b5c798d8
|
refs/heads/master
| 2021-08-31T18:20:31.285661
| 2017-12-22T07:22:19
| 2017-12-22T07:22:19
| 115,104,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,756
|
py
|
# -*- coding: utf-8 -*-
# coding:utf-8
import os
import json
import zipfile
from wejudge.core import *
from wejudge.utils import *
from wejudge.utils import tools
from wejudge.const import system
from apps.education.libs import education
import apps.education.models as EducationModel
import apps.problem.models as ProblemModel
from .base import EducationBaseController
from .education import EducationController
from apps.problem.libs.base import ProblemBaseController
from django.http.response import HttpResponseNotFound
__author__ = 'lancelrq'
class EducationAsgnController(EducationBaseController, ProblemBaseController):
def __init__(self, request, response, sid):
super(EducationAsgnController, self).__init__(request, response, sid)
    # Get the list of problems in this assignment
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator(flag_enable=(0, 1))
def get_asgn_problems(self):
"""
        Get the list of problems in this assignment
:return:
"""
view_list = []
user = self.session.account
problems_list = self.asgn.problems.order_by('index')
field_list = [
"id", "entity", "entity__id", "entity__title", "entity__difficulty", "index", "accepted", "submission",
"require", "score", "lang", "strict_mode", "max_score_for_wrong", "hidden_answer"
]
if self.asgn.hide_problem_title:
field_list.remove("entity__title")
if user.role == 0:
field_list.remove("entity__id")
for problem in problems_list:
pitem = problem.json(items=field_list)
if user.role == 0:
sol = EducationModel.Solution.objects.filter(asgn=self.asgn, author=user, problem=problem)
if sol.exists():
sol = sol[0]
if sol.accepted > 0:
pitem["status"] = 2
else:
pitem["status"] = 1
pitem['status_count'] = "%s/%s" % (sol.accepted, sol.submission)
pitem['status_score'] = int((sol.score / 100.0) * problem.score)
else:
pitem["status"] = 0
pitem['status_score'] = -1
pitem['status_count'] = ""
view_list.append(pitem)
return {
"data": view_list
}
    # Get lab report details
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
def get_asgn_report_detail(self, rid=None):
"""
        Get lab report details
        :param rid: optional; fetch the detail for a specific report
:return:
"""
report = self.get_asgn_report(rid)
author = report.author
v_soluctions = {}
v_asgn_problems = []
score_list = {}
problems_list = self.asgn.problems.order_by('index')
p_field_list = ["id", "entity", "entity__title", "score", 'index']
if self.asgn.hide_problem_title:
p_field_list.remove("entity__title")
for ap in problems_list:
v_asgn_problems.append(ap.json(items=p_field_list))
score_list[ap.id] = ap.score
solutions = self.asgn.solution_set.filter(author=author)
for sol in solutions:
vdata = sol.json(items=[
'first_visit_time', 'submission', 'accepted', 'penalty',
'first_ac_time', 'best_memory', 'best_time', 'best_code_size'
], timestamp=True)
pid = sol.problem.id
vdata["finally_score"] = int(int(score_list.get(pid, 0)) * (sol.score / 100.0))
v_soluctions[pid] = vdata
return {
"soluctions": v_soluctions,
"problems": v_asgn_problems,
"report": report.json(items=[
"id", "judge_score", "finally_score", "ac_counter", "submission_counter",
"solved_counter", "public_code","impression", "create_time", "modify_time",
"teacher_check", "teacher_remark", "excellent", 'attachment'
]),
"author": author.json(items=[
"username", "realname"
])
}
    # Save the current student's lab reflection
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator(flag_enable=(0,))
def save_asgn_report_impression(self):
"""
        Save the current student's lab reflection (only the current student's: writing someone else's report is not allowed)
:return:
"""
report = self.get_asgn_report()
if report is None:
raise WeJudgeError(3102)
if report.teacher_check:
raise WeJudgeError(3107)
parser = ParamsParser(self._request)
impression = parser.get_str("impression", require=True, method="POST", errcode=3104)
report.impression = impression
report.save()
    # Upload an attachment for the lab report
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator(flag_enable=(0,))
def upload_asgn_report_attchment(self):
"""
        Upload an attachment for the lab report
:return:
"""
report = self.get_asgn_report()
if report is None:
raise WeJudgeError(3102)
if report.teacher_check:
raise WeJudgeError(3107)
parser = ParamsParser(self._request)
# file = parser.get_file("uploadFile", require=True, max_size=233*1024*1024, type=[
# "image/pjpeg", "image/jpeg", "image/png", "image/x-png", "image/gif", "image/bmp",
# "application/msword", "application/vnd.ms-excel", "application/vnd.ms-powerpoint", "application/pdf",
# "application/x-gzip", "application/zip", "text/plain",
# "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
# "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
# "application/vnd.openxmlformats-officedocument.presentationml.presentation",
# ])
file = parser.get_file("uploadFile", require=True, max_size=233 * 1024 * 1024)
storage = WeJudgeStorage(system.WEJUDGE_STORAGE_ROOT.EDUCATION_ASGN_ATTACHMENT, str(self.asgn.id))
fname = "%s.attacment" % (str(report.id))
destination = storage.open_file(fname, 'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
report.attachment = json.dumps({
"filename": file.name,
"source": fname
})
report.save()
    # Download a lab report attachment
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
def download_asgn_report_attchment(self, rid):
"""
        Download a lab report attachment
:return:
"""
if rid == "0":
report = self.get_asgn_report()
else:
report = self.get_asgn_report(rid)
if report is None:
raise WeJudgeError(3102)
if rid is not None and self.session.account.role < 1 and report.author != self.session.account:
raise WeJudgeError(3103)
storage = WeJudgeStorage(system.WEJUDGE_STORAGE_ROOT.EDUCATION_ASGN_ATTACHMENT, str(self.asgn.id))
attachment = None
try:
if report.attachment is not None:
attachment = json.loads(report.attachment)
except:
attachment = None
if attachment is not None:
fp = storage.open_file(attachment.get("source"), 'rb')
            def read_file(buf_size=8192): # stream large downloads using a fixed buffer size
                while True: # read chunk by chunk
c = fp.read(buf_size)
if c:
yield c
else:
break
fp.close()
response = HttpResponse(
read_file(),
content_type="application/octet-stream"
            ) # set the headers so any file type downloads correctly instead of being rendered inline
response['Content-Length'] = os.path.getsize(storage.get_file_path(attachment.get("source")))
response['Content-Disposition'] = "attachment; filename=%s" % attachment.get('filename')
return response
else:
return HttpResponseNotFound()
    # Read the full judge history of this assignment
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
def get_judge_status(self):
"""
        Read the full judge history of this assignment
:return:
"""
asgn = self.asgn
model_obj = self._judge_status_filter(asgn.judge_status)
return self._get_judge_status(model_obj)
    # Get ranking information
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
def get_ranklist(self):
"""
        Get ranking information
:return:
"""
accounts_solutions = {}
count = 1
parser = ParamsParser(self._request)
arrid = parser.get_int('arrangement_id', 0)
students_filter_ids = None
if arrid > 0:
arr = EducationModel.Arrangement.objects.filter(id=arrid)
if arr.exists():
arr = arr[0]
students_filter_ids = [stu.id for stu in arr.students.all()]
        # Django will not combine this predictably large set of lookups, so fetch everything first and loop once to save database round trips
solutions = EducationModel.Solution.objects.filter(asgn=self.asgn)
for sol in solutions:
a = accounts_solutions.get(sol.author_id, {})
a[sol.problem_id] = sol.json(items=[
'accepted', 'submission', 'penalty', 'best_memory',
'best_time', 'best_code_size', 'first_ac_time',
'used_time', 'used_time_real'
])
accounts_solutions[sol.author_id] = a
account_view = []
if students_filter_ids is not None:
rank_model = EducationModel.AsgnReport.objects.filter(
asgn=self.asgn, author__id__in=students_filter_ids
).order_by('-rank_solved', 'rank_timeused')
else:
rank_model = EducationModel.AsgnReport.objects.filter(
asgn=self.asgn
).order_by('-rank_solved', 'rank_timeused')
for _account in rank_model:
account = _account.json(items=[
'id', 'author', 'author__id', 'author__username', 'author__nickname',
'author__realname', 'author__sex', 'rank_solved', 'rank_timeused'
])
account['solutions'] = accounts_solutions.get(_account.author_id, {})
account['rank'] = count
count += 1
account_view.append(account)
return {
"data": account_view,
"problems": [x.json(items=[
"id", "index"
]) for x in self.asgn.problems.order_by('index')],
}
    # Get scrolling scoreboard data
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
def get_rank_board_datas(self):
"""
        Get scrolling scoreboard data (a trimmed-down version of the contest server's)
:return:
"""
import time
from django.utils.timezone import datetime
asgn = self.asgn
        # Fetch problem data
aproblem_list = asgn.problems.order_by("index")
        # Lookup table from problem id to problem index
apindex_list = {}
for aproblem in aproblem_list:
apindex_list[aproblem.entity.id] = aproblem.index
parser = ParamsParser(self._request)
        # Time of the last refresh
last_time = parser.get_float('time', 0)
try:
last_time = datetime.fromtimestamp(last_time)
except Exception as ex:
last_time = 0
user_list = {}
if type(last_time) is datetime:
            # A timestamp was given, so return the timeline since then
judge_status = asgn.judge_status.filter(create_time__gt=last_time).order_by("id")
else:
            # No timestamp, so return the initial snapshot
judge_status = asgn.judge_status.filter(create_time__lte=datetime.now())
reports = EducationModel.AsgnReport.objects.filter(asgn=asgn)
for report in reports:
user_list[str(report.author.id)] = report.json(items=[
'id', 'author', 'author__id', 'author__nickname', 'author__username',
'author__realname', 'author__headimg', 'author__sex', 'start_time'
])
judge_status_list = [{
"id": status.id,
"problem_id": status.problem_id,
"user_id": status.author_id,
"flag": status.flag,
"timestamp": int(status.create_time.timestamp())
} for status in judge_status]
return {
"problem_indexs": apindex_list,
"problem_list": [aproblem.entity.id for aproblem in aproblem_list],
"judge_status": judge_status_list,
"user_list": user_list,
"nowtime": time.time()
}
    # Get the assignment's reference answers
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator(flag_enable=(0, 1))
def get_answer(self):
if self.session.account.role == 0 and self.asgn.public_answer_at is None:
raise WeJudgeError(3106)
from django.utils.timezone import now
now_time = now()
if self.session.account.role == 0 and now_time < self.asgn.public_answer_at:
return {
"status": "本次开放作业时间为: %s" % self.asgn.public_answer_at
}
else:
return self._get_answer()
# ====== Management ======
    # Save a problem's settings
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def save_asgn_problem_setting(self):
"""
        Save a problem's settings
:return:
"""
parser = ParamsParser(self._request)
lang_list = parser.get_list("lang", method="POST")
score = parser.get_int("score", require=True, min=0, method="POST", errcode=3151)
max_score_for_wrong = parser.get_int(
"max_score_for_wrong", require=True, max=100, min=0, method="POST", errcode=3152
)
strict_mode = parser.get_boolean("strict_mode", False, method="POST")
require = parser.get_boolean("require", True, method="POST")
hidden_answer = parser.get_boolean("hidden_answer", False, method="POST")
lang = 0
for item in lang_list:
if system.WEJUDGE_PROGRAM_LANGUAGE_SUPPORT.exists(item):
lang = (lang | int(item))
sc = 0
for ap in self.asgn.problems.all():
if ap.id != self.asgn_problem_item.id:
sc += ap.score
if (sc + score) > self.asgn.full_score:
raise WeJudgeError(3150)
self.asgn_problem_item.score = score
self.asgn_problem_item.strict_mode = strict_mode
self.asgn_problem_item.max_score_for_wrong = max_score_for_wrong
self.asgn_problem_item.require = require
self.asgn_problem_item.lang = lang
self.asgn_problem_item.hidden_answer = hidden_answer
self.asgn_problem_item.save()
return True
    # Save the assignment's problem selection
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def save_problem_choosing(self):
"""
        Save the assignment's problem selection
:return:
"""
parser = ParamsParser(self._request)
problem_ids = parser.get_list("problem_ids", method="POST")
result = {}
for pid in problem_ids:
problem = ProblemModel.Problem.objects.filter(id=pid)
if not problem.exists():
result[pid] = 1
continue
problem = problem[0]
if self.asgn.problems.filter(entity=problem).exists():
result[pid] = 2
continue
            # If the problem was removed before, re-attach the existing record
old_choose = EducationModel.AsgnProblem.objects.filter(asgn=self.asgn, entity=problem)
if old_choose.exists():
old_choose = old_choose[0]
self.asgn.problems.add(old_choose)
else:
                # Create a new AsgnProblem record
ap = EducationModel.AsgnProblem()
ap.entity = problem
ap.asgn = self.asgn
ap.index = self.asgn.problems.count() + 1
ap.lang = self.asgn.lang
ap.require = True
ap.save()
self.asgn.problems.add(ap)
self.asgn.save()
result[pid] = 3
self._recalc_problems_index()
return result
    # Get the problems already chosen across this course's assignments
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def get_problems_choosed(self):
"""
        Get the problems already chosen across this course's assignments
:return:
"""
course = self.asgn.course
problem_ids = []
for asgn in course.asgn_set.all():
for ap in asgn.problems.all():
problem_ids.append(ap.entity_id)
problem_ids = list(set(problem_ids))
return problem_ids
    # Remove a problem from the assignment
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def remove_asgn_problem(self):
self.asgn.problems.remove(self.asgn_problem_item)
self._recalc_problems_index()
return True
    # Get the assignment's settings
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(1)
def get_asgn_settings(self):
"""
        Get the assignment's settings
:return:
"""
asgn_info = self.asgn.json(items=[
"title", "full_score", "lang", "description",
"hide_problem_title", 'public_answer_at', 'hide_student_code'
])
access_info = {x.arrangement.id: x.json(items=[
"id", "start_time", "end_time", "enabled"
], timestamp=False) for x in self.asgn.access_control.all()}
arrangements = self.asgn.course.arrangements.all()
arrangements_list = []
for arr in arrangements:
d = arr.json(items=(
"id", "name", "day_of_week", "start_week", "end_week",
"odd_even", "start_section", "end_section", "start_time", "end_time"
))
d['full_name'] = arr.toString()
if d.get("id") in access_info.keys():
d['access_info'] = access_info.get(d.get("id"))
else:
a = EducationModel.AsgnAccessControl()
a.arrangement = arr
a.enabled = False
a.save()
self.asgn.access_control.add(a)
d['access_info'] = a.json(items=[
"id", "start_time", "end_time", "enabled"
], timestamp=False)
arrangements_list.append(d)
sections = {}
try:
import json
sections = json.loads(self.school.sections)
sections = sections.get('sections', {})
except:
pass
return {
"sections": sections,
"asgn": asgn_info,
"arrangements": arrangements_list
}
    # Save the assignment's settings
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def save_asgn_setting(self):
"""
        Save the assignment's settings
:return:
"""
parser = ParamsParser(self._request)
lang_list = parser.get_list("lang", method="POST")
title = parser.get_str("title", require=True, method="POST", errcode=3160)
description = parser.get_str("description", default="", method="POST")
full_score = parser.get_float("full_score", min=1.0, max=150.0, require=True, method="POST", errcode=3161)
hide_problem_title = parser.get_boolean("hide_problem_title", False, method="POST")
hide_student_code = parser.get_boolean("hide_student_code", False, method="POST")
arrangement_ids = parser.get_list("arrangements", method="POST")
lang = 0
for item in lang_list:
if system.WEJUDGE_PROGRAM_LANGUAGE_SUPPORT.exists(item):
lang = (lang | int(item))
self.asgn.title = title
self.asgn.lang = lang
self.asgn.description = description
self.asgn.full_score = full_score
self.asgn.hide_problem_title = hide_problem_title
self.asgn.hide_student_code = hide_student_code
access_controls = self.asgn.access_control.all()
for ac in access_controls:
if str(ac.id) not in arrangement_ids:
ac.enabled = False
ac.save()
else:
start_at = parser.get_datetime(
"start_time_%s" % str(ac.id), require=True, method="POST", errcode=3163
)
end_at = parser.get_datetime(
"end_time_%s" % str(ac.id), require=True, method="POST", errcode=3163
)
ac.enabled = True
ac.start_time = start_at
ac.end_time = end_at
ac.save()
last_time = access_controls[0].end_time
for ac in access_controls:
if ac.end_time is not None and last_time is not None:
last_time = max(ac.end_time, last_time)
self.asgn.public_answer_at = last_time
self.asgn.save()
return True
    # Recalculate the assignment's statistics
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def refresh_asgn_datas(self):
"""
        Recalculate the assignment's statistics
:return:
"""
if self.asgn.archive_lock:
raise WeJudgeError(3199)
from .workers import refresh_asgn_datas
refresh_asgn_datas.delay(self.asgn.id)
    # Rejudge a problem
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def rejudge_problems(self):
"""
        Rejudge a problem
:return:
"""
from .workers import asgn_judge
problem = self.asgn_problem_item
status_list = self.asgn.judge_status.filter(virtual_problem=problem)
for status in status_list:
asgn_judge.delay(problem.entity.id, status.id, self.asgn.id, problem.strict_mode)
return True
    # Get the list of lab reports
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(1)
def get_reports_list(self):
"""
        Get the list of lab reports
:return:
"""
reports = EducationModel.AsgnReport.objects.filter(asgn=self.asgn).order_by('teacher_check')
return {
"data": [report.json(items=[
'id', 'author', 'author__id', 'author__realname', 'author__username', 'author__nickname',
'judge_score', 'finally_score', 'ac_counter', 'submission_counter', 'solved_counter',
'modify_time', 'teacher_check', 'excellent', 'public_code'
]) for report in reports]
}
    # Save the grading of a lab report
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(1)
def save_asgn_report_checkup(self, report_id):
"""
        Save the grading of a lab report
:return:
"""
report = self.get_asgn_report(report_id)
parser = ParamsParser(self._request)
finally_score = parser.get_float(
"finally_score", min=0, max=self.asgn.full_score, require=True, method="POST", errcode=3164
)
remark = parser.get_str("remark", "", method="POST")
public_code = parser.get_boolean("public_code", False, method="POST")
excellent = parser.get_boolean("excellent", False, method="POST")
report.teacher_check = True
report.finally_score = finally_score
report.teacher_remark = remark
report.public_code = public_code
report.excellent = excellent
report.save()
return True
    # Save lab report grading in bulk
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(1)
def save_asgn_report_checkup_batch(self):
"""
        Save lab report grading in bulk
:return:
"""
parser = ParamsParser(self._request)
report_ids = parser.get_list('report_ids', method='POST', require=True)
use_judge_score = parser.get_boolean('use_judge_score', True, method='POST')
if not use_judge_score:
finally_score = parser.get_float(
"finally_score", min=0, max=self.asgn.full_score, require=True, method="POST", errcode=3164
)
else:
finally_score = 0
remark = parser.get_str("remark", "", method="POST")
for report_id in report_ids:
report = self.get_asgn_report(report_id)
report.teacher_check = True
if use_judge_score:
report.finally_score = report.judge_score
else:
report.finally_score = finally_score
report.teacher_remark = remark
report.save()
return True
    # Get class-visit (rescheduling) requests
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(1)
def get_visit_requirement(self):
"""
        Get the list of class-visit requests
:return:
"""
visits = EducationModel.AsgnVisitRequirement.objects.filter(asgn=self.asgn)
return {
"data": [visit.json(items=[
'id', 'author', 'author__id', 'author__realname', 'author__username', 'author__nickname',
'arrangement', "arrangement__id", "arrangement__name", "arrangement__day_of_week",
"arrangement__start_week", "arrangement__end_week", "arrangement__odd_even", "arrangement__end_time",
"arrangement__start_section", "arrangement__end_section", "arrangement__start_time", 'create_time'
], timestamp=False) for visit in visits]
}
    # Create a class-visit request
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def add_visit_requirement(self):
"""
        Create a class-visit request
:return:
"""
parser = ParamsParser(self._request)
user_id = parser.get_str("user_id", require=True, method="POST", errcode=3211)
arrangement_id = parser.get_int("arrangement_id", require=True, method="POST", errcode=3165)
account = EducationModel.EduAccount.objects.filter(school=self.school, username=user_id, role=0)
if not account.exists():
raise WeJudgeError(3005)
account = account[0]
arrangement = self.course.arrangements.filter(id=arrangement_id)
if not arrangement.exists():
raise WeJudgeError(3200)
arrangement = arrangement[0]
if EducationModel.AsgnVisitRequirement.objects.filter(asgn=self.asgn, author=account):
raise WeJudgeError(3166)
avr = EducationModel.AsgnVisitRequirement()
avr.arrangement = arrangement
avr.asgn = self.asgn
avr.author = account
avr.save()
    # Delete a class-visit request
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def delete_visit_requirement(self):
"""
        Delete a class-visit request
:return:
"""
parser = ParamsParser(self._request)
vrid = parser.get_int("id", require=True, method="POST")
avr = EducationModel.AsgnVisitRequirement.objects.filter(asgn=self.asgn, id=vrid)
if not avr.exists():
raise WeJudgeError(3167)
avr = avr[0]
avr.delete()
    # Delete the assignment
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def delete_asgn(self):
        # Delete the assignment
parser = ParamsParser(self._request)
agree = parser.get_boolean("agree", False, method="POST")
if not agree:
raise WeJudgeError(7)
course = self.asgn.course
self.asgn.delete()
return course.id
# ====== Asgn Statistic ======
    # Get raw data for statistics
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def get_statistic_data(self):
"""
        Get raw data for statistics
:return:
"""
        # Assignment data
asgn = self.asgn.json(items=[
'title', 'create_time', 'full_score'
])
asgn['problems'] = [ap.json(items=[
'id', 'entity', 'entity__id', 'entity__title', 'index', 'accepted', 'submission', 'require',
'lang', 'score', 'strict_mode', 'max_score_for_wrong'
])for ap in self.asgn.problems.all()]
        # Lab report data
report_list = [report.json(items=[
'author', 'author__id', 'author__username', 'author__nickname', 'author__realname',
'judge_score', 'finally_score', 'ac_counter', 'submission_counter', 'solved_counter',
'create_time', 'start_time', 'modify_time', 'teacher_check', 'excellent', 'rank_solved', 'rank_timeused'
], timestamp=True, timestamp_msec=True) for report in self.asgn.asgnreport_set.all()]
        # Solution data
solution_list = [solution.json(items=[
'author_id', 'problem_id', 'score', 'accepted', 'submission', 'penalty', 'best_memory', 'best_time',
'best_code_size', 'create_time', 'first_ac_time', 'used_time', 'used_time_real'
], timestamp=True, timestamp_msec=True) for solution in self.asgn.solution_set.all()]
        # JudgeStatus data
judge_status_list = [js.json(items=[
'problem_id', 'virtual_problem_id', 'author_id', 'flag', 'lang', 'create_time', 'exe_time',
'exe_mem', 'code_len'
], timestamp=True, timestamp_msec=True) for js in self.asgn.judge_status.all()]
return {
"asgn": asgn,
"reports": report_list,
"solutions": solution_list,
"judge_status": judge_status_list
}
    # Pack submitted code into a zip archive
@EducationBaseController.login_validator
@EducationBaseController.check_asgn_visit_validator()
@EducationBaseController.check_user_privilege_validator(2)
def asgn_zip_the_codes(self , asgn_id):
"""
        Pack submitted code into a zip archive
:param asgn_id:
:return:
"""
asgn = self.asgn
parser = ParamsParser(self._request)
encoding = parser.get_str('encoding', 'gbk', method='POST')
separators = self._request.POST.get('separators' , '\\')
if separators != '\\' and separators !='/':
separators = '\\'
filename = "%s.zip" % tools.uuid4()
storage = WeJudgeStorage(system.WEJUDGE_STORAGE_ROOT.EXPORT_TEMP_DIR , 'asgn_pack')
zf = zipfile.ZipFile(storage.get_file_path(filename), "w", zipfile.zlib.DEFLATED)
judge_status = asgn.judge_status.filter(flag=0)
for status in judge_status:
            if not storage.exists(status.code_path):
                continue  # skip submissions whose code file is missing instead of aborting the whole zip
upload_code = storage.get_file_path(status.code_path)
stor_name = u"%s_%s%c%s_%s.%s" % (
status.author.username, status.author.realname,
separators, status.problem.id, status.id,
system.WEJUDGE_CODE_FILE_EXTENSION.get(status.lang)
)
# if encoding == 'utf-8':
zf.write(upload_code, stor_name)
# else:
# zf.write(upload_code, stor_name.decode('utf-8').encode('gbk'))
zf.close()
fp = storage.open_file(filename , 'rb')
def read_file(buf_size = 8192):
while True:
c = fp.read(buf_size)
if c:
yield c
else:
break
fp.close()
response = HttpResponse(
read_file(),
content_type = "application/octet-stream"
)
response['Content-Length'] = os.path.getsize(storage.get_file_path(filename))
response['Content-Disposition'] = "attachment; filename = %s" % filename
return response
# ====== Provider =======
    # Get or create an assignment report (not a view)
def get_asgn_report(self, report_id=None):
"""
        Get or create an assignment report.
        :param report_id: if None, handle the current student automatically; otherwise treat it as teacher access
:return:
"""
asgn = self.asgn
author = self.session.account
if report_id is None:
            # Non-student accounts cannot generate a lab report
if author.role >= 1:
return None
arp = EducationModel.AsgnReport.objects.filter(asgn=asgn, author=author)
if not arp.exists():
                # Get the current student's class arrangement
flag, st, et = self._get_students_arrangment()
if not flag:
raise WeJudgeError(3105)
arp = EducationModel.AsgnReport()
arp.author = author
arp.asgn = asgn
                # Write the arrangement time into start_time as the ranking timer baseline; it does not change when the arrangement changes
arp.start_time = st
arp.save()
else:
arp = arp[0]
return arp
else:
arp = EducationModel.AsgnReport.objects.filter(id=report_id)
if arp.exists():
arp = arp[0]
                # Accessing a report that is not one's own
if author.role == 0 and arp.author != author:
raise WeJudgeError(3103)
return arp
else:
raise WeJudgeError(3102)
    # Get or create the current user's solution record (not a view)
def get_asgn_solution(self):
"""
        Get or create the current user's solution record
:return:
"""
asgn = self.asgn
author = self.session.account
apv = EducationModel.Solution.objects.filter(asgn=asgn, author=author, problem=self.asgn_problem_item)
if not apv.exists():
apv = EducationModel.Solution()
apv.problem = self.asgn_problem_item
apv.author = author
apv.asgn = asgn
apv.save()
else:
apv = apv[0]
return apv
    # Get the problem info via the assignment (or from the problem bank) (not a view)
def get_problem(self, pid):
"""
        Get the problem info
:param pid:
:return:
"""
asgn = self.asgn
if asgn is None:
return
problem_item = asgn.problems.filter(id=pid)
if not problem_item.exists():
            raise WeJudgeError(3201)  # problem not found
problem_item = problem_item[0]
self.asgn_problem_item = problem_item
self.problem = problem_item.entity
    # Get the assignment's arrangement access-control info (not a view)
def get_asgn_arrangements(self):
access_control = self.asgn.access_control.all()
return [{
"id": access.arrangement.id,
'name': access.arrangement.name,
'toString': access.arrangement.toString()
} for access in access_control]
    # Get the assignment's reference answers (core implementation, not a view)
def _get_answer(self):
        # Output: answer code for each problem in each language, plus outstanding students' code (if made public)
problems_codes = []
problems = self.asgn.problems.order_by('index')
for problem in problems:
if problem.hidden_answer:
continue
problem_entity = problem.entity
judge_config = self._get_judge_config(problem_entity)
problems_codes.append({
"id": problem.id,
"datas": {
"judge_type": judge_config.get('judge_type', {}),
"demo_cases": judge_config.get('demo_cases', {}),
"demo_answer_cases": judge_config.get('demo_answer_cases', {}),
"demo_code_cases": judge_config.get('demo_code_cases', {}),
"answer_cases": judge_config.get('answer_cases', {})
}
})
reports_codes = []
reports = EducationModel.AsgnReport.objects.filter(asgn=self.asgn, public_code=True)
for report in reports:
judge_status = self.asgn.judge_status.filter(author=report.author, flag=0)
status_list = []
for status in judge_status:
try:
result_content = JudgeResult(status.result)
except Exception as ex:
continue
status_info = status.json(items=[
"id", "virtual_problem_id", "lang", "exe_time", "exe_mem", "code_len"
])
status_info["finally_code"] = result_content.finally_code
status_list.append(status_info)
reports_codes.append({
"author": report.author.json(items=['id', 'realname', 'username']),
"judge_status": status_list
})
return {
"status": "ok",
"problems": {problem.id: problem.json(items=[
"id", "index", "entity", "entity__id", "entity__title"
])for problem in problems},
"problems_codes": problems_codes,
"reports_codes": reports_codes
}
    # Recalculate the problem indexes
def _recalc_problems_index(self):
"""
        Recalculate the problem indexes
:return:
"""
problems = self.asgn.problems.order_by('index')
count = 1
for problem in problems:
problem.index = count
problem.save()
count += 1
return True
|
[
"lancelrq@gmail.com"
] |
lancelrq@gmail.com
|
1e2946896d4c47c7579f8a3978604ac008629e1a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/662nTYb83Lg3oNN79_16.py
|
02002041c72424b9627784be686f59e08bd20ce0
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from itertools import permutations as P
def is_parallelogram(lst):
return any(x[0][0]+x[1][0]==x[2][0]+x[3][0] and x[0][1]+x[1][1]==x[2][1]+x[3][1] for x in P(lst))
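
# A small usage sketch (the example points below are my own illustration, not part of the original
# exercise): the check succeeds when some ordering of the four points has equal coordinate sums,
# i.e. the two diagonals share a midpoint.
if __name__ == "__main__":
    print(is_parallelogram([(0, 0), (2, 0), (3, 1), (1, 1)]))  # True: (0,0)+(3,1) == (2,0)+(1,1)
    print(is_parallelogram([(0, 0), (1, 0), (0, 1), (5, 5)]))  # False: no pairing of sums matches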
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
15a18a233c82aab8465146fa2ef902bd92b9d6cf
|
70840a46ba58599f37214625962b5091fa1a7721
|
/FOR/Áfangi 1/Æfingarverkefni/07_AEfingarverkefni/Æfingarverkefni 7.py
|
cd909b6d094ef667e2d4af7c2885e9e173615615
|
[] |
no_license
|
OliEyEy/Git_FOR
|
9723575365f34cd929a3b31bab7da5fea79cda94
|
2a70c14bb377e01997bc209affdcb8244187f29e
|
refs/heads/master
| 2021-01-13T04:28:00.100885
| 2017-01-24T14:22:55
| 2017-01-24T14:22:55
| 79,913,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
# Ólafur Eysteinn - Practice exercise 7 - 18 October 2016
import random
#Liður 1
print("Liður 1")
tala= random.randint(1,6)
print(tala)
#Liður 2
print("Liður 2")
summa=0
for x in range(5):
tala=random.randint(1,6)
print(tala)
summa=summa+tala
print(summa)
#Liður 3
print("Liður 3")
summa=0
summa2=0
for x in range(25):
tala=random.randint(1,100)
summa=summa+tala
if tala%2!=0:
print("Hei ég fann oddatölu.",tala)
summa2=summa2+tala
else:
print(tala)
print("Summa allra oddatalnanna",summa2)
print("Summa allra talnanna",summa)
#Liður 4 (eða 5)
print("Liður 4 (eða 5)")
teljari=0
tala2=99
for x in range(250):
tala=random.randint(25,115)
print(tala)
if tala==73:
print("Fékk 73")
break
if tala==99:
tala2=99
teljari=teljari+1
print(tala2,"kom upp",teljari,"sinnum.")
# Scissors, paper, rock (Skæri, blað, steinn)
nafn = input("Hvað heitiru?")
aldur = int(input("Hvað ertu gamall/gömul?"))
on=1
hveOft=0
sigur = 0
tap = 0
jafntefli = 0
while on==1:
hveOft=hveOft+1
print("Skæri, blað, steinn")
    valmynd=int(input("Vilt þú gera 1 = skæri, 2 = blað, 3 = steinn eða 4 = Hætta ?"))
    tala=random.randint(1,3)
    if valmynd==1:
        if tala == 1:
print("Forritið valdi skæri. Jafntefli!")
jafntefli = jafntefli + 1
elif tala == 2:
print("Forritið valdi blað. Þú vannst!")
sigur = sigur + 1
elif tala == 3:
print("Forritið valdi stein. Þú tapaðir!")
tap = tap + 1
elif valmynd==2:
if tala == 1:
print("Forritið valdi skæri. Þú tapaðir!")
tap = tap + 1
elif tala == 2:
print("Forritið valdi blað. Jafntefli!")
jafntefli = jafntefli + 1
elif tala == 3:
print("Forritið valdi stein. Þú vannst!")
sigur = sigur + 1
elif valmynd==3:
if tala == 1:
print("Forritið valdi skæri. Þú vannst!")
sigur = sigur + 1
elif tala == 2:
print("Forritið valdi blað. Þú tapaðir!")
tap = tap + 1
elif tala == 3:
print("Forritið valdi stein. Jafntefli!")
jafntefli = jafntefli + 1
elif valmynd==4:
print("Þú vannst",sigur,"sinnum.")
print("Þú tapaðir",tap,"sinnum.")
print("Það var jafntefli",jafntefli,"sinnum.")
print("Þú spilaðir",hveOft,"sinnum.")
print("Þú heitir",nafn,"og ert",aldur,"ára gamall.")
on=0
print("Takk fyrir að spila.")
|
[
"olafurey298@nemi.tskoli.is"
] |
olafurey298@nemi.tskoli.is
|
76d60ba875fd7d2705dc0a29a8648b81e1c5d2e5
|
ad290a728ba1566925b69774a1808c4ab94f3ece
|
/models.py
|
2c6656568a20766f7543719a48f8f563a09ecc7f
|
[
"MIT"
] |
permissive
|
icemen380/voice-disciminator
|
73d9f4f675a52949bc35822ef3e64b7572f9dc4d
|
ad17af69a5230bf306fa9bb8e42e4e60cbbb0b8b
|
refs/heads/master
| 2021-02-12T23:08:55.246708
| 2018-06-21T14:40:04
| 2018-06-21T14:40:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,750
|
py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
""" A set of models """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorpack.graph_builder.model_desc import ModelDesc, InputDesc
from tensorpack.train.tower import get_current_tower_context
from hparam import hparam as hp
from modules.cbhg import conv1d, normalize, highwaynet, gru
from modules.wavenet import WaveNet
class BinaryClassificationModel(ModelDesc):
"""
n = batch size
t = timestep size
h = hidden size
"""
def __init__(self, hidden_units, num_highway, norm_type):
self.hidden_units = hidden_units
self.num_highway = num_highway
self.norm_type = norm_type
def discriminate(self, wav, melspec, is_training=False, threshold=0.9, name='discriminator'):
"""
:param melspec: shape=(n, t, n_mels)
:param is_training
:param threshold
:param name
        :return: logits, prob, pred, each of shape (n, 2)
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
out = conv1d(melspec, self.hidden_units, 3, scope='conv1d_1') # (n, t, h)
out = normalize(out, type=self.norm_type, is_training=is_training, activation_fn=tf.nn.leaky_relu,
scope='norm1')
out = conv1d(out, self.hidden_units, 3, scope='conv1d_2') # (n, t, h)
for i in range(self.num_highway):
out = highwaynet(out, num_units=self.hidden_units, scope='highwaynet_{}'.format(i)) # (n, t, h)
out = gru(out, self.hidden_units, False) # (n, t, h)
# take the last output
out = out[..., -1, :] # (n, h)
# discrimination
out = tf.layers.dense(out, self.hidden_units // 2, activation=tf.nn.leaky_relu) # (n, h/2)
out = normalize(out, type=self.norm_type, is_training=is_training, scope='norm2')
out = tf.layers.dense(out, self.hidden_units // 4, activation=tf.nn.leaky_relu) # (n, h/4)
out = normalize(out, type=self.norm_type, is_training=is_training, scope='norm3')
# prob = tf.layers.dense(out, 1, name='prob', activation=tf.nn.sigmoid) # (n, 1)
logits = tf.layers.dense(out, 2, name='prob') # (n, 2)
prob = tf.nn.softmax(logits) # (n, 2)
pred = tf.greater(prob, threshold) # (n, 2)
return logits, prob, pred
def loss(self):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
# tar_labels, ntar_labels = self.labels[:, 1], self.labels[:, 0]
# loss = -(tar_labels * tf.log(self.prob) + ntar_labels * tf.log(1. - self.prob)) # cross entropy
loss = tf.reduce_mean(loss, name='loss')
return loss
def _get_inputs(self):
length_melspec = hp.signal.length // hp.signal.hop_length + 1
return [InputDesc(tf.float32, (None, hp.signal.length), 'wav'),
InputDesc(tf.float32, (None, length_melspec, hp.signal.n_mels), 'melspec'),
InputDesc(tf.float32, (None, 2), 'labels')]
def _build_graph(self, inputs):
self.wav, self.melspec, self.labels = inputs
is_training = get_current_tower_context().is_training
        self.logits, self.prob, self.pred = self.discriminate(
            self.wav, self.melspec, is_training, threshold=hp.disc.threshold)  # (n, 2), (n, 2), (n, 2)
self.cost = self.loss()
# summaries
tf.summary.scalar('train/loss', self.cost)
# tf.summary.scalar('train/accuracy', self.accuracy())
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=hp.train.lr, trainable=False)
return tf.train.AdamOptimizer(lr)
class WaveNetClassificationModel(ModelDesc):
def __init__(self, dilations, residual_channels, dilation_channels, quantization_channels, skip_channels,
dropout=0):
self.dilations = dilations
self.residual_channels = residual_channels
self.dilation_channels = dilation_channels
self.quantization_channels = quantization_channels
self.skip_channels = skip_channels
self.dropout = dropout
def discriminate(self, wav, melspec, is_training=False, threshold=0.9, name='discriminator'):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# prenet for dropout
out = tf.expand_dims(wav, axis=-1) # (n, t, 1)
out = tf.layers.dense(out, self.quantization_channels // 2, name='dense1') # (n, t, h/2)
out = tf.layers.dropout(out, rate=self.dropout, training=is_training, name='dropout1')
out = tf.layers.dense(out, self.quantization_channels, name='dense2') # (n, t, h)
out = tf.layers.dropout(out, rate=self.dropout, training=is_training, name='dropout2')
wavenet = WaveNet(
dilations=self.dilations,
residual_channels=self.residual_channels,
dilation_channels=self.dilation_channels,
quantization_channels=self.quantization_channels,
skip_channels=self.skip_channels,
use_biases=True,
is_training=is_training)
out = wavenet(out)
# take the last output
out = out[..., -1, :] # (n, h)
# discrimination
out = tf.layers.dense(out, self.quantization_channels // 2, activation=tf.nn.leaky_relu) # (n, h/2)
logits = tf.layers.dense(out, 2, name='prob') # (n, 2)
prob = tf.nn.softmax(logits) # (n, 2)
pred = tf.greater(prob, threshold) # (n, 2)
return logits, prob, pred
def loss(self, alpha=1.):
# loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
tar_labels, ntar_labels = self.labels[:, 1], self.labels[:, 0]
tar_prob, ntar_prob = self.prob[:, 1], self.prob[:, 0]
loss = -(tar_labels * tf.log(tar_prob) + alpha * ntar_labels * tf.log(ntar_prob)) # cross entropy
loss = tf.reduce_mean(loss, name='loss')
return loss
def _get_inputs(self):
length_melspec = hp.signal.length // hp.signal.hop_length + 1
return [InputDesc(tf.float32, (None, hp.signal.length), 'wav'),
InputDesc(tf.float32, (None, length_melspec, hp.signal.n_mels), 'melspec'),
InputDesc(tf.float32, (None, 2), 'labels')]
def _build_graph(self, inputs):
self.wav, _, self.labels = inputs
is_training = get_current_tower_context().is_training
        self.logits, self.prob, self.pred = self.discriminate(
            self.wav, None, is_training, threshold=hp.disc.threshold)  # melspec is unused by the WaveNet discriminator
self.cost = self.loss()
# summaries
tf.summary.scalar('train/loss', self.cost)
# tf.summary.scalar('train/accuracy', self.accuracy())
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=hp.train.lr, trainable=False)
return tf.train.AdamOptimizer(lr)
class DefaultModel(BinaryClassificationModel):
def __init__(self):
model_params = {
'num_highway': 4,
'hidden_units': 128,
'norm_type': 'ins'
}
super().__init__(**model_params)
class WaveNet3072ClsModel(WaveNetClassificationModel):
def __init__(self):
model_params = {
'dilations':
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512],
'residual_channels': 16,
'dilation_channels': 16,
'skip_channels': 32,
'quantization_channels': 32
}
super().__init__(**model_params)
class WaveNet1024ClsModel(WaveNetClassificationModel):
def __init__(self):
model_params = {
'dilations':
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512],
'residual_channels': 16,
'dilation_channels': 16,
'skip_channels': 32,
'quantization_channels': 32
}
super().__init__(**model_params)
class WaveNetDropout1024ClsModel(WaveNetClassificationModel):
def __init__(self):
model_params = {
'dilations':
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512],
'residual_channels': 16,
'dilation_channels': 16,
'skip_channels': 32,
'quantization_channels': 32,
'dropout': 0.5
}
super().__init__(**model_params)
|
[
"andabi412@gmail.com"
] |
andabi412@gmail.com
|
e8f912ba24a1988d682968d631796372e1ac7623
|
483ef5be9fb7e753a2bcc33b0a94f97e2c2c6718
|
/app/core/migrations/0003_ingredient.py
|
7c1a7d2bab6c24f1177f2d3a2d425be24c5007cd
|
[
"MIT"
] |
permissive
|
Abcaran/recipe-app-api
|
df9a1c647559a2dae8c1b6d7a68557abc520af26
|
0271af3740b7271c35ff142cbec7f34a0932fb89
|
refs/heads/master
| 2021-06-27T23:22:59.275249
| 2020-09-22T02:18:27
| 2020-09-22T02:18:27
| 239,192,129
| 0
| 0
|
MIT
| 2021-06-17T06:10:40
| 2020-02-08T19:26:26
|
Python
|
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.0.3 on 2020-02-29 18:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"samirsaadi@gmail.com"
] |
samirsaadi@gmail.com
|
d0edaa630a619d22e42664218df89d825e90b444
|
94c0da7c0899d412c06e39d0f722ecee477dfe68
|
/25-刘杰-北京/第四周/canny_manual.py
|
0caba3d393fa6c9f80bc86db1babf021ad3e3879
|
[] |
no_license
|
Xiaoguo666205/badou-Turing
|
e72b228ed94c20505b4d23b38cf9dce7121ff948
|
7d109fd1ce462fb32556f8e71038d66eb654a401
|
refs/heads/main
| 2023-07-25T02:33:33.610428
| 2021-09-12T01:12:41
| 2021-09-12T01:12:41
| 383,296,276
| 0
| 0
| null | 2021-07-06T00:35:27
| 2021-07-06T00:35:26
| null |
UTF-8
|
Python
| false
| false
| 6,187
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@Project :badou-Turing
@File :canny_manual.py
@Author :luigi
@Date :2021/7/13 下午3:48
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from 第三周 import convolution_manual
def canny(gray, ksize, sigma,lower,upper):
""" canny算法实现,分4步:
step#1:将灰度图高斯平滑
step#2:通过sobel进行边缘提取
step#3:非极大值抑制
step#4:双阈值检测和链接边缘
:param gray: 要检测边缘的灰度图
:type gray: np.ndarray(np.uint8)
:param ksize: 高斯核的size
:type ksize: int
:param sigma: 高斯核的sigma,决定了高斯分布的形态
:type sigma: float
:param lower: 双阈值的低阈值
:type lower: int
:param upper: 双阈值的低阈值
:type upper: int
:return:
:rtype:
"""
    # Gaussian smoothing
gaussian_1d = cv2.getGaussianKernel(5, sigma)
gaussian_2d = np.dot(gaussian_1d, gaussian_1d.transpose())
blur = convolution_manual.convolute(gray, gaussian_2d, padding=2, mode="same")
# blur = cv2.GaussianBlur(gray, (ksize, ksize), sigma)
    # Compute the image gradient magnitude and direction
sobel_x = np.array(((-1,0,1),(-2,0,2),(-1,0,1)))
sobel_y = np.array(((-1,-2,-1),(0,0,0),(1,2,1)))
gradient_x = convolution_manual.convolute(blur, sobel_x, padding=1, mode="same")
gradient_y = convolution_manual.convolute(blur, sobel_y, padding=1, mode="same")
gradient = np.sqrt(gradient_x**2 + gradient_y**2)
# gradient = np.sqrt(np.square(gradient_x) + np.square(gradient_y**2))
gradient_x[gradient_x == 0] = 0.00000001
gradient_theta_tan = gradient_y/gradient_x
    # Non-maximum suppression (NMS)
gradientWithBorder = cv2.copyMakeBorder(gradient, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
# case1: 0°< theta <=45°
mask1 = (gradient_theta_tan>0) & (gradient_theta_tan<=1)
coordinate1 = np.where(mask1)
x,y = coordinate1
weight1 = gradient_theta_tan[x,y]
dTempA = gradientWithBorder[x-1, y+1]*weight1 + gradientWithBorder[x, y+1]*(1-weight1)
dTempB = gradientWithBorder[x, y-1]*weight1 + gradientWithBorder[x+1, y-1]*(1-weight1)
dTempMax1 = np.where(dTempA>dTempB, dTempA, dTempB)
# case2: 45°< theta <=90°
mask2 = (gradient_theta_tan>1)
coordinate2 = np.where(mask2)
x,y = coordinate2
weight2 = gradient_theta_tan[x,y]
dTempA = gradientWithBorder[x-1, y]*weight2 + gradientWithBorder[x-1, y+1]*(1-weight2)
dTempB = gradientWithBorder[x+1, y-1]*weight2 + gradientWithBorder[x+1, y]*(1-weight2)
dTempMax2 = np.where(dTempA>dTempB, dTempA, dTempB)
# case3: 90°< theta <=135°
mask3 = (gradient_theta_tan<=-1)
coordinate3 = np.where(mask3)
x,y = coordinate3
weight3 = gradient_theta_tan[x,y]*(-1) #weight need to be postive value
dTempA = gradientWithBorder[x-1, y-1]*weight3 + gradientWithBorder[x-1, y]*(1-weight3)
dTempB = gradientWithBorder[x+1, y]*weight3 + gradientWithBorder[x+1, y+1]*(1-weight3)
dTempMax3 = np.where(dTempA>dTempB, dTempA, dTempB)
# case4: 135°< theta <=180°
mask4 = (gradient_theta_tan>-1) & (gradient_theta_tan<=0)
coordinate4 = np.where(mask4)
x,y = coordinate4
weight4 = gradient_theta_tan[x,y]*(-1) #weight need to be postive value
dTempA = gradientWithBorder[x-1, y-1]*weight4 + gradientWithBorder[x, y-1]*(1-weight4)
dTempB = gradientWithBorder[x, y+1]*weight4 + gradientWithBorder[x+1, y+1]*(1-weight4)
dTempMax4 = np.where(dTempA>dTempB, dTempA, dTempB)
# combine all 4 coordinates together
coordinateX = np.concatenate((coordinate1[0], coordinate2[0], coordinate3[0], coordinate4[0]))
coordinateY = np.concatenate((coordinate1[1], coordinate2[1], coordinate3[1], coordinate4[1]))
# compare each gradient point with the max sub-pixel in gradient direction
candicate = gradient[coordinateX, coordinateY]
dTempMax = np.concatenate((dTempMax1,dTempMax2,dTempMax3,dTempMax4))
fake_gradient_exclude = np.where(candicate < dTempMax, 0, candicate)
nms = gradient.copy()
nms[coordinateX, coordinateY] = fake_gradient_exclude
nms = nms[np.arange(gray.shape[0])].astype(np.uint8)
    # Double-threshold detection
doubleThreshold = np.where(nms < lower, 0, np.where(nms > upper, 255, nms))
maskMid = (nms > lower) & (nms < upper)
coordinate = np.where(maskMid)
x,y = coordinate
doubleThresholdWithBorder = cv2.copyMakeBorder(doubleThreshold, 1, 1, 1, 1, cv2.BORDER_REPLICATE)
maskMax = doubleThresholdWithBorder[x,y]
for i in range(-1,2):
for j in range(-1,2):
if not ((i==0) & (j==0)):
maskMax = np.maximum(doubleThresholdWithBorder[x+i,y+i],maskMax)
midThreshold = np.where((doubleThreshold[x,y]<maskMax), 0, 255)
doubleThreshold[x,y] = midThreshold
canny_manual = doubleThreshold[np.arange(gray.shape[0])]
    # cv2's built-in Canny interface (for comparing the output)
canny_cv = cv2.Canny(blur.astype(np.uint8), lower, upper, apertureSize=3, L2gradient=True)
# candy_cv = cv2.Canny(blur.astype(np.uint8), 0, 300)
plt.subplot(221)
plt.tight_layout(pad=2.0) # subplot间距
plt.title("gray")
plt.imshow(gray, cmap='gray')
plt.subplot(222)
plt.tight_layout(pad=2.0) # subplot间距
plt.title("blur")
plt.imshow(blur, cmap='gray')
plt.subplot(223)
plt.title("canny_manual")
plt.tight_layout(pad=2.0) # subplot间距
plt.imshow(canny_manual, cmap='gray')
plt.subplot(224)
plt.title("canny_cv")
plt.tight_layout(pad=2.0) # subplot间距
plt.imshow(canny_cv, cmap='gray')
plt.show()
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-p","--path",required=True, help="path for input image")
args = vars(ap.parse_args())
image = cv2.imread(args["path"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ksize=5
sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8
m = np.median(gray)
weight = 0.33
lower = int(max(0, (1.0-weight) * m))
upper = int(min(255, (1.0+weight) * m))
canny(gray,ksize,sigma,lower,upper)
if __name__ == '__main__':
main()
|
[
"86213076+luigide2020@users.noreply.github.com"
] |
86213076+luigide2020@users.noreply.github.com
|
bb51fe8292d8e854f54a19446876ef39146db3b1
|
6ec6c942d4e0500b1717290b6c2b6565bf8e6755
|
/bk/_future_rb/_1_MyTradingMsg.py
|
3f7784f81168badbbda0f9b2ff04379850c31a32
|
[] |
no_license
|
jacklaiu/CtaTrading
|
e3a1fb6d857624f9712957bdc0dcc822c4047b98
|
75f5770944ff1aaaa97d7dbb78ea7da83b3fd897
|
refs/heads/master
| 2020-04-08T16:53:01.841209
| 2018-12-24T08:59:45
| 2018-12-24T08:59:45
| 159,539,934
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,471
|
py
|
# encoding: UTF-8
import json
import base.Dao as dao
import jqdatasdk
import base.Util as util
# Shanghai Futures Exchange (SHFE)
#
# Code           Name                               Code           Name
# AG9999.XSGE    silver dominant contract           PB9999.XSGE    lead dominant contract
# AU9999.XSGE    gold dominant contract             RB9999.XSGE    rebar dominant contract
# AL9999.XSGE    aluminum dominant contract         RU9999.XSGE    natural rubber dominant contract
# BU9999.XSGE    bitumen dominant contract          SN9999.XSGE    tin dominant contract
# CU9999.XSGE    copper dominant contract           WR9999.XSGE    wire rod dominant contract
# FU9999.XSGE    fuel oil dominant contract         ZN9999.XSGE    zinc dominant contract
# HC9999.XSGE    hot-rolled coil dominant contract  NI9999.XSGE    nickel dominant contract
# Zhengzhou Commodity Exchange (ZCE)
#
# Code           Name                               Code           Name
# CY9999.XZCE    cotton yarn dominant contract      RM9999.XZCE    rapeseed meal dominant contract
# CF9999.XZCE    cotton dominant contract           RM9999.XZCE    rapeseed meal dominant contract
# FG9999.XZCE    glass dominant contract            RS9999.XZCE    rapeseed dominant contract
# JR9999.XZCE    japonica rice dominant contract    SF9999.XZCE    ferrosilicon dominant contract
# LR9999.XZCE    late rice dominant contract        SM9999.XZCE    silicon-manganese dominant contract
# MA9999.XZCE    methanol dominant contract         SR9999.XZCE    white sugar dominant contract
# TA9999.XZCE    PTA dominant contract
# OI9999.XZCE    rapeseed oil dominant contract
# PM9999.XZCE    common wheat dominant contract     ZC9999.XZCE    thermal coal dominant contract
# AP9999.XZCE    apple dominant contract
# Dalian Commodity Exchange (DCE)
#
# Code           Name                               Code           Name
# A9999.XDCE     soybean No.1 dominant contract     JD9999.XDCE    egg dominant contract
# B9999.XDCE     soybean No.2 dominant contract     JM9999.XDCE    coking coal dominant contract
# BB9999.XDCE    plywood dominant contract          L9999.XDCE     polyethylene (LLDPE) dominant contract
# C9999.XDCE     corn dominant contract             M9999.XDCE     soybean meal dominant contract
# CS9999.XDCE    corn starch dominant contract      P9999.XDCE     palm oil dominant contract
# FB9999.XDCE    fiberboard dominant contract       PP9999.XDCE    polypropylene dominant contract
# I9999.XDCE     iron ore dominant contract         V9999.XDCE     PVC dominant contract
# J9999.XDCE     coke dominant contract             Y9999.XDCE     soybean oil dominant contract
jqdata_security = 'RB9999.XDCE'
frequency = '10m'
max = 1
strategy = 'MutilEMaStrategy'
enableTrade = False
enableBuy = True
enableShort = True
def init():
    #(1) Update t_position; insert a row if it does not exist
    dao.updateAllPosition(max, jqdata_security)
    #(2) Update CTA_setting.json
with open('CTA_setting.json', 'w') as json_file:
json_file.write(json.dumps(
[
{
"name": strategy,
"className": strategy,
"vtSymbol": util.get_CTA_setting_dominant_future(jqSecurity=jqdata_security)
}
]
))
print "################# My t_position and CTA_setting.json REFRESH! ######################"
|
[
"jacklaiu@qq.com"
] |
jacklaiu@qq.com
|
7cca7a3aecf5d22646cf68b3fe058c7125d754b7
|
c7fc816b6f6968c0110ce68927dd9e2b050d369c
|
/02_Delta-3.1.3-IL (1)/Python/Delta 03 - Append.py
|
b0571c979101122038e97565db29da5d6d759c8c
|
[] |
no_license
|
Hanish-Repo-01/Databricks-Delta-Table
|
385fb17fe98b4e45ac687c859c375e2e8b71fb23
|
7873d08eee9a6d87d10f02c8108d27614b096e9e
|
refs/heads/main
| 2023-06-27T16:41:08.673213
| 2021-07-11T19:18:58
| 2021-07-11T19:18:58
| 384,949,354
| 2
| 1
| null | 2021-07-11T19:18:59
| 2021-07-11T13:03:05
|
Scala
|
UTF-8
|
Python
| false
| false
| 12,337
|
py
|
# Databricks notebook source
# MAGIC
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px; height: 163px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Databricks Delta Batch Operations - Append
# MAGIC
# MAGIC Databricks® Delta allows you to read, write and query data in data lakes in an efficient manner.
# MAGIC
# MAGIC ## In this lesson you:
# MAGIC * Append new records to a Databricks Delta table
# MAGIC
# MAGIC ## Audience
# MAGIC * Primary Audience: Data Engineers
# MAGIC * Secondary Audience: Data Analysts and Data Scientists
# MAGIC
# MAGIC ## Prerequisites
# MAGIC * Web browser: **Chrome**
# MAGIC * A cluster configured with **8 cores** and **DBR 6.2**
# MAGIC * Suggested Courses from <a href="https://academy.databricks.com/" target="_blank">Databricks Academy</a>:
# MAGIC - ETL Part 1
# MAGIC - Spark-SQL
# MAGIC
# MAGIC ## Datasets Used
# MAGIC We will use online retail datasets from
# MAGIC * `/mnt/training/online_retail` in the demo part and
# MAGIC * `/mnt/training/structured-streaming/events/` in the exercises
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Setup
# MAGIC
# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>
# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Refresh Base Data Set
# COMMAND ----------
inputPath = "/mnt/training/online_retail/data-001/data.csv"
inputSchema = "InvoiceNo STRING, StockCode STRING, Description STRING, Quantity INT, InvoiceDate STRING, UnitPrice DOUBLE, CustomerID INT, Country STRING"
parquetDataPath = workingDir + "/customer-data/"
(spark.read
.option("header", "true")
.schema(inputSchema)
.csv(inputPath)
.write
.mode("overwrite")
.format("parquet")
.partitionBy("Country")
.save(parquetDataPath)
)
# COMMAND ----------
# MAGIC %md
# MAGIC Create table out of base data set
# COMMAND ----------
spark.sql("""
CREATE TABLE IF NOT EXISTS {}.customer_data
USING parquet
OPTIONS (path = '{}')
""".format(databaseName, parquetDataPath))
spark.sql("MSCK REPAIR TABLE {}.customer_data".format(databaseName))
# COMMAND ----------
# MAGIC %md
# MAGIC The original count of records is:
# COMMAND ----------
sqlCmd = "SELECT count(*) FROM {}.customer_data".format(databaseName)
origCount = spark.sql(sqlCmd).first()[0]
print(origCount)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Read in Some New Data
# COMMAND ----------
inputSchema = "InvoiceNo STRING, StockCode STRING, Description STRING, Quantity INT, InvoiceDate STRING, UnitPrice DOUBLE, CustomerID INT, Country STRING"
miniDataInputPath = "/mnt/training/online_retail/outdoor-products/outdoor-products-mini.csv"
newDataDF = (spark
.read
.option("header", "true")
.schema(inputSchema)
.csv(miniDataInputPath)
)
# COMMAND ----------
# MAGIC %md
# MAGIC Do a simple count of the number of new items to be added to the production data.
# COMMAND ----------
newDataDF.count()
# COMMAND ----------
# MAGIC %md
# MAGIC ## APPEND Using Non-Databricks Delta pipeline
# MAGIC
# MAGIC Append the new data to `parquetDataPath`.
# COMMAND ----------
(newDataDF
.write
.format("parquet")
.partitionBy("Country")
.mode("append")
.save(parquetDataPath)
)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Let's count the rows in `customer_data`.
# MAGIC
# MAGIC We expect to see `36` additional rows, but we do not.
# MAGIC
# MAGIC Why not?
# MAGIC
# MAGIC You will get the same count of old vs new records because the metastore doesn't know about the addition of new records yet.
# COMMAND ----------
sqlCmd = "SELECT count(*) FROM {}.customer_data".format(databaseName)
newCount = spark.sql(sqlCmd).first()[0]
print("The old count of records is {}".format(origCount))
print("The new count of records is {}".format(newCount))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Schema-on-Read Problem Revisited
# MAGIC
# MAGIC We've added new data the metastore doesn't know about.
# MAGIC
# MAGIC * It knows there is a `Sweden` partition,
# MAGIC - but it doesn't know about the 19 new records for `Sweden` that have come in.
# MAGIC * It does not know about the new `Sierra-Leone` partition,
# MAGIC - nor the 17 new records for `Sierra-Leone` that have come in.
# MAGIC
# MAGIC Here are the original table partitions:
# COMMAND ----------
sqlCmd = "SHOW PARTITIONS {}.customer_data".format(databaseName)
originalSet = spark.sql(sqlCmd).collect()
for x in originalSet:
print(x)
# COMMAND ----------
# MAGIC %md
# MAGIC Here are the partitions the new data belong to:
# COMMAND ----------
spark.sql("DROP TABLE IF EXISTS {}.mini_customer_data".format(databaseName))
newDataDF.write.partitionBy("Country").saveAsTable("{}.mini_customer_data".format(databaseName))
sqlCmd = "SHOW PARTITIONS {}.mini_customer_data ".format(databaseName)
newSet = set(spark.sql(sqlCmd).collect())
for x in newSet:
print(x)
# COMMAND ----------
# MAGIC %md
# MAGIC In order to get correct counts of records, we need to make these new partitions and new data known to the metadata.
# MAGIC
# MAGIC To do this, we apply `MSCK REPAIR TABLE`.
# COMMAND ----------
sqlCmd = "MSCK REPAIR TABLE {}.customer_data".format(databaseName)
spark.sql(sqlCmd)
# COMMAND ----------
# MAGIC %md
# MAGIC Count the number of records:
# MAGIC * The count should be correct now.
# MAGIC * That is, 65499 + 36 = 65535
# COMMAND ----------
sqlCmd = "SELECT count(*) FROM {}.customer_data".format(databaseName)
print(spark.sql(sqlCmd).first()[0])
# COMMAND ----------
# MAGIC %md
# MAGIC ## Refresh Base Data Set, Write to Databricks Delta
# COMMAND ----------
deltaDataPath = workingDir + "/customer-data-delta/"
(spark.read
.option("header", "true")
.schema(inputSchema)
.csv(inputPath)
.write
.mode("overwrite")
.format("delta")
.partitionBy("Country")
.save(deltaDataPath) )
# COMMAND ----------
# MAGIC %md
# MAGIC ## APPEND Using Databricks Delta Pipeline
# MAGIC
# MAGIC Next, repeat the process by writing to Databricks Delta format.
# MAGIC
# MAGIC In the next cell, load the new data in Databricks Delta format and save to `../delta/customer-data-delta/`.
# COMMAND ----------
miniDataInputPath = "/mnt/training/online_retail/outdoor-products/outdoor-products-mini.csv"
(newDataDF
.write
.format("delta")
.partitionBy("Country")
.mode("append")
.save(deltaDataPath)
)
# COMMAND ----------
# MAGIC %md
# MAGIC Perform a simple `count` query to verify the number of records and notice it is correct and does not first require a table repair.
# MAGIC
# MAGIC Should have 36 more entries from before.
# COMMAND ----------
sqlCmd = "SELECT count(*) FROM delta.`{}` ".format(deltaDataPath)
print(spark.sql(sqlCmd).first()[0])
# COMMAND ----------
# MAGIC %md
# MAGIC ## More Options?
# MAGIC
# MAGIC Additional Databricks Delta Reader and Writer options are included in the [Extra folder]($./Extra/Delta 01E - RW-Options).
# COMMAND ----------
# MAGIC %md
# MAGIC # LAB
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 1
# MAGIC
# MAGIC 0. Apply the schema provided under the variable `jsonSchema`
# MAGIC 0. Read the JSON data under `streamingEventPath` into a DataFrame
# MAGIC 0. Add a `date` column using `to_date(from_unixtime(col("time"),"yyyy-MM-dd"))`
# MAGIC 0. Add a `deviceId` column consisting of random numbers from 0 to 99 using this expression `expr("cast(rand(5) * 100 as int)")`
# MAGIC 0. Use the `repartition` method to split the data into 200 partitions
# MAGIC
# MAGIC Refer to <a href="http://spark.apache.org/docs/2.1.0/api/python/pyspark.sql.html#" target="_blank">Pyspark function documentation</a>.
# COMMAND ----------
# TODO
from pyspark.sql.functions import expr, col, from_unixtime, to_date
jsonSchema = "action string, time long"
streamingEventPath = "/mnt/training/structured-streaming/events/"
rawDataDF = (spark
.read
FILL_IN
.repartition(200)
# COMMAND ----------
# TEST - Run this cell to test your solution.
schema = str(rawDataDF.schema)
dbTest("assert-1", True, "action,StringType" in schema)
dbTest("assert-2", True, "time,LongType" in schema)
dbTest("assert-3", True, "date,DateType" in schema)
dbTest("assert-4", True, "deviceId,IntegerType" in schema)
print("Tests passed!")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 2
# MAGIC
# MAGIC Write out the raw data.
# MAGIC * Use `overwrite` mode
# MAGIC * Use format `delta`
# MAGIC * Partition by `date`
# MAGIC * Save to `deltaIotPath`
# COMMAND ----------
# TODO
deltaIotPath = workingDir + "/iot-pipeline/"
(rawDataDF
.write
FILL_IN
)
# COMMAND ----------
# TEST - Run this cell to test your solution.
spark.sql("""
CREATE TABLE IF NOT EXISTS {}.iot_data_delta
USING DELTA
LOCATION '{}' """.format(databaseName, deltaIotPath))
try:
tableExists = (spark.table("{}.iot_data_delta".format(databaseName)).count() > 0)
except:
tableExists = False
dbTest("Delta-02-backfillTableExists", True, tableExists)
print("Tests passed!")
# COMMAND ----------
# MAGIC %md
# MAGIC ## Step 3
# MAGIC
# MAGIC Create a new DataFrame with columns `action`, `time`, `date` and `deviceId`. The columns contain the following data:
# MAGIC
# MAGIC * `action` contains the value `Open`
# MAGIC * `time` contains the Unix time cast into a long integer `cast(1529091520 as bigint)`
# MAGIC * `date` contains `cast('2018-06-01' as date)`
# MAGIC * `deviceId` contains a random number from 0 to 499 given by `expr("cast(rand(5) * 500 as int)")`
# COMMAND ----------
# TODO
from pyspark.sql.functions import expr
newDF = (spark.range(10000)
.repartition(200)
.selectExpr("'Open' as action", FILL_IN)
.FILL_IN
# COMMAND ----------
# TEST - Run this cell to test your solution.
total = newDF.count()
dbTest("Delta-03-newDF-count", 10000, total)
print("Tests passed!")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Step 4
# MAGIC
# MAGIC Append new data to `deltaIotPath`
# MAGIC
# MAGIC * Use `append` mode
# MAGIC * Use format `delta`
# MAGIC * Partition by `date`
# MAGIC * Save to `deltaIotPath`
# COMMAND ----------
# TODO
(newDF
.write
FILL_IN
# COMMAND ----------
# TEST - Run this cell to test your solution.
numFiles = spark.sql("SELECT count(*) as total FROM delta.`{}` ".format(deltaIotPath)).first()[0]
dbTest("Delta-03-numFiles", 110000 , numFiles)
print("Tests passed!")
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Cleanup<br>
# MAGIC
# MAGIC Run the **`Classroom-Cleanup`** cell below to remove any artifacts created by this lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Cleanup"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Summary
# MAGIC
# MAGIC In this Lesson we:
# MAGIC * Encountered the schema-on-read problem when appending new data in a traditional data lake pipeline.
# MAGIC * Learned how to append new data to existing Databricks Delta data (that mitigates the above problem).
# MAGIC * Showed how to look at the set of partitions in the data set.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Next Steps
# MAGIC
# MAGIC Start the next lesson, [Upsert]($./Delta 04 - Upsert).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC * <a href="https://docs.databricks.com/delta/delta-batch.html#" target="_blank">Delta Table Batch Read and Writes</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2020 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
|
[
"hanishkumar09@outlook.com"
] |
hanishkumar09@outlook.com
|
6f98de8f1f4c8a5a82fef0450f3c85aef08edf60
|
03dd6342da469b10d0ae9629b2ad09320cdd4941
|
/venv/bin/easy_install-2.7
|
c44b5f49578d1b4458b812f6df8dec4e6355b29a
|
[] |
no_license
|
antoniomalves/blog-flask
|
d6260d68d91553b2395e94d90ac35a493a81a2fe
|
879fdd97d3609c83f3f1edb02eba0ba1c9b727ee
|
refs/heads/master
| 2021-01-10T21:24:35.686261
| 2015-07-28T03:20:27
| 2015-07-28T03:20:27
| 39,688,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
7
|
#!/home/antonio/projetos/projetos_flask/flask-intro/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"antonio.malves.lima@gmail.com"
] |
antonio.malves.lima@gmail.com
|
1877c04e2fd93308373af8f35ce9940b063d169f
|
2cc7557a17310b2ca30753289b190203b8e58097
|
/Library management/bookt.py
|
01317bbec43ab86faaea46208ec7dc07ced4e5b5
|
[] |
no_license
|
Bhavesh43m/library-management
|
54f36132c9f11e675ef0382f7a7fcacccfea8ab1
|
1aceaf29467e9b26fa04817b926697684437e518
|
refs/heads/main
| 2023-03-18T13:16:22.495113
| 2021-03-20T12:38:20
| 2021-03-20T12:38:20
| 349,720,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,263
|
py
|
import pymysql
class library:
def __init__(self):
self.__servername="localhost"
self.__username="root"
self.__password=""
self.__dbname="library1"
try:
self.con=pymysql.connect(self.__servername,self.__username,self.__password,self.__dbname)
print("connection successfull")
except:
print("Connection Error")
def add(self):
self.c=int(input("\t\t Enter book code :"))
self.bn=input("\t\t Enter book name :")
self.a=input ("\t\t Enter book auther name :")
self.pb=input ("\t\t Enter book publication :")
self.sub=input ("\t\t Enter subject :")
self.cn=int(input("\t\t Enter book copy numbers :"))
query="insert into books(code,bookname,auther,publication,subject,copy_no) values(%s,%s,%s,%s,%s,%s)"
val=(self.c,self.bn,self.a,self.pb,self.sub,self.cn)
cur=self.con.cursor()
try:
cur.execute(query,val)
except:
print("Error in the query")
else:
self.con.commit()
print("\t\t\tRecord Insert Successfully ")
print("")
def show(self):
query="select * from books"
cur=self.con.cursor() #prepared cursor
try:
cur.execute(query) #run select query
result=cur.fetchall()
print(result)
except:
print("Record not found")
def update(self):
self.c=int(input ("\t\t\tEnter Book Code for UPDATE : "))
print("\t\t\t1. Update Book name \n\t\t\t2. Update Auther name\n\t\t\t3. Update publication \n\t\t\t4. Update subject \n\t\t\t5.Update number of copies\n\t\t\t6.Exit ")
while(True):
print("\t\t\t=====================================")
ch=int(input("\t\t\tEnter your Choice for Updation : "))
print("\t\t\t=====================================")
if(ch==1):
self.bn=input("\t\t\tEnter updated book name :")
query="update books set bookname=%s where code=%s"
cur=self.con.cursor()
val=(self.bn,self.c)
try:
cur.execute(query,val)
self.con.commit()
print("\t\t\t -- Book name is Updated -- ")
except:
print("\t\t\t Oops ,Record Not Found !")
if(ch==2):
self.a=input("\t\t\tEnter updated auther name : ")
query="update books set auther=%s where code=%s"
cur=self.con.cursor()
val=(self.a,self.c)
try:
cur.execute(query,val)
self.con.commit()
print(" \t\t\t-- Auther is Updated --")
except:
print(" \t\t\tOops ,Record Not Found !")
if(ch==3):
self.pb=input("\t\t\tEnter updated book publication : ")
query="update books set publication=%s where code=%s"
cur=self.con.cursor()
val=(self.pb,self.c)
try:
cur.execute(query,val)
self.con.commit()
print(" \t\t\t-- Publication is Updated --")
except:
print("\t\t\t Oops ,Record Not Found !")
if(ch==4):
self.adr=input("\t\t\tEnter updated subject of book : ")
query="update books set subject=%s where code=%s"
cur=self.con.cursor()
val=(self.sub,self.c)
try:
cur.execute(query,val)
self.con.commit()
print("\t\t\t -- subject is Updated -- ")
except:
print("\t\t\t Oops ,Record Not Found !")
if(ch==5):
self.cn=int(input("\t\t\tEnter New count of Copies : "))
query="update studentadm set copy_no=%s where code=%s"
cur=self.con.cursor()
val=(self.cn,self.c)
try:
cur.execute(query,val)
self.con.commit()
print("\t\t\t -- count of books is Updated -- ")
except:
print("\t\t\t Oops ,Record Not Found !")
if(ch==6):
print("\t\t\t -- All Updation Done -- ")
break
def delete(self):
self.c=int(input ("\t\t\tEnter Book Code for Delete : "))
query="Delete from books WHERE code=%s"
cur=self.con.cursor()
try:
cur.execute(query,self.c)
except:
print("\t\t\t Record not Found ")
else:
self.con.commit()
print("\t\t\t Record Deleted Successfully")
print("\t\t\t")
def __del__(self):
self.con.close()
print("\t\t\t\tGood Day")
print("\t\t\t ******* Thank You ******* ")
|
[
"noreply@github.com"
] |
Bhavesh43m.noreply@github.com
|
dc09baa5760e2ec6c0ad15417507b94a1c229c7e
|
3f93be3de0597c19437462f2ecd5d1fe08e9df6f
|
/111. 二叉树的最小深度.py
|
9309f050719490eb16cea49ce6907055c59beaea
|
[] |
no_license
|
Fzldq/myLeetCode
|
3dc3819b39d1fe33cce9dfac28177ae83a383982
|
b2bf9c9a42156afd87a0d13722e364095f3ab137
|
refs/heads/master
| 2023-04-06T00:58:22.541707
| 2021-04-17T09:58:33
| 2021-04-17T09:58:33
| 280,112,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
if not root:
return 0
leftdepth = self.minDepth(root.left)
rightdepth = self.minDepth(root.right)
if not (leftdepth and rightdepth):
return leftdepth + rightdepth + 1
return 1 + min(leftdepth, rightdepth)
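
# A small usage sketch (hypothetical tree, not part of the original solution):
# the tree [3, 9, 20, None, None, 15, 7] has minimum depth 2 via the path 3 -> 9.
if __name__ == "__main__":
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().minDepth(root))  # 2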
|
[
"qq417856426@gmail.com"
] |
qq417856426@gmail.com
|
40630338544aedf15933c5be3ae50eb362a20b9c
|
6f52cd0eaaa4d27c7498f368c07de3fe93e2b522
|
/teacher_demo/asgi.py
|
1146b62dfd5c3cd7b7e9ea9159af96165aa02793
|
[] |
no_license
|
xuepl/teaching_demo
|
e151b1b1dc9458047b41919745ae7cf44b78a630
|
7b0b0772a994ddfa5aa035a6b3220c4b4ce2e2b0
|
refs/heads/master
| 2022-10-16T19:48:02.084377
| 2020-06-12T10:16:23
| 2020-06-12T10:16:23
| 271,768,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for teacher_demo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teacher_demo.settings')
application = get_asgi_application()
|
[
"xuepl@guoyasoft.com"
] |
xuepl@guoyasoft.com
|
c2692387d1ab446b93ac57e1b7db8bcbc25390cc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_083/ch54_2020_03_29_00_30_32_312445.py
|
7cd43138e65f5703e5002f181122e1a42c3dcba0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
def calcula_fibonacci(x):
    f=[0]*x
    f[0]=1
    f[1]=1
    for i in range(2, x):
        f[i]=f[i-1]+f[i-2]
    return f[x-1]

print(calcula_fibonacci(9))
|
[
"you@example.com"
] |
you@example.com
|
d58871e220e8299a13ddf0663865a9ae04bff22a
|
4c3c5e3fdf5d5f3e65f023b762019cf1ea703923
|
/class_.py
|
c7030a79bab50b8b15fa51e6cbfdbd5b1609cc90
|
[] |
no_license
|
bli-bli/first.py
|
de628e58b3f4c2b2cf9143710a403e40a1ba2635
|
830a35c8f0772b15cfee3cc1cbc0d2f9de1f234d
|
refs/heads/main
| 2023-01-08T12:14:08.140721
| 2020-11-06T07:26:46
| 2020-11-06T07:26:46
| 310,171,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Unit:
def __init__(self, name, hp, damage):
self.name = name
self.hp = hp
self.damage = damage
print(f"{self.name} 유닛이 생성되었습니다.")
print(f"체력 {self.hp}, 공격력 {self.damage}")
marine1 = Unit("마린", 40, 5)
marine2 = Unit("마린", 40, 5)
tank = Unit("탱크", 150, 35)
|
[
"60463567+bli-bli@users.noreply.github.com"
] |
60463567+bli-bli@users.noreply.github.com
|
a8419f4e8a9e22a11307913d44cd08fd2c2eb3ee
|
e836eb7ee910c1ca85233c48eadcd49a52bb20ea
|
/Utils/_Examples.py
|
8cfcd0cb6980e2238bff0b6436b7ba546a6ffaa5
|
[] |
no_license
|
SymmetricChaos/MyOtherMathStuff
|
5d0c47adfaad0e7fb7f0e8736617f15bbac7ed37
|
9dd6920b44658d2faacb54d7120e83ff7de45bf3
|
refs/heads/master
| 2021-06-26T18:34:13.178520
| 2021-01-03T14:22:10
| 2021-01-03T14:22:10
| 196,845,677
| 38
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,293
|
py
|
import Drawing as draw
from MatPlotTurtle import mplTurtle
import numpy as np
import os
import random
from PointManip import midpoint
def general_example(save=False):
canvas1 = draw.make_blank_canvas([15,15],facecolor="lightgray")
# Make and use a subplot
sp1 = draw.make_blank_plot(2,2,1,[-3,3])
# mbline takes ylim and xlim from axes to automatically appear infinite
draw.mbline(-.5,0)
# mbline or mblines can be manually limited
slopes = np.linspace(0,5,10)
draw.mblines(slopes,[0]*20,xlim=[-2,1],ylim=[-2,1],color='red')
# Subplots of different layouts can coexist
sp2 = draw.make_blank_plot(4,4,4,[-2,2],facecolor='yellow')
draw.closed_curve_xy([1,2,3],[0,1,0],color='salmon')
# Create an mbline on the most recently created axes
draw.mbline(1,1)
# Plots can be created out of order
# When subplots are not square shapes are warped
sp3 = draw.make_blank_plot(2,2,4,[-3,3],[-5,5])
draw.circle_xy(1.5,-.5,1,fc='white',ec='black')
draw.text_xy(-.8,3.5,"Shapes and lines on this plot are skewed\nbut not text\nthat's a circle down there",
ha="center")
# Show automatic xlim and ylim settings
sp4 = draw.make_blank_plot(4,4,7)
x = np.random.uniform(-2,2,200)
    y = x+np.random.uniform(-2,2,200)
draw.dots_xy(x,y,alpha=.5,color='limegreen')
draw.title("BLOOPS",size=30)
# Add lines to a chosen axes
draw.mblines([1,2,3],[0,0,0],ax=sp3)
# Make some circles
draw.circles_xy([0,1,2],[0,0,0],[.5,.3,1],sp2,ec='black',fc='green')
# Title on selected axis
draw.title(r'We can use LaTeX $\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$',ax=sp1,size=16,pad=20)
draw.canvas_title("A Title for the whole Canvas")
# Show how to put an image into a plot
cur_dir = os.getcwd()
tree_pic = cur_dir+"\\Tree.png"
draw.image(tree_pic,-2,1,scale=.3,ax=sp1)
draw.image_box(tree_pic,2,-1,scale=.3,ax=sp1)
sp5 = draw.make_blank_plot(4,4,9)
draw.table([["A","B","C"],
["1","2","3"],
["x","y","z"],
["{}"," ","NULL"]],
loc='center',colWidths=[.2,.2,.2],yscale=2)
draw.arrow_p([0,0],[.5,-.5],head_width=.2,ax=sp3)
if save:
canvas1.savefig('fig1.png', dpi=canvas1.dpi)
def statistical_plots(save=False):
canvas2 = draw.make_blank_canvas([15,15])
draw.canvas_title("Some Satistical Plots",size=25)
draw.make_plot(3,3,1)
draw.histogram(np.random.gamma(9,3,900),fc="orange",ec="black")
draw.title("Histogram")
# For some reason a pie chart will automatically supress the frame of the
# plot that contains it
draw.make_blank_plot(3,3,2)
draw.pie_chart([1,1,2,2,5],explode=[0,.1,0,0,.05],frame=True,
radius=.3,center=(.5,.5))
draw.title("Pie Chart")
fake_data = [np.random.exponential(1,50),
np.random.exponential(2,50),
np.random.standard_normal(50)]
draw.make_plot(3,3,3)
draw.boxplot(fake_data,labels=["A","B","C"])
draw.title("Boxplot")
draw.make_blank_plot(3,3,4)
draw.violin_plot(fake_data,labels=["A","B","C"],vert=False)
draw.title("Violin Plot")
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
draw.make_blank_plot(3,3,5)
draw.quiver_plot(X,Y,U,V)
draw.title("Quiver Plot")
if save:
canvas2.savefig('fig2.png', dpi=canvas2.dpi, pad=0)
def turtle_plots(save=False):
canvas3 = draw.make_blank_canvas([15,15])
draw.canvas_title("Turtle Graphics",size=25)
draw.make_blank_plot(2,2,1,xlim=[-12,12])
draw.title("A Cesaro Tree")
def my_tree(turt,level):
turt.linewidth = level
turt.left(45)
turt.forward(level/4)
if level > 0:
my_tree(turt,level-1)
turt.linewidth = level
turt.backward(level/4)
turt.right(90)
turt.forward(level/4)
if level > 0:
my_tree(turt,level-1)
turt.linewidth = level
turt.backward(level/4)
turt.left(45)
my_turtle = mplTurtle(color='brown',angle=90,alpha=.4)
my_tree(my_turtle,9)
draw.make_plot(2,2,2,xlim=[-5,5])
draw.title("Arrows Instead of Lines")
turtle1 = mplTurtle(linewidth=2,arrow_headwidth=.2)
for i in range(40):
turtle1.forward(.5+.03*i)
turtle1.left(33)
draw.make_plot(2,2,3,xlim=[-5,5])
draw.title("Stamps Only\nNo Lines")
P = [(-4,-4),(4,-4),(0,4)]
turtle2 = mplTurtle(draw=False,color="gray")
for i in range(600):
newpos = midpoint(turtle2.pos,random.choice(P))
turtle2.move_to(newpos)
turtle2.stamp(.04)
draw.make_plot(2,2,4,xlim=[-5,5])
draw.title("Bounded in a Nutshell")
P = [(-4,-4),(4,-4),(4,4),(-4,4)]
turtle3 = mplTurtle(color='green')
for i in range(500):
target = random.choice(P)
turtle3.point_to(target)
turtle3.forward(1)
if save:
canvas3.savefig('fig3.png', dpi=canvas3.dpi, pad=0)
if __name__ == '__main__':
general_example()
statistical_plots()
# turtle_plots()
|
[
"ajfraebel@gmail.com"
] |
ajfraebel@gmail.com
|
57403bafc5545d23401c44f2c76cf867cadcea40
|
8853462a79608b7e5b7af94dbfa6c0a63c1f6b6a
|
/CustomEnvs/Gym envs/1. Reacher/Reacher_11/Reacher_11/__init__.py
|
f11aabcf897914387bbb332b291c7dfc6e6c65be
|
[] |
no_license
|
Ashish017/CASNET
|
eaae2552f8d56413f756c7d3839cd6f548a6e1ef
|
73ec542c4c3fa1f97686796f0c385c71cad3e8d5
|
refs/heads/master
| 2023-02-06T06:53:27.362356
| 2020-12-27T04:43:34
| 2020-12-27T04:43:34
| 270,657,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from gym.envs.registration import register
register(
id='Reacher-v11',
entry_point='Reacher_11.envs:Reacher_v11',
)
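
# A hedged usage sketch (assumes the Reacher_11 package and its environment assets are importable):
if __name__ == "__main__":
    import gym
    env = gym.make('Reacher-v11')  # resolved through the entry point registered above
    observation = env.reset()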
|
[
"ashishmalik7017@gmail.com"
] |
ashishmalik7017@gmail.com
|
acc34ef7131901e1edaaa56b067879a972cb287c
|
e81b15aa0e6f367dae258a2246687eece8fa2f7f
|
/Tkinter编程实例/21_Canvas.py
|
06c454f5c4301d8da7ea9c53146271dca281b058
|
[] |
no_license
|
highshoe/PYTHON
|
17987267e1d67aed94f4de78d3e1aed9dcdacb3f
|
85ac06adc5023e29cd2246d024231ae5ddbe06b1
|
refs/heads/master
| 2020-05-16T05:16:05.744811
| 2018-01-07T02:17:49
| 2018-01-07T02:17:49
| null | 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 1,351
|
py
|
#!/usr/bin/python
#coding:gbk
# 画布
#提供可以用来进行绘图的Container,支持基本的几何元素,使用Canvas 进行绘图时,所
#有的操作都是通过Canvas,不是通过它的元素
#元素的表示可以使用handle 或tag
from tkinter import *
root = Tk()
#------------------创建画布------------------------------------------
cv = Canvas(root,bg = 'white')
#------------------创建一个项(矩形,填充色,边框)------------------------------------------
cv.create_rectangle(10,10,110,110,fill='skyblue',outline = 'red',width=5)
#------------------创建一个项(线条)------------------------------------------
#如何画一条 sin() 的曲线
#import math
#x = range(0,1000)
#y = [math.sin(x[i]) for i in x]
#y = [int(y[i]*1000)+1000 for i in x]
cv.create_line(10,10,110,120,150,180,smooth=1,width=2,fill='blue')
#-------------------画虚线------------------------------------------------------------
cv.create_rectangle(120,10,220,110,dash = 2,fill='green')
#-------------------使用画刷填充------------------------------------------------------------
rt = cv.create_rectangle(230,10,330,110,outline='red',stipple='gray12',fill='green')
#-------------------修改 item 的坐标------------------------------------------------------------
cv.coords(rt,(300,10,340,110))
cv.pack()
mainloop()
|
[
"icewar@qq.com"
] |
icewar@qq.com
|
46df371ebc66d892a2e9d67372f6731df29240cd
|
9e4d78b76b4c56e4750256a8b6f541df63887bdc
|
/chain.py
|
20966813e4ea11fa9eb03282b11f14eb122d0a2f
|
[] |
no_license
|
yekingyan/Algorithms_in_the_Python
|
40c783ce50c1bed5a09dac4382a1f4a56df05bb6
|
95655a7b22324978cabd2e35a58ad5294a36aba8
|
refs/heads/master
| 2020-03-23T14:39:01.368330
| 2018-08-22T06:34:54
| 2018-08-22T06:34:54
| 141,690,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
# The Node class implements a linked-list node.
# It has two attributes: one stores the element, the other a reference to the next node.
class Node():
def __init__(self, element=None, next_n=None):
self.e = element
        # stores the next Node
self.next = next_n
def append(self, element):
"""
        Append an element at the tail of the list.
        node: a Node instance
        element: an element of any type
"""
n = self
while n.next is not None:
n = n.next
        # when the loop ends, n is the last node (its next is None)
        # e.g. if the list is head->n1->n2->n3, n ends up being n3
        new_node = Node(element)
        # attach the new node to n.next
        n.next = new_node
    def prepend(self, element):
        """
        Insert an element at the head of the list (right after this head node).
        :param element: an element of any type
        :return:
        """
        n = Node(element)
        # n becomes the new first data node
        n.next = self.next
        # link the new node in right after the head
        self.next = n
def pop(self):
"""
传入头删掉最末尾的元素
:param head: 传入一个头
:return:返回被删掉的对象的值
"""
head = self.head # todo 不是同一个head,新的
tail = head
while tail.next is not None:
tail = tail.next
# 此时tail是最末尾的元素
print("tail:", tail)
n = head
print('n:', n.next)
while n.next is not tail:
n = n.next
# 此时n是tail之前的元素
print(n)
# 清掉最后一个元素
n.next = None
# 返回被删掉元素的值
return tail.e
def pop(head):
"""
    Given the head node, delete the last element of the list.
    :param head: the head node of the list
    :return: the value of the deleted node
"""
tail = head
while tail.next is not None:
tail = tail.next
# 此时tail是最末尾的元素
n = head
while n.next is not tail:
n = n.next
# 此时n是tail之前的元素
# 清掉最后一个元素
n.next = None
# 返回被删掉元素的值
return tail.e
def log_list(node):
"""
打印一个点及之后所有点的值
:param node: Node的实例
"""
n = node
s = ''
while n is not None:
s += (str(n.e) + '>')
n = n.next
print(s)
def main():
head = Node()
n1 = Node(111)
n2 = Node(222)
n3 = Node(333)
head.next = n1
n1.next = n2
n2.next = n3
head.append(444)
head.append(555)
# pop(head)
head.pop()
head.prepend('header')
log_list(head)
# None>header>111>222>333>444>
def main2():
pass
if __name__ == '__main__':
main()
|
[
"529616@@gmail.com"
] |
529616@@gmail.com
|
b9aa512c8a4f5f6ffc43510f4bd9d37d30aeeab8
|
d83ad4b9e70a5109a556670dc9c0f8be02f0c91f
|
/id3plamen.py
|
1a665fb6f6652cfaa14e5210aad2b0ce748725a9
|
[] |
no_license
|
petrovplamenph/Artificial-Intelligence-python-implementations-from-scratch
|
2378ae422a4e2dedae76084997e2a5b3221bfbc3
|
e1e7e004aa37f7a640b9914d0574d4b78b5f3800
|
refs/heads/master
| 2021-09-16T01:45:12.579446
| 2018-06-14T16:04:57
| 2018-06-14T16:04:57
| 99,428,336
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,422
|
py
|
from scipy.io import arff
import pandas as pd
from statistics import mean
import random
import math
"""
#TEST DATA
labels = ['sky','air','hum','wind','watter','forecast','like']
data =[('sun','hot','normal','strong','hot','same',1),
('sun','hot','high','strong','hot','same',1),
('rain','cold','high','strong','hot','change',0),
('sun','hot','high','strong','cold','change',1)]
df = pd.DataFrame.from_records(data,columns=labels)
df.loc[len(df)]=['sun','hot','normal','week','hot','same',0]
"""
data, meta = arff.loadarff('breast-cancer.arff')
cancer_df = pd.DataFrame(data)
dummy_target_column = (cancer_df.iloc[:,-1]==cancer_df.iloc[0,-1])
#first element has value 'recurrence-events',and this value will be defined as the positive class
cancer_df.iloc[:,-1] = dummy_target_column
def calculate_entropy(list_counts):
all_elements = sum(list_counts)
entropy = 0
for element in list_counts:
if element != 0:
entropy += -math.log2(element/all_elements)*(element/all_elements)
return entropy
def gain(inital_distrib, distributions_after):
total=sum(inital_distrib)
gain = calculate_entropy(inital_distrib)
for split in distributions_after:
total_after = sum(split)
k = total_after/total
split_entropy = calculate_entropy(split)
gain -= k*split_entropy
return gain
def calculate_gain(df):
cols = df.columns.values.tolist()
target = cols[-1]
ipos = df[df[target] == 1].shape[0]
ineg = df[df[target] == 0].shape[0]
initial_distribution = [ipos, ineg]
max_gain = -1
max_atribute = ''
for atribute in cols[:-1]:
features = df[atribute].unique()
feature_distribution = []
col = df[atribute]
for feature in features:
pos_feat = col[(col == feature) & (df[target] == 1)].shape[0]
neg_feat = col[(col == feature) & (df[target] == 0)].shape[0]
feature_distribution.append([pos_feat, neg_feat])
entropy_gain = gain(initial_distribution,feature_distribution)
if entropy_gain > max_gain:
max_gain = entropy_gain
max_atribute = atribute
    return max_atribute
class node():
    def __init__(self, children=None, atribute='leafnext', classification='A'):
        # default to a fresh list so instances do not share one mutable default
        self.children = children if children is not None else []
self.atribute = atribute
self.classification = classification
self.feature_vals_to_node = {}
self.el_Id = 'node'
def __str__(self):
return str(self.atribute)
def getId(self):
return self.el_Id
def setAtt(self, val):
self.atribute = val
def getAtt(self):
return self.atribute
def addfeature_vals_to_node(self, feat, next_node):
self.feature_vals_to_node[feat] = next_node
def addChild(self, child):
self.children.append(child)
def getChildren(self):
dictonary = self.feature_vals_to_node
return (dictonary,list(dictonary.keys()))
def setClassification(self,val):
self.classification = val
def getClassification(self):
return self.classification
class leaf():
def __init__(self,feature_val='',classification = 'A'):
self.feature_val = feature_val
self.classification = classification
self.el_id = 'leaf'
def getId(self):
return self.el_id
def setClassification(self,val):
self.classification = val
def getClassification(self):
return self.classification
def getFeature_val(self):
return self.feature_val
def fit(data, last_node):
cols = data.columns.values.tolist()
target = cols[-1]
number_cols = data.shape[0]
    if data.shape[1] == 1:
        # only the target column is left: classify by majority vote
        new_leaf = leaf('whatever', int(data[target].mean() > 0.5))
last_node.addfeature_vals_to_node('whatever', new_leaf)
return last_node
if data[data[target] == 1].shape[0] == number_cols:
new_leaf = leaf('whatever', 1)
last_node.addfeature_vals_to_node('whatever', new_leaf)
return last_node
if data[data[target] == 0].shape[0] == number_cols:
new_leaf = leaf('whatever', 0)
last_node.addfeature_vals_to_node('whatever', new_leaf)
return last_node
atribute = calculate_gain(data)
features = data[atribute].unique()
col = data[atribute]
pos_total = 0
neg_total = 0
for feature in features:
pos_feat = col[(col == feature) & (data[target] == 1)].shape[0]
pos_total += pos_feat
neg_feat = col[(col == feature) & (data[target] == 0)].shape[0]
neg_total += neg_feat
if neg_feat == 0:
new_leaf = leaf(feature, 1)
last_node.addfeature_vals_to_node(feature, new_leaf)
elif pos_feat == 0:
new_leaf = leaf(feature, 0)
last_node.addfeature_vals_to_node(feature, new_leaf)
else:
next_data = data[data[atribute] == feature]
next_data = next_data.drop([atribute], axis=1)
new_node = node()
fit(next_data, new_node)
last_node.addfeature_vals_to_node(feature, new_node)
last_node.setAtt(atribute)
unseen_value_class = int(pos_total>neg_total)
last_node.setClassification(unseen_value_class)
return last_node
root = node()
fit(cancer_df, root)
def preddict_example(example,current_node):
test_atribute = current_node.getAtt()
if test_atribute == 'leafnext':
children, key = current_node.getChildren()
return children[key[0]].getClassification()
sample_atribite_val = example[test_atribute].values[0]
children, keys = current_node.getChildren()
for i in range(len(keys)):
feature = keys[i]
if feature == 'whatever':
return children[keys[i]].getClassification()
if feature == sample_atribite_val:
if children[keys[i]].getId() == 'leaf':
return children[keys[i]].getClassification()
else:
return preddict_example(example,children[keys[i]])
return current_node.getClassification()
def predict(data,three):
y_vect = []
for idx in range(len(data)):
example = data.iloc[[idx],:]
prdiction = preddict_example(example,three)
y_vect.append(prdiction)
return y_vect
def accuracy(y1,y2):
summs = 0
for idx in range(len(y1)):
if int(y1[idx]) == int(y2[idx]):
summs += 1
return summs/len(y1)
def crossval(data):
cols = data.columns.values.tolist()
data = data.values.tolist()
random.seed(2)
random.shuffle(data)
cut_interval = int(len(data)/10)
avg_acc = []
for idx in range(10):
start = idx*cut_interval
stop = start + cut_interval
test = data[start:stop]
test = pd.DataFrame(test, columns=cols)
train = data[0:start] + data[stop:]
train = pd.DataFrame(train, columns=cols)
three = node()
fit(train, three)
y_pred = predict(test,three)
y_test = test[cols[-1]].tolist()
acc = accuracy(list(y_pred),list(y_test))
avg_acc.append(acc)
mean_acc = mean(avg_acc)
print('mean',mean_acc)
return mean_acc
crossval(cancer_df)
|
[
"petrovplamen92@gmail.com"
] |
petrovplamen92@gmail.com
|
57825a49509f088ef8b2e46fe76cea7785c73c78
|
e4fd6e578d9c57d8a4e0ef2ad4ccbfaa6037bc1e
|
/setup.py
|
0648fcfcb460d9ca7e1caa9237c64027e5ce810d
|
[
"MIT"
] |
permissive
|
nicwaller/jira-text
|
c51235e67c5a6433081215a38f58e04c7894b9f8
|
bb5d9867ecfe8ff48e4b18d123a61eec3c67a936
|
refs/heads/master
| 2021-01-13T12:51:31.074737
| 2017-01-18T09:45:45
| 2017-01-18T09:45:45
| 78,465,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
from setuptools import setup, find_packages
import os
version_file = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jiraexport', 'VERSION'))
version = version_file.read().strip()
setup(
name='jiraexport',
version=version,
author='Nic Waller',
author_email='code@nicwaller.com',
description='Export issues from a JIRA database to flat files',
url='https://github.com/nicwaller/jiraexport',
install_requires=[
'click', # for making nice command-line utilities
'sqlalchemy',
'pymysql', # Pure native python MySQL adapter that SQLalchemy can use
'progressbar2',
],
entry_points={
"console_scripts": [
"jiraexport=jiraexport.cli:main",
]
},
packages=find_packages(),
include_package_data=True,
package_data={
'jiraexport': ['jiraexport/VERSION'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Archiving'
]
)
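# Typical local usage: `pip install -e .` installs the package in editable mode and,
# via the entry_points above, exposes the `jiraexport` command on the PATH.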
|
[
"nic.waller@pulseenergy.com"
] |
nic.waller@pulseenergy.com
|
1437097965b83bd5e3dc94e27f00ef10c7978301
|
4bb1ee3d84ca73193fc0bca6b657ad5c9f582698
|
/spiders/isbnSpider/isbnSpider/pipelines.py
|
1ead49e989cfd44766611f26dad85ac41ffdff76
|
[
"MIT"
] |
permissive
|
mazhen18/ScrapyAccordingISBN
|
1dbcfe6a52990a4d257de5e357c3b7b2944c9605
|
43f8362c117247e03659c61d267b8cef435e8b64
|
refs/heads/master
| 2020-03-27T20:21:02.971389
| 2018-10-16T14:12:31
| 2018-10-16T14:12:31
| 147,062,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class IsbnspiderPipeline(object):
def process_item(self, item, spider):
return item
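# For the pipeline to run, it would normally be enabled in the project's settings.py,
# for example (300 is an arbitrary priority; lower values run earlier):
# ITEM_PIPELINES = {'isbnSpider.pipelines.IsbnspiderPipeline': 300}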
|
[
"zhenma1225@gmail.com"
] |
zhenma1225@gmail.com
|
d9277628451df7d0c4dbe736069b8934a52f1de1
|
e89cc313322371b5dc3e198f920547a408d4a262
|
/fantasybasketball/main_app/migrations/0010_auto_20190917_1520.py
|
c4525fcd783161669b705601e1b68730697f9135
|
[] |
no_license
|
samiduara/fantasybasketball
|
9043f2887f8dfd703205e4f01ef8e6ca25e0422f
|
ad5394953f4b4ac466f325c11dd9138184f9ab94
|
refs/heads/master
| 2020-07-25T03:42:01.260100
| 2019-09-17T15:50:42
| 2019-09-17T15:50:42
| 208,154,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# Generated by Django 2.2.3 on 2019-09-17 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0009_delete_profile'),
]
operations = [
migrations.AddField(
model_name='team',
name='owner_points',
field=models.TextField(max_length=250, null=True),
),
migrations.AddField(
model_name='team',
name='rank',
field=models.IntegerField(null=True),
),
]
|
[
"samgathon@hotmail.com"
] |
samgathon@hotmail.com
|