blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 220
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 257
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e057ef624b94fe0256123ec91bdf0734eb2d87bd
|
a79bc871a72d2c39bcbb7cb4242a7d469770bed0
|
/masking_api_60/api/file_format_api.py
|
750b5fa51da54a2f71b8f4f29ba1d59f7edf3fc1
|
[] |
no_license
|
pioro/masking_api_60
|
5e457249ab8a87a4cd189f68821167fa27c084f2
|
68473bdf0c05cbe105bc7d2e2a24e75a9cbeca08
|
refs/heads/master
| 2023-01-03T08:57:49.943969
| 2020-10-30T11:42:15
| 2020-10-30T11:42:15
| 279,624,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,223
|
py
|
# coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from masking_api_60.api_client import ApiClient
class FileFormatApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when the caller does not
        # supply one; all HTTP traffic is delegated to this ApiClient.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_file_format(self, file_format, file_format_type, **kwargs):  # noqa: E501
        """Create file format  # noqa: E501

        WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_file_format(file_format, file_format_type, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
        :param str file_format_type: The type of the file format being uploaded (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_file_format_with_http_info(file_format, file_format_type, **kwargs)  # noqa: E501
        else:
            (data) = self.create_file_format_with_http_info(file_format, file_format_type, **kwargs)  # noqa: E501
            return data

    def create_file_format_with_http_info(self, file_format, file_format_type, **kwargs):  # noqa: E501
        """Create file format  # noqa: E501

        WARNING: The generated curl command is incorrect, so please refer to the Masking API guide for instructions on how to upload files through the API  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_file_format_with_http_info(file_format, file_format_type, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param file file_format: The file format to be uploaded. The logical name of the file format will be exactly the name of this uploaded file (required)
        :param str file_format_type: The type of the file format being uploaded (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format', 'file_format_type']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not recognize.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format' is set
        if ('file_format' not in params or
                params['file_format'] is None):
            raise ValueError("Missing the required parameter `file_format` when calling `create_file_format`")  # noqa: E501
        # verify the required parameter 'file_format_type' is set
        if ('file_format_type' not in params or
                params['file_format_type'] is None):
            raise ValueError("Missing the required parameter `file_format_type` when calling `create_file_format`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        # The file itself goes out as a multipart upload; the type is an
        # ordinary form field.
        form_params = []
        local_var_files = {}
        if 'file_format' in params:
            local_var_files['fileFormat'] = params['file_format']  # noqa: E501
        if 'file_format_type' in params:
            form_params.append(('fileFormatType', params['file_format_type']))  # noqa: E501

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/file-formats', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def delete_file_format(self, file_format_id, **kwargs):  # noqa: E501
        """Delete file format by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_file_format(file_format_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_file_format_with_http_info(file_format_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_file_format_with_http_info(file_format_id, **kwargs)  # noqa: E501
            return data

    def delete_file_format_with_http_info(self, file_format_id, **kwargs):  # noqa: E501
        """Delete file format by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_file_format_with_http_info(file_format_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not recognize.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `delete_file_format`")  # noqa: E501

        collection_formats = {}

        # The ID is substituted into the {fileFormatId} path template below.
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_all_file_formats(self, **kwargs):  # noqa: E501
        """Get all file formats  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_file_formats(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page_number: The page number for which to get file formats. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: FileFormatList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_all_file_formats_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_all_file_formats_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_all_file_formats_with_http_info(self, **kwargs):  # noqa: E501
        """Get all file formats  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_file_formats_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page_number: The page number for which to get file formats. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: FileFormatList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['page_number', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not recognize.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_file_formats" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        # Pagination is expressed as optional query parameters.
        query_params = []
        if 'page_number' in params:
            query_params.append(('page_number', params['page_number']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('page_size', params['page_size']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/file-formats', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormatList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_file_format_by_id(self, file_format_id, **kwargs):  # noqa: E501
        """Get file format by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_file_format_by_id(file_format_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to get (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_file_format_by_id_with_http_info(file_format_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_file_format_by_id_with_http_info(file_format_id, **kwargs)  # noqa: E501
            return data

    def get_file_format_by_id_with_http_info(self, file_format_id, **kwargs):  # noqa: E501
        """Get file format by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_file_format_by_id_with_http_info(file_format_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to get (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not recognize.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_file_format_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `get_file_format_by_id`")  # noqa: E501

        collection_formats = {}

        # The ID is substituted into the {fileFormatId} path template below.
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_file_format(self, file_format_id, body, **kwargs):  # noqa: E501
        """Update file format  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_file_format(file_format_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to update (required)
        :param FileFormat body: The updated file format (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_file_format_with_http_info(file_format_id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.update_file_format_with_http_info(file_format_id, body, **kwargs)  # noqa: E501
            return data

    def update_file_format_with_http_info(self, file_format_id, body, **kwargs):  # noqa: E501
        """Update file format  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_file_format_with_http_info(file_format_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int file_format_id: The ID of the file format to update (required)
        :param FileFormat body: The updated file format (required)
        :return: FileFormat
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['file_format_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not recognize.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_file_format" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'file_format_id' is set
        if ('file_format_id' not in params or
                params['file_format_id'] is None):
            raise ValueError("Missing the required parameter `file_format_id` when calling `update_file_format`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `update_file_format`")  # noqa: E501

        collection_formats = {}

        # The ID is substituted into the {fileFormatId} path template below.
        path_params = {}
        if 'file_format_id' in params:
            path_params['fileFormatId'] = params['file_format_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The updated FileFormat is sent as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/file-formats/{fileFormatId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileFormat',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
[
"marcin@delphix.com"
] |
marcin@delphix.com
|
86e28e058036e1492b3a36eb2e82fa2641ec4989
|
2deddb3a19163f9cc461bdb9c2dacfdbb57b9f47
|
/sfbay_cons_tracer_00.py
|
dc2f2067ff4a50328831179e3832cba4d0b2f4f5
|
[] |
no_license
|
rustychris/sfbay_cons_tracer
|
f1548bbf06841d56790110e1019c92ee60e5a142
|
7d7653d5747b638a10ebdb3dac7b2b89c18cbc1c
|
refs/heads/master
| 2021-06-11T02:01:16.386183
| 2019-09-06T19:33:32
| 2019-09-06T19:33:32
| 128,102,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,917
|
py
|
"""
Copied from
/home/rusty/models/delft/nbwaq/spinupdate/spinupdate_wy2013_D02cons_tracer.py
Moving on to 4/28/16, np=4, D02 hydrodynamics
this one does the full list of passive tracers
"""
import os
import shutil
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
import numpy as np
from stompy import utils
from stompy.model.delft import waq_scenario
import pandas as pd
import ugrid
from stompy.spatial import (wkb2shp,proj_utils)
from stompy.grid import unstructured_grid
##
# The 2017-10-16 runs used this older hydrodynamic data.
# hydro=waq_scenario.HydroFiles("../../delft/sfb_dfm_v2/runs/wy2013a/DFM_DELWAQ_wy2013a/wy2013a.hyd")

# This is the hydro described in the Interim Model Validation Report, with the adjusted
# fluxes for issues with that version of DFM.
hydro=waq_scenario.HydroFiles("../../delft/sfb_dfm_v2/runs/wy2013c/DFM_DELWAQ_wy2013c_adj/wy2013c.hyd")
hydro.enable_write_symlink=True

##

# Short aliases for the waq_scenario classes used repeatedly below.
PC=waq_scenario.ParameterConstant
Sub=waq_scenario.Substance
IC=waq_scenario.Initial
# Water quality setup
class Scen(waq_scenario.Scenario):
    """DWAQ scenario for the wy2013c SF Bay hydrodynamics: one conservative
    tracer per boundary source (plus lumped delta/stormwater/sea tracers and a
    continuity tracer), optionally restricted via ``sub_subset``.
    """
    name="sfb_dfm_v2"
    desc=('sfb_dfm_v2',
          'wy2013c',
          'conserv_tracer')

    # removed BALANCES-SOBEK-STYLE
    # IMPORTANT to have the NODISP-AT-BOUND in there.
    integration_option="""15 ;
LOWER-ORDER-AT-BOUND NODISP-AT-BOUND
BALANCES-OLD-STYLE BALANCES-GPP-STYLE
BAL_NOLUMPPROCESSES BAL_NOLUMPLOADS BAL_NOLUMPTRANSPORT
BAL_NOSUPPRESSSPACE BAL_NOSUPPRESSTIME
"""
    base_path='runs/wy2013c-20180404'

    # maybe this will be more stable with shorter time step?
    # with 30 minutes, failed with non-convergence at 0.09% or so.
    # with 15 minutes, failed with non-convergence at 7.28%
    time_step=1000 # dwaq HHMMSS integer format

    # a bit less annoying than nefis
    map_formats=['binary']

    # stormwater is grouped into a single tracer
    storm_sources=['SCLARAVW2_flow',
                   'SCLARAVW1_flow',
                   'SCLARAVW4_flow',
                   'SCLARAVW3_flow',
                   'UALAMEDA_flow',
                   'EBAYS_flow',
                   'COYOTE_flow',
                   'PENINSULb1_flow',
                   'EBAYCc3_flow',
                   'USANLORZ_flow',
                   'PENINSULb3_flow',
                   'PENINSULb4_flow',
                   'EBAYCc2_flow',
                   'PENINSULb6_flow',
                   'PENINSULb2_flow',
                   'PENINSULb7_flow',
                   'PENINSULb5_flow',
                   'SCLARAVCc_flow',
                   'SCLARAVW5_flow',
                   'MARINS1_flow',
                   'EBAYCc6_flow',
                   'EBAYCc1_flow',
                   'EBAYCc5_flow',
                   'EBAYCc4_flow',
                   'MARINN_flow',
                   'NAPA_flow',
                   'CCOSTAW2_flow',
                   'CCOSTAW3_flow',
                   'MARINS3_flow',
                   'MARINS2_flow',
                   'PETALUMA_flow',
                   'SONOMA_flow',
                   'CCOSTAW1_flow',
                   'SOLANOWc_flow',
                   'CCOSTAC2_flow',
                   'EBAYN1_flow',
                   'EBAYN4_flow',
                   'EBAYN2_flow',
                   'EBAYN3_flow',
                   'SOLANOWa_flow',
                   'SOLANOWb_flow',
                   'CCOSTAC3_flow',
                   'CCOSTAC1_flow',
                   'CCOSTAC4_flow']

    # Delta inflows are likewise lumped into a single 'delta' tracer.
    delta_sources=['Jersey_flow',
                   'RioVista_flow']
    # Ocean boundary, lumped into the 'sea' tracer.
    sea_sources=[ 'Sea_ssh' ]

    # run only a subset of substances (None means run all of them)
    sub_subset=None

    def init_substances(self):
        """Define one conservative tracer per boundary group, honoring
        ``sub_subset`` when it is set."""
        subs=super(Scen,self).init_substances()

        # with the DFM run, how do we get the labeling of boundary condition
        # and discharge flows?
        # previous code just used horizontal boundary flows.
        # what do the discharges look like the dwaq data?
        # there is a .bnd file, with 88 labeled entries, things like
        # "SCLARAVW2_flow".
        # each of those has what appears to be the numbers (negative)
        # for the boundary exchanges (sometime several boundary exchanges)
        # and some xy coordinates
        # maybe that's all we need.
        link_groups=self.hydro.group_boundary_links()

        groups={} # just used for sanity checks to make sure that BCs that we're
                  # trying to set exist.
        for link_group in link_groups:
            if link_group['id']>=0 and link_group['name'] not in groups:
                groups[ link_group['name'] ] = link_group

        # all src_tags default to a concentration BC of 1.0, which is exactly
        # what we want here. no need to specify additional data.

        def check(name):
            # True when this tracer should be included in the run.
            return (self.sub_subset is None) or (name in self.sub_subset)

        for k in groups.keys():
            if k in self.delta_sources + self.storm_sources + self.sea_sources:
                # these are lumped below
                continue
            # if k!='ebda':#DBG
            #     continue
            name=k
            if name=='millbrae':
                # millbrae and burlingame got combined along the way, due to
                # entering in the same cell.
                name='millbrae_burlingame'
            if not check(name):
                continue
            print("Adding tracer for %s"%name)
            subs[name]=Sub(initial=IC(default=0.0))
            # any additional work required here?
            # hopefully waq_scenario is going to use these same names to label
            # boundaries
            self.src_tags.append(dict(tracer=name,items=[k]))

        # Lumped tracers: each gets a single substance fed by several sources.
        if check('delta'):
            subs['delta']=Sub(initial=IC(default=0.0))
            self.src_tags.append( dict(tracer='delta',
                                       items=self.delta_sources) )
        if check('stormwater'):
            subs['stormwater']=Sub(initial=IC(default=0.0))
            self.src_tags.append( dict(tracer='stormwater',
                                       items=self.storm_sources) )
        if check('sea'):
            subs['sea']=Sub(initial=IC(default=0.0))
            self.src_tags.append( dict(tracer='sea',
                                       items=self.sea_sources) )
        # Continuity tracer: starts at 1.0 everywhere and is fed by every
        # boundary group, so it should remain 1.0 if transport conserves mass.
        if check('continuity'):
            subs['continuity']=Sub(initial=IC(default=1.0))
            self.src_tags.append( dict(tracer='continuity',
                                       items=list(groups.keys())))

        return subs

    def init_parameters(self):
        """Set solver parameters and enable depth-related processes."""
        # choose which processes are enabled. Includes some
        # parameters which are not currently used.
        params=super(Scen,self).init_parameters()
        # NOTE(review): value is 24 but the comment says "one processor" —
        # confirm the intended thread count.
        params['NOTHREADS']=PC(24) # one processor.. or maybe two, or four
        # maybe defaulted to 1e-7?
        # 1e-6 failed to 0.09%, same as before
        params['Tolerance']=1e-5
        params['ACTIVE_DYNDEPTH']=1
        params['ACTIVE_TOTDEPTH']=1
        return params

    def cmd_default(self):
        """Full workflow: write hydro and .inp, then run delwaq1/delwaq2."""
        self.cmd_write_hydro()
        self.cmd_write_inp()
        self.cmd_delwaq1()
        self.cmd_delwaq2()

    def __init__(self,*a,**k):
        super(Scen,self).__init__(*a,**k)
        # Always include these fields in the map output.
        self.map_output+=('TotalDepth',
                          'volume',
                          'depth')
##
if __name__=='__main__':
    # Run just the lumped stormwater tracer against the wy2013c hydro.
    scen=Scen(hydro=hydro,
              sub_subset=['stormwater'])
    # Align the scenario clock with the hydro record; stop one step short of
    # the final hydro output (t_secs[-2]).
    scen.start_time=scen.time0+scen.scu*hydro.t_secs[0]
    scen.stop_time =scen.time0+scen.scu*hydro.t_secs[-2]

    # real run, but just daily map output
    scen.map_time_step=240000 # map output daily
    # debugging - hourly output
    # scen.map_time_step=10000 # map output hourly

    if 1:
        scen.cmd_default()
        # scen.cmd_write_nc()
    else:
        scen.main()
|
[
"rustyh@sfei.org"
] |
rustyh@sfei.org
|
57dcf4ac4135b21514dedd7221615770f17b6223
|
5b315e1606c8b3c753431f028e9bb76600148db7
|
/todo_list/admin.py
|
857b3bd6df7d0d13fc69369d7feeffcd07676f99
|
[] |
no_license
|
abel-masila/django_todo
|
0e23d45fefc45a0e9463639dbe7125d4a0fec8cf
|
634914967dc024e1a765fa8618481a23285cea7e
|
refs/heads/master
| 2020-04-30T01:06:40.794234
| 2019-03-23T13:40:02
| 2019-03-23T13:40:02
| 176,520,149
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from django.contrib import admin

from .models import List

# Register your models here.
# Expose the List model through the Django admin with the default ModelAdmin.
admin.site.register(List)
|
[
"abelmasila@gmail.com"
] |
abelmasila@gmail.com
|
4861ac85dd04717d62ba306fdcc2924c58a23062
|
8ffc97dcf9bab5c6d2a75039ed12b2bc9acf6548
|
/HMWK_03_saa3053/ParseTree/Literal.py
|
0b30ca2c4aa398e8ac5f77904653d72b9b8a514e
|
[] |
no_license
|
saidadem3/compilers
|
2f88f51d57dbffd9623de01c318d499f2e7d06df
|
56e4b07485d412325970170c7b1544b5e914621b
|
refs/heads/master
| 2022-03-08T11:26:16.555606
| 2019-10-24T11:00:47
| 2019-10-24T11:00:47
| 213,783,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
# Adem, Said
# saa3053
# 2019-10-23
#---------#---------#---------#---------#---------#--------#
import sys
from .common import *
#---------#---------#---------#---------#---------#--------#
class Literal():
    """Parse-tree node for a literal constant.

    Stores the source line number, the literal's type object, and its value.
    """

    def __init__(self, lineNum, valType, value):
        self.m_NodeType = 'Literal'   # node-kind tag used by the tree dumper
        self.m_LineNum = lineNum      # source line where the literal appeared
        self.m_Type = valType         # type object (must support isReal())
        self.m_Value = value          # the literal value itself

    def dump(self, indent=0, fp=sys.stdout):
        """Write a one-line description of this node to *fp*."""
        # Real-valued literals are printed in full-precision scientific
        # notation; everything else uses its repr.
        if self.m_Type.isReal():
            text = f'LITERAL {self.m_Type!r} {self.m_Value:.16e}'
        else:
            text = f'LITERAL {self.m_Type!r} {self.m_Value!r}'
        dumpHeaderLine(indent, self.m_LineNum, text, fp)
|
[
"saidadem3@gmail.com"
] |
saidadem3@gmail.com
|
a6fa615f7fb4b26b658dab52008462a204aaa48f
|
c8fc387b0b440bf1681e581adaac5b5ef718525f
|
/project/src/utils/output_util.py
|
e49077470bc0acbdcf2c936edf1f99a927b514f5
|
[] |
no_license
|
MingjunGuo/bdt_5002
|
bc0f37a2f5453d531a6b73c1dccc959c1cc7311c
|
591ec7f3854d5d0e1ffd47f2086776253ce405ce
|
refs/heads/master
| 2020-04-01T13:09:35.819447
| 2019-01-24T06:50:28
| 2019-01-24T06:50:28
| 153,239,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
# -*- coding: UTF-8 -*-
## 主程序
import numpy as np
import pandas as pd
import os
def write_value_to_csv(city,
                       file_name,
                       values,
                       output_features,
                       day=True,
                       seperate=False,
                       one_day_model=False):
    '''
    Write all the values to a csv file according to output_features.

    city            : "bj" is the only city currently handled; other values
                      leave df_list empty and will fail at pd.concat.
    file_name       : basename (without extension) of the output csv.
    values          : array of shape (m, output_hours, output_features).
    output_features : feature names matching the last axis of `values`.
    day             : False to use old model, True to use day model.
    seperate        : also write under model_preds_seperate/.
    one_day_model   : write under model_preds_one_day/ instead of model_preds/.
    '''
    df_list = []
    for i in range(values.shape[0]):
        value = values[i, :, :]
        # load sample submission template to fill in
        if city == "bj":
            # NOTE(review): hard-coded absolute path — consider making this
            # a parameter or relative path.
            df = pd.read_csv('E:/python/5002DM/project/data/sample_submission.csv')
            # df = pd.read_csv("submission/sample_bj_submission.csv")
            features = ["PM2.5", "PM10", "O3"]
            df["PM2.5"] = df["PM2.5"].astype('float64')
            df["PM10"] = df["PM10"].astype('float64')
            for index in df.index:
                test_id = df.test_id[index]
                station, hour = test_id.split("#")
                for feature in features:
                    r = get_value_from_array(value, output_features, station, int(hour), feature)
                    # FIX: DataFrame.set_value was deprecated in pandas 0.21
                    # and removed in 1.0; .at is the supported scalar setter.
                    df.at[index, feature] = r
            df.set_index("test_id", inplace=True)
            # clip negative predictions to zero
            df[df < 0] = 0
            # rename columns with the sample index suffix so the per-sample
            # frames can be concatenated side by side
            original_names = df.columns.values.tolist()
            names_dict = {original_name: original_name + "_" + str(i) for original_name in original_names}
            df.rename(index=str, columns=names_dict, inplace=True)
            df_list.append(df)
    df = pd.concat(df_list, axis=1)

    if seperate:
        _write_csv(df, "./model_preds_seperate/%s/%s.csv" % (city, file_name))
    if day:
        _write_csv(df, "./model_preds_day/%s/%s.csv" % (city, file_name))
    if one_day_model:
        _write_csv(df, "./model_preds_one_day/%s/%s.csv" % (city, file_name))
    else:
        _write_csv(df, "./model_preds/%s/%s.csv" % (city, file_name))


def _write_csv(df, path):
    # Create the target directory on demand, then write the frame to csv.
    file_dir = os.path.split(path)[0]
    if not os.path.isdir(file_dir):
        os.makedirs(file_dir)
    df.to_csv(path)
def get_value_from_array(value_array, output_features, target_station, target_hour, target_feature):
    """Look up one predicted value in a (hours, features) array.

    Scans output_features for a name whose station part contains
    target_station (after dropping any "aq" component) and whose feature part
    equals target_feature; returns value_array[target_hour, column] for the
    first match, or -1 when nothing matches.
    """
    # Normalize the target station: keep only the part before "_aq".
    if "aq" in target_station:
        target_station = target_station.split("_")[0]

    for column, feature_name in enumerate(output_features):
        parts = feature_name.split("_")
        if "aq" in parts:
            parts.remove("aq")
        station, feature = parts
        if target_station in station and feature == target_feature:
            return value_array[target_hour, column]

    # Sentinel for "not found" — callers treat -1 as a missing value.
    return -1
|
[
"noreply@github.com"
] |
MingjunGuo.noreply@github.com
|
a5c946729ef57bcccc32f61428f82484fc908fba
|
f7387905bfe862e525602a5a3e997de088684e10
|
/Swagger/python-client/setup.py
|
bea8806adae6917c4d8806ab2f19f8cfe88674b4
|
[] |
no_license
|
AnirudhNagulapalli/Flyingsaucers
|
a5724f81e1dec698cc3bcebda182f04e6fb4e07a
|
a429300273626f259ee158ba8a97d44a23d68406
|
refs/heads/master
| 2021-01-23T14:30:53.828674
| 2017-12-11T09:37:58
| 2017-12-11T09:37:58
| 102,689,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
# coding: utf-8
"""
Flying Saucers
This is for MSCS710 Project Course
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import sys
from setuptools import setup, find_packages
NAME = "swagger-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Flying Saucers",
author_email="",
url="",
keywords=["Swagger", "Flying Saucers"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
This is for MSCS710 Project Course
"""
)
|
[
"anirudh.nagulapalli1@marist.edu"
] |
anirudh.nagulapalli1@marist.edu
|
a1d1ee62356d167897f0766db41dcf93a9ca6203
|
c59a02471f295f93b56f9fadd51b47ec24414014
|
/historical/myenv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py
|
b48165bb7e18aced872069dd55d5b3a7989ca46d
|
[] |
no_license
|
jgdevelopment/Chronos-Backend
|
a3abe8bfea88d1456d03b2eaf411ab31d6b83fc1
|
548af4d7c8edc81c0b860862999dc0156b2e1fa2
|
refs/heads/master
| 2021-01-01T19:51:59.551831
| 2014-10-21T11:05:57
| 2014-10-21T11:05:57
| 16,659,327
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,968
|
py
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
except_on_missing_scheme, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection should attempt.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter()
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
except_on_missing_scheme(proxy)
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
conn = self.poolmanager.connection_from_url(url.lower())
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme.lower()
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
# Proxy auth usernames and passwords will be urlencoded, we need
# to decode them.
username = unquote(username)
password = unquote(password)
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param vert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if stream:
timeout = TimeoutSauce(connect=timeout)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except socket.error as sockerr:
raise ConnectionError(sockerr)
except MaxRetryError as e:
raise ConnectionError(e)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)
elif isinstance(e, TimeoutError):
raise Timeout(e)
else:
raise
r = self.build_response(request, resp)
if not stream:
r.content
return r
|
[
"ginsdaman@gmail.com"
] |
ginsdaman@gmail.com
|
41a86538fd422e72ba79da31cd965f050d59b26c
|
9e27cc85675ec764a62764decdc85d6b57a10be3
|
/kaggle_kernel.py
|
63533471d892f754944895ad665a85aa841ca8ee
|
[] |
no_license
|
jsamaitis/Home-Credit-Default-Risk-2018
|
07886e3992301ca8e855773d615a41eecf647b5d
|
26d690eabe137d210e963b2daf36d03adfc057d4
|
refs/heads/master
| 2020-03-24T21:13:48.635974
| 2018-08-13T08:13:26
| 2018-08-13T08:13:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,043
|
py
|
# HOME CREDIT DEFAULT RISK COMPETITION
# Most features are created by applying min, max, mean, sum and var functions to grouped tables.
# Little feature selection is done and overfitting might be a problem since many features are related.
# The following key ideas were used:
# - Divide or subtract important features to get rates (like annuity and income)
# - In Bureau Data: create specific features for Active credits and Closed credits
# - In Previous Applications: create specific features for Approved and Refused applications
# - Modularity: one function for each table (except bureau_balance and application_test)
# - One-hot encoding for categorical features
# All tables are joined with the application DF using the SK_ID_CURR key (except bureau_balance).
# You can use LightGBM with KFold or Stratified KFold. Please upvote if you find usefull, thanks!
# Update 16/06/2018:
# - Added Payment Rate feature
# - Removed index from features
# - Set early stopping to 200 rounds
# - Use standard KFold CV (not stratified)
# Public LB increased to 0.792
import numpy as np
import pandas as pd
import gc
import time
from contextlib import contextmanager
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.0f}s".format(title, time.time() - t0))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows = None, nan_as_category = False):
# Read data and merge
df = pd.read_csv('data/application_train.csv', nrows= num_rows)
test_df = pd.read_csv('data/application_test.csv', nrows= num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df).reset_index()
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
# Categorical features with One-Hot encode
df, cat_cols = one_hot_encoder(df, nan_as_category)
# NaN values for DAYS_EMPLOYED: 365.243 -> nan
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
# Some simple new features (percentages)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
del test_df
gc.collect()
return df
# Preprocess bureau.csv and bureau_balance.csv
def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('data/bureau.csv', nrows = num_rows)
bb = pd.read_csv('data/bureau_balance.csv', nrows = num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
# Bureau balance: Perform aggregations and merge with bureau.csv
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
del bb, bb_agg
gc.collect()
# Bureau and bureau_balance numeric features
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
# Bureau and bureau_balance categorical features
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
# Bureau: Active credits - using only numerical aggregations
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
# Bureau: Closed credits - using only numerical aggregations
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
# Preprocess previous_applications.csv
def previous_applications(num_rows = None, nan_as_category = True):
prev = pd.read_csv('data/previous_application.csv', nrows = num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
# Days 365.243 values -> nan
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
# Add feature: value ask / value received percentage
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
# Previous applications numeric features
num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
# Previous applications categorical features
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
# Previous Applications: Approved Applications - only numerical features
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
# Previous Applications: Refused Applications - only numerical features
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg
# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows = None, nan_as_category = True):
pos = pd.read_csv('data/POS_CASH_balance.csv', nrows = num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
# Features
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist()])
# Count pos cash accounts
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()
del pos
gc.collect()
return pos_agg
# Preprocess installments_payments.csv
def installments_payments(num_rows = None, nan_as_category = True):
ins = pd.read_csv('data/installments_payments.csv', nrows = num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
# Percentage and difference paid in each installment (amount paid and installment value)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
# Days past due and days before due (no negative values)
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
# Features: Perform aggregations
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
# Count installments accounts
ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
del ins
gc.collect()
return ins_agg
# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows = None, nan_as_category = True):
cc = pd.read_csv('data/credit_card_balance.csv', nrows = num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
# General aggregations
cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)
cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist()])
# Count credit card lines
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
del cc
gc.collect()
return cc_agg
# LightGBM GBDT with KFold or Stratified KFold
# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code
def kfold_lightgbm(df, num_folds, stratified = False, debug= False):
# Divide in training/validation and test data
train_df = df[df['TARGET'].notnull()]
test_df = df[df['TARGET'].isnull()]
print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
# Cross validation model
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
# Create arrays and dataframes to store results
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
# LightGBM parameters found by Bayesian optimization
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1, )
clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
# Write submission file and plot feature importance
if not debug:
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
display_importances(feature_importance_df)
return feature_importance_df
# Display/plot feature importance
def display_importances(feature_importance_df_):
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:40].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances01.png')
def main(debug = False):
num_rows = 10000 if debug else None
df = application_train_test(num_rows)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Run LightGBM with kfold"):
feat_importance = kfold_lightgbm(df, num_folds= 5, stratified= False, debug= debug)
if __name__ == "__main__":
submission_file_name = "submission_kernel02.csv"
with timer("Full model run"):
main()
|
[
"aukslius@gmail.com"
] |
aukslius@gmail.com
|
4ae48a9d376d47aaba570ad7568afe37cbb02418
|
6953e2f2ef4d70f940dfbe0671f1d2ef6bb1bd03
|
/classtutorial.py
|
51d73ab22cb1123c9a0693d0cb06b496a8cd283e
|
[] |
no_license
|
mogarg/Python-Tutorial-Lynda
|
2e786880a2e98b067095af5fefb4b1b3adac4bd2
|
452326453ddb6990d81e1a6b5a478da72f8769cb
|
refs/heads/master
| 2021-01-17T17:44:27.357207
| 2016-06-18T22:45:07
| 2016-06-18T22:45:07
| 58,245,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
#!/usr/bin/python3
class Fibonacci():
def __init__(self, a, b):
self.a = a
self.b = b
def series(self):
while(True):
yield(self.b)
self.a, self.b = self.b, self.a + self.b
f = Fibonacci(0, 1)
for r in f.series():
if r > 100: break
print(r)
|
[
"matpsycic@gmail.com"
] |
matpsycic@gmail.com
|
14b450a72c93ad9b78cf7685fe19e4122eb15c24
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_mp21.py
|
562fa5609e8ddc81fe2febf073542f27d358c618
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from xcp2k.inputsection import InputSection
from xcp2k.classes._mp2_info1 import _mp2_info1
class _mp21(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Method = None
self.Big_send = None
self.MP2_INFO = _mp2_info1()
self._name = "MP2"
self._keywords = {'Method': 'METHOD', 'Big_send': 'BIG_SEND'}
self._subsections = {'MP2_INFO': 'MP2_INFO'}
self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
73616b9e93361ad0fc86bdeef7e8e7d1c8c6c9fb
|
c86043aff8b6803102e44bc0f323b5dd3e2e3a2d
|
/comp/plot_numbers.py
|
b8e06d6ceeeb439f02390acf3575a407cef2e273
|
[] |
no_license
|
ivotron/open-comp-rsc-popper
|
6a3a2d6fe0b17aae5d4f26ff25ed186187e3c714
|
05acba677f68f3fea6c88245c84d181384163a96
|
refs/heads/master
| 2020-03-16T21:54:13.804728
| 2018-10-18T08:16:16
| 2018-10-18T08:16:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import sys, matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pl
import numpy as np
fpath = sys.argv[1]
data = np.load(fpath)
pl.plot(data)
pl.savefig('comp/data/original.png')
|
[
"ivo.jimenez@gmail.com"
] |
ivo.jimenez@gmail.com
|
4d23735583d49ed6fba1925bf636572e5d146be5
|
2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b
|
/121.买卖股票的最佳时机.py
|
7cd0e5ce63fc4da08187b59ea4f973e49037b644
|
[] |
no_license
|
mqinbin/python_leetcode
|
77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3
|
73e0c81867f38fdf4051d8f58d0d3dc245be081e
|
refs/heads/main
| 2023-03-10T18:27:36.421262
| 2021-02-25T07:24:10
| 2021-02-25T07:24:10
| 314,410,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
#
# @lc app=leetcode.cn id=121 lang=python3
#
# [121] 买卖股票的最佳时机
#
# @lc code=start
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
min_price = prices[0]
max_profit = 0
for i in range(1, len(prices)):
max_profit = max(prices[i] - min_price ,max_profit)
min_price = min(min_price, prices[i])
return max_profit
# @lc code=end
|
[
"mqinbin@gmail.com"
] |
mqinbin@gmail.com
|
7ea7df614f889ecc385d58e0cb8da56dff59d665
|
bb735076e6a351dd6cbf74b7eb34b9a4538789b7
|
/Laboratorio 25_4.py
|
562bd35a30b90071fdfb8a5713776cf23201b51f
|
[] |
no_license
|
Santi0207/mapa-mental
|
27a4915c9c5c2184b4325f90eab53820f4ea7a6e
|
95dff50253792145deea509337d83d1b474aea15
|
refs/heads/main
| 2023-05-03T03:03:06.578337
| 2021-05-25T19:38:41
| 2021-05-25T19:38:41
| 359,498,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 14:19:58 2021
@author: santi
"""
def cantidad_A(palabra):
cont=0
for x in range (len(palabra)):
if palabra [x] =="a" or palabra [x] =="A":
cont+=1
return cont
palabra=input("Ingrese una palabra: ")
print ("la palabra", palabra,"tiene", cantidad_A(palabra), "a")
|
[
"noreply@github.com"
] |
Santi0207.noreply@github.com
|
147b3bc0148ddc69e31304519e65c37ad3c790e6
|
80de5ac86ce85b5aa93788d5d2325d88b87b47f7
|
/cf/1334/c.py
|
0d9603f1d8a8e97a68d5e3f095f080f1f5405a4e
|
[] |
no_license
|
ethicalrushi/cp
|
9a46744d647053fd3d2eaffc52888ec3c190f348
|
c881d912b4f77acfde6ac2ded0dc9e0e4ecce1c1
|
refs/heads/master
| 2022-04-24T07:54:05.350193
| 2020-04-27T20:27:31
| 2020-04-27T20:27:31
| 257,911,320
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
t = int(input())
for _ in range(t):
n = int(input())
a =[]
for i in range(n):
u, v = [int(x) for x in input().strip().split()]
a.append([u,v])
if n==1:
res=a[0]
else:
mn = 10**10
si = None
for i in range(1,n):
if a[i][0]>a[i-1][1]:
diff = a[i-1][1]
else:
diff = a[i][0]
if diff<mn:
mn = diff
si = i
if a[0][0]>a[-1][1]:
diff = a[-1][1]
else:
diff = a[0][0]
if diff<mn:
mn = diff
si = 0
# print(si)
if si is None:
res = min(a[i][0] for i in range(n))
else:
# res=0
res=a[si][0]
ct=1
prev_i=si
i = si+1
if i==n:
i=0
while ct<n:
# print(i, prev_i, res)
res+=max(0,a[i][0]-a[prev_i][1])
prev_i = i
i+=1
if i==n:
i=0
ct+=1
print(res)
|
[
"pupalerushikesh@gmail.com"
] |
pupalerushikesh@gmail.com
|
2722446b64a90b86d4f746b22fdad8f220f3b9d9
|
172e35bb936c0219c1ae751f48c331047b7fd68f
|
/problems/py-strings/gc2.py
|
431b8360e044c41e1d33d7b736c245c75d625fd6
|
[] |
no_license
|
cmrrsn/abe487
|
8f25d49533450ee69637fe397aeec7ba374642e0
|
2572e418bbd7d76fe2028da07896c02de40cd420
|
refs/heads/master
| 2021-01-23T17:55:25.414975
| 2017-12-05T19:38:40
| 2017-12-05T19:38:40
| 102,779,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
#!/usr/bin/env python3
import sys
import os
def main():
    """Print the GC percentage (integer 0-100) of each line of the input file.

    Usage: gc2.py FILE -- exits with status 1 on a bad argument count or a
    non-file argument. Lines containing no recognized bases (e.g. blank lines
    or FASTA headers) are skipped; the original code raised ZeroDivisionError
    on them.
    """
    files = sys.argv[1:]
    if len(files) != 1:
        print('Usage: {} FILE' .format(sys.argv[0]))
        sys.exit(1)
    if not os.path.isfile(files[0]):
        print('"{}" is not a file'.format(files[0]))
        sys.exit(1)
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(files[0]) as fh:
        for seq in fh:
            total = 0  # recognized bases on this line (ACGT, either case)
            gc = 0     # G/C bases on this line
            for bp in seq:
                if bp in 'aAtTcCgG':
                    total += 1
                if bp in 'gGcC':
                    gc += 1
            if total == 0:
                # Nothing measurable on this line -- skip instead of dividing by 0.
                continue
            print('{}'.format(int((gc / total) * 100)))


if __name__ == '__main__':
    main()
|
[
"cmmorrison1@login2.cm.cluster"
] |
cmmorrison1@login2.cm.cluster
|
06b857862b7da068774dc2990c21f01bfaffbd27
|
aec21c0a63b4a29fe1c911fada0dc479c1803f02
|
/scripts/profile.py
|
80a0a6a6f07b8305226999c54b80794a781378f4
|
[
"MIT"
] |
permissive
|
ultimachine/Reflow-Profiler
|
fb3e4cde1a8649d4d68e4853eee43d39079ecec2
|
f472c14a9555473a83626909da16ffcf2db3f59b
|
refs/heads/master
| 2020-05-29T22:58:24.993275
| 2013-07-25T17:13:56
| 2013-07-25T17:13:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,600
|
py
|
#!/usr/bin/env python
# NOTE(review): Python 2 script (print statements, raw_input, integer-division
# assumptions). Do not run under Python 3 without porting.
import matplotlib.pyplot as plt
import serial
import time
import sys
import numpy as np
import re
import signal
import argparse

#Setup Command line arguments
parser = argparse.ArgumentParser(
    prog = "reflow-profiler",
    usage = "%(prog)s [options] input...",
    description = "Log thermocouple data from serial to profile a reflow oven."
)
parser.add_argument("-p", "--port", action='store', default='', help="set serial port")
parser.add_argument("-b", "--baudrate", action='store', type=int, default=115200, help="set serial port (default: 115200)")
parser.add_argument("-n", "--nograph", action='store_true', default=False, help="supress graph data")
parser.add_argument('--version', action='version', version="%(prog)s 0.0.1-dev")

#Always output help by default
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(0)
args = parser.parse_args()

endLoop = False
char = ''

#Setup shutdown handlers
def signal_handler(signal, frame):
    # Ctrl-C ends the capture loop (instead of killing the process) so the
    # data gathered so far still gets analyzed and written to disk.
    print "Stop record."
    global endLoop
    endLoop = True
signal.signal(signal.SIGINT, signal_handler)

print "Reflow Profiler"
if args.port == "":
    print "No serial port specified, exiting"
    sys.exit(0)
print "Opening " + args.port
print "Connecting at " + str(args.baudrate) + " baud"
ser = serial.Serial(port=args.port, baudrate=args.baudrate)
print "Initializing thermocouple board"
# Toggle DTR to reset the board, then wait for it to start sending.
ser.setDTR(False)
time.sleep(1)
ser.setDTR(True)
while not ser.inWaiting():
    time.sleep(0.1)
print "Press Enter to begin profiling"
ser.flushInput()
ser.flushOutput()
raw_input()
print "Gathering data"
profile = "Test started at: " + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
startTime = time.time()
output = ser.read(ser.inWaiting())
tempString = ""
lastLen = 0
lastTime = startTime
chars = 0
print "Current temp (C): ",
# Capture loop: echo the serial stream to stdout and accumulate it in
# `output` until SIGINT flips endLoop (see signal_handler above).
while not endLoop:
    char = ser.read(ser.inWaiting())
    output += char
    sys.stdout.write(char)
ser.close()
endTime = time.time()
humanEndTime = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
profile += "Test ended at: " + humanEndTime
runTime = endTime - startTime
# Every integer found in the captured text is treated as one temperature
# sample, assumed evenly spaced across the whole run.
samples = map(int,re.findall(r'\b\d+\b', output))
timeIncrement = runTime/len(samples)
timeScale = np.arange(0,runTime,timeIncrement)
# residency[0..5]: seconds 40->150C, seconds 150->175C, seconds to max temp,
# seconds above 227C, cooldown rate (C/sec), total profile length (minutes).
residency = [0,0,0,0,0,0]
finding = 0
maxTemp = max(samples)
firstReading = False
startAbove227 = 0
endAbove227 = 0
#find time above points
# Despike + state machine. NOTE(review): thresholds are matched with exact
# equality (val == 40, == 150, ...), so a profile that steps over a value
# never advances the state machine -- verify this is acceptable.
lastVal = samples[0]
for idx, val in enumerate(samples):
    # Despike: readings that jump more than +-2C from the previous value are
    # replaced by a clone of the previous value.
    if val in range(lastVal-3,lastVal+3,1):
        lastVal = val
    else:
        val = lastVal
        samples[idx] = lastVal
    # From here on startTime/lastTime hold sample *indices*, not epoch
    # seconds; they are converted to seconds via timeIncrement.
    if val == 40 and finding == 0:
        startTime = idx
        profileStart = idx
    elif val == 150 and finding == 0:
        lastTime = idx
        residency[finding] = (lastTime - startTime) * timeIncrement
        startTime = idx
        finding = 1
    elif val == 175 and finding == 1:
        lastTime = idx
        residency[finding] = (lastTime - startTime) * timeIncrement
        startTime = idx
        finding = 2
    elif val == maxTemp and finding == 2:
        lastTime = idx
        residency[finding] = (lastTime - startTime) * timeIncrement
        startTime = idx
        finding = 4
    elif val == 227 and not firstReading:
        startAbove227 = idx
        firstReading = True
    elif val == 227:
        endAbove227 = idx
    elif val == 95 and finding == 4:
        lastTime = idx
        residency[5] = (lastTime - profileStart) * timeIncrement/60
residency[3] = (endAbove227 - startAbove227) * timeIncrement
# NOTE(review): cooldown rate divides a fixed 187C drop by the index span
# from the last 95C sample to the last 227C sample; endAbove227 < lastTime
# makes this negative -- confirm the intended definition.
residency[4] = (227-40)/((endAbove227 - lastTime) * timeIncrement)
profile += "\nMax temp: " + str(maxTemp)
profile += "\nSeconds from 40C to 150C: " + str(residency[0])
profile += "\nSeconds from 150C to 175C: " + str(residency[1])
profile += "\nSeconds to max temp: " + str(residency[2])
profile += "\nSeconds above 227C: " + str(residency[3])
profile += "\nCooldown rate (C/sec): " + str(residency[4])
profile += "\nProfile length (min): " + str(residency[5]) + "\n"
print profile
profile = profile + "Data entries taken every " + str(timeIncrement) + " seconds:\n" + output
# Persist the report + raw data next to the script, stamped with end time.
path = humanEndTime+".waveprofile"
f = open(path,"w")
f.write(profile)
f.close()
if not args.nograph:
    # Temperature curve with reference lines at key reflow temperatures.
    plt.plot(timeScale, samples)
    plt.plot([0,runTime],[150,150])
    plt.plot([0,runTime],[175,175])
    plt.plot([0,runTime],[227,227])
    plt.plot([0,runTime],[235,235])
    plt.plot([0,runTime],[255,255])
    plt.ylabel('Temp (C)')
    plt.xlabel('Time (S)')
    plt.title('Thermocouple Output')
    plt.show()
|
[
"kd2cca@gmail.com"
] |
kd2cca@gmail.com
|
ccf640a6f3089b61899c512ea864d117a27d00e3
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/a7WiKcyrTtggTym3f_11.py
|
38c97ae03767b14cd4f73e59493d45390792e3c0
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
"""
Create a function that takes two numbers as arguments and return the LCM of
the two numbers.

### Examples

	lcm(3, 5) ➞ 15
	lcm(14, 28) ➞ 28
	lcm(4, 6) ➞ 12

### Notes

* Don't forget to return the result.
* You may want to use the GCD function to make this a little easier.
* LCM stands for least common multiple, the smallest multiple of both integers.
"""


def lcm(a, b):
    """Return the least common multiple of positive integers *a* and *b*."""
    from math import gcd
    # lcm(a, b) * gcd(a, b) == a * b, so this runs in O(log min(a, b))
    # instead of the original O(lcm) linear search upward from max(a, b).
    return a * b // gcd(a, b)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a9151a391b64c038d80fc25c24e8ae9bcc938c36
|
927fc31a0144c308a5c8d6dbe46ba8f2728276c9
|
/tasks/final_tasks/file_handling/2.count_word_in_file.py
|
7ad9f89f0c38383b2a89b17194e5f946ad3c11d8
|
[] |
no_license
|
ChandraSiva11/sony-presamplecode
|
b3ee1ba599ec90e357a4b3a656f7a00ced1e8ad3
|
393826039e5db8a448fa4e7736b2199c30f5ed24
|
refs/heads/master
| 2023-01-14T00:09:19.185822
| 2020-11-23T02:07:00
| 2020-11-23T02:07:00
| 299,527,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Python Program to Count the Number of Words in a Text File
def main():
    """Count whitespace-separated words in 'text_doc.txt' and print the total."""
    with open('text_doc.txt', 'r') as handle:
        word_count = sum(len(line.split()) for line in handle)
    print('Number of words', word_count)


if __name__ == '__main__':
    main()
|
[
"chandra2.s@aricent.com"
] |
chandra2.s@aricent.com
|
02640468512f314887a6c73faf4f94bf5dd034af
|
a0583ceb402497e77a2f683424decea16b8ab80b
|
/sushichef.py
|
0ca31f6a58af0eb64e63a82b0aa4e93b4a557a15
|
[
"MIT"
] |
permissive
|
richard-dinh/sushi-chef-newz-beat
|
b5a07e21ec18b7539071a520dc910c8910253d3a
|
766e5251b2f0002db0c5fe912fe0bf07f7c1ad94
|
refs/heads/main
| 2023-03-05T00:32:11.104007
| 2020-10-23T23:49:36
| 2020-10-23T23:49:36
| 309,466,267
| 0
| 0
|
MIT
| 2020-11-02T18:54:48
| 2020-11-02T18:54:47
| null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
#!/usr/bin/env python
import os
import sys
from ricecooker.utils import downloader, html_writer
from ricecooker.chefs import YouTubeSushiChef
from ricecooker.classes import nodes, files, questions, licenses
from ricecooker.config import LOGGER # Use LOGGER to print messages
from ricecooker.exceptions import raise_for_invalid_channel
from le_utils.constants import exercises, content_kinds, file_formats, format_presets, languages
# Run constants
################################################################################
CHANNEL_ID = "32e5033ebc7a456b91fccbd2747d4035"             # UUID of channel
CHANNEL_NAME = "Newz Beat"                                   # Name of Kolibri channel
CHANNEL_SOURCE_ID = "<yourid>"                               # Unique ID for content source
CHANNEL_DOMAIN = "<yourdomain.org>"                          # Who is providing the content
CHANNEL_LANGUAGE = "en"                                      # Language of channel
CHANNEL_DESCRIPTION = None                                  # Description of the channel (optional)
CHANNEL_THUMBNAIL = None                                    # Local path or url to image file (optional)
CONTENT_ARCHIVE_VERSION = 1                                 # Increment this whenever you update downloaded content

# Additional constants
################################################################################

# The chef subclass
################################################################################
class NewzBeatChef(YouTubeSushiChef):
    """
    This class converts content from the content source into the format required
    by Kolibri, then uploads the "Newz Beat" channel to Kolibri Studio.

    Your command line script should call the `main` method as the entry point,
    which performs the following steps:
      - Parse command line arguments and options (run `./sushichef.py -h` for details)
      - Call the `SushiChef.run` method which in turn calls `pre_run` (optional)
        and then the ricecooker function `uploadchannel` which in turn calls this
        class' `get_channel` method to get channel info, then `construct_channel`
        to build the contentnode tree.
    For more info, see https://ricecooker.readthedocs.io
    """
    # Channel metadata consumed by YouTubeSushiChef.get_channel; values come
    # from the run constants above.
    channel_info = {
        'CHANNEL_ID': CHANNEL_ID,
        'CHANNEL_SOURCE_DOMAIN': CHANNEL_DOMAIN,
        'CHANNEL_SOURCE_ID': CHANNEL_SOURCE_ID,
        'CHANNEL_TITLE': CHANNEL_NAME,
        'CHANNEL_LANGUAGE': CHANNEL_LANGUAGE,
        'CHANNEL_THUMBNAIL': CHANNEL_THUMBNAIL,
        'CHANNEL_DESCRIPTION': CHANNEL_DESCRIPTION,
    }


# CLI
################################################################################
if __name__ == '__main__':
    # This code runs when sushichef.py is called from the command line
    chef = NewzBeatChef()
    chef.main()
|
[
"kevino@theolliviers.com"
] |
kevino@theolliviers.com
|
fcceb2b371e8ee367db6bbabddea88495e6aef69
|
ed7708e815a30ebbac6efc696d826b9c3d59c561
|
/1049.py
|
1ec3142525e48fe57e4bc7096ee7b7216248eea4
|
[] |
no_license
|
LehmannPi/URIOnlineJudge
|
222e1c44f6613e79a023227bd6f1ab9ac353eedd
|
5bb82d7b623c152aeb89264ca16dc3c758bf1209
|
refs/heads/master
| 2020-12-04T23:34:01.365947
| 2020-01-05T15:38:17
| 2020-01-05T15:38:17
| 231,935,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# URI 1049 -- animal identification. Reads three lines from stdin
# (body structure, class, feeding habit) and prints the matching animal.
respostas = [input() for _ in range(3)]
estrutura, classe, habito = respostas
if estrutura == 'vertebrado':
    if classe == 'ave':
        animal = 'aguia' if habito == 'carnivoro' else 'pomba'
    else:
        animal = 'homem' if habito == 'onivoro' else 'vaca'
else:
    if classe == 'inseto':
        animal = 'pulga' if habito == 'hematofago' else 'lagarta'
    else:
        animal = 'sanguessuga' if habito == 'hematofago' else 'minhoca'
print(animal)
|
[
"noreply@github.com"
] |
LehmannPi.noreply@github.com
|
4e6c68f2c808e9ad3a64694e5548a0ffd6165878
|
7532253fcc5d2fb01ceefe924bac7de45376e97b
|
/main.py
|
2b371e6750e3035b3d4edc940eff390d60352c51
|
[
"MIT"
] |
permissive
|
NixonZ/QNetwork-RL
|
37879cc0b976ed9594833598ba3532546d2fef51
|
acf34dd8d598104267da88f3eacc3e44f06265a7
|
refs/heads/main
| 2023-08-28T18:11:31.536607
| 2021-09-29T13:14:54
| 2021-09-29T13:14:54
| 381,694,681
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,272
|
py
|
from environment.env import Env,node
from environment.metalog import U,Exp,metalog
import numpy as np
# from agent.agent import MPNN,Graph_Representation,Agent
from trainer import trainer
from agent.Qmix import Qmix
from agent.agent import device
# Experiment configuration.
p = 2    # num_priority passed to Env and node()
b = 32   # quantile count for metalog.from_sampler and Env's b
n = 6    # n_terms for metalog fits and Env
M = 100  # capacity-like constant passed to node() and Env

# Build the initial environment: two exponential arrival streams and a single
# node with two exponential-service metalog fits.
temp = Env(
    arrival = [lambda t: Exp(0.1),lambda t: Exp(0.1)],
    num_priority= p,
    network = [
        []
    ],
    nodes_list = [
        node( [
            metalog.from_sampler(b,lambda : Exp(0.2),n,(0,np.inf)),
            metalog.from_sampler(b,lambda : Exp(0.23),n,(0,np.inf))
        ], M, p),
    ],
    b = b,
    n_terms = n,
    M = M
)
# print(temp.action_space)
# print(temp.obervation_space)
# print(temp.get_state()[0].shape)
quantiles = np.array(
    [
        np.array(metalog.from_sampler(b,lambda : Exp(0.2),n,(0,np.inf)).quantile_val),
        np.array(metalog.from_sampler(b,lambda : Exp(0.2),n,(0,np.inf)).quantile_val)
    ])
# Mutate the network through the Env's action interface: two extra nodes,
# two edges, then a weight edit on the [0, 2] connection.
temp.step( ( "add node", (M-10,quantiles) ) )
temp.step( ( "add edge", (0, 1.00 ) ) )
temp.step( ( "add node", (M-50,quantiles) ) )
temp.step( ( "add edge", (2, 10 ) ) )
temp.step( ( "edit weights", [[0,2],0.9] ) )
# Inspect the resulting spaces and state (both numpy and torch-graph forms).
print()
print(temp.action_space)
print(temp.obervation_space)
print(temp.get_state()[0].shape)
print(temp.get_state())
print(temp.get_state_torch())
# print()
data = temp.get_state_torch()
edge_index = data.edge_index
edge_attr = data.edge_attr
x = data.x
# forward_message = MPNN((p,b),1,25,mode='forward').double()
# backward_message = MPNN((p,b),1,25,mode='backward').double()
# x = forward_message.forward(x,edge_attr,edge_index) + backward_message.forward(x,edge_attr,edge_index)
# print()
# model = Graph_Representation((p,b),1,250,500,2).double()
# print(model.forward(data))
# print()
# agent = Agent("edit weights",(p,b),1,25,50,2).double()
# print(agent.forward(data))
# print(agent)
# [ (p.numel(),p.names) for p in agent.parameters() ]
# NOTE: `temp` is rebound here -- from the Env to the trainer wrapping it.
temp = trainer(p,b,M,temp,10,25,3,[Exp(0.7) for _ in range(10000)], max_nodes=5, buffer_size=5e5, train_size=100, lr= 0.0001*6, gamma = 0.9, epsilon = 0.15)
temp.modules.to(device=device)
# Report the trainable parameter count before training.
print(sum(p.numel() for p in temp.modules.parameters() if p.requires_grad))
temp.train(100000,"test1")
# temp = Qmix(2,(p,b),1,100,250,10).double()
# temp.set_weights(data)
|
[
"nalinshani14@gmail.com"
] |
nalinshani14@gmail.com
|
ef74b6c780caea8be24fb7a36c1bd5e228e66148
|
f36a9701975eec736b5e43ab09ec318eee80c8cc
|
/pyspeckit/spectrum/widgets.py
|
ae949705761f3d5018daacf9ece04cedd453487e
|
[
"MIT"
] |
permissive
|
soylentdeen/pyspeckit
|
e995f38531256d85313038a0ddeb181a4c6480b8
|
11c449c6951468f2c07dfda3b1177b138f810f16
|
refs/heads/master
| 2021-01-18T11:32:51.659032
| 2013-06-26T00:39:22
| 2013-06-26T00:39:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,757
|
py
|
from matplotlib.widgets import Widget,Button,Slider
from matplotlib import pyplot
import matplotlib
class dictlist(list):
    """A list that is simultaneously addressable by position and by key.

    Items supplied as 2-element pairs are stored whole in the list, while the
    pair's first element becomes a lookup key for the pair's second element.
    Integer indexing uses the plain list; any other index is a key lookup.
    """

    def __init__(self, *args):
        list.__init__(self, *args)
        self._dict = {}        # key -> value
        self._dict_index = {}  # key -> position AND position -> key
        for pos, item in enumerate(self):
            if len(item) == 2:
                key, val = item
                self._dict[key] = val
                self._dict_index[key] = pos
                self._dict_index[pos] = key
            else:
                # Non-pair items are keyed by their own position.
                self._dict[pos] = item
                self._dict_index[pos] = pos

    def __getitem__(self, key):
        if type(key) is int:
            return super(dictlist, self).__getitem__(key)
        return self._dict[key]

    def __setitem__(self, key, value):
        if type(key) is int:
            # Positional write: update the list slot and the mirrored value.
            super(dictlist, self).__setitem__(key, value)
            self._dict[self._dict_index[key]] = value
        elif key in self._dict:
            # Existing key: update the mapping, then sync the list slot
            # through the integer path above.
            self._dict[key] = value
            self[self._dict_index[key]] = value
        else:
            # New key: record both directions of the mapping, then append.
            self._dict[key] = value
            self._dict_index[key] = len(self)
            self._dict_index[len(self)] = key
            self.append(value)

    def __slice__(self, s1, s2):
        pass  # placeholder: slicing support was never implemented

    def values(self):
        return [self._dict[self._dict_index[pos]] for pos in xrange(len(self))]

    def keys(self):
        return [self._dict_index[pos] for pos in xrange(len(self))]
class ModifiableSlider(Slider):
    """A matplotlib Slider whose valmin/valmax can be changed after creation."""

    def set_valmin(self, valmin):
        """
        Change the minimum value of the slider
        """
        self.valmin = valmin
        self.ax.set_xlim((self.valmin,self.valmax))
        # Clamp the current value into the new range.
        if self.val < self.valmin:
            self.set_val(self.valmin)
        # If the initial value fell below the new minimum, recenter it and
        # redraw the red marker line at the recentered position.
        if self.valinit < self.valmin:
            self.valinit = (self.valmax-self.valmin)/2. + self.valmin
            if self.vline in self.ax.lines:
                self.ax.lines.remove(self.vline)
            self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)

    def set_valmax(self, valmax):
        """
        Change the maximum value of the slider
        """
        self.valmax = valmax
        self.ax.set_xlim((self.valmin,self.valmax))
        # Clamp the current value into the new range.
        if self.val > self.valmax:
            self.set_val(self.valmax)
        # If the initial value exceeded the new maximum, recenter it and
        # redraw the red marker line at the recentered position.
        if self.valinit > self.valmax:
            self.valinit = (self.valmax-self.valmin)/2. + self.valmin
            if self.vline in self.ax.lines:
                self.ax.lines.remove(self.vline)
            self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
class FitterSliders(Widget):
    """
    A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`

    Builds one ModifiableSlider per fit parameter; moving a slider re-evaluates
    the model and redraws it on the target figure.
    """
    def __init__(self, specfit, targetfig, npars=1, toolfig=None, parlimitdict={}):
        """
        *targetfig*
            The figure instance to adjust

        *toolfig*
            The figure instance to embed the subplot tool into. If
            None, a default figure will be created. If you are using
            this from the GUI
        """
        # NOTE(review): parlimitdict={} is a mutable default argument. It is
        # only read here, but callers should still pass their own dict.
        self.targetfig = targetfig
        self.specfit = specfit
        self.parlimitdict = parlimitdict

        if toolfig is None:
            tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
            matplotlib.rcParams['toolbar'] = 'None'
            self.toolfig = pyplot.figure(figsize=(6,3))
            if hasattr(targetfig.canvas.manager,'window'):
                self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.title())
            self.toolfig.subplots_adjust(top=0.9,left=0.2,right=0.9)
            matplotlib.rcParams['toolbar'] = tbar
        else:
            self.toolfig = toolfig
            self.toolfig.subplots_adjust(left=0.2, right=0.9)

        bax = self.toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
        self.buttonreset = Button(bax, 'Reset')

        self.set_sliders(parlimitdict)

        def reset(event):
            # Restore every slider to its initial value with drawing
            # suspended, then redraw both canvases once at the end.
            thisdrawon = self.drawon

            self.drawon = False

            # store the drawon state of each slider
            bs = []
            for slider in self.sliders:
                bs.append(slider.drawon)
                slider.drawon = False

            # reset the slider to the initial position
            for slider in self.sliders:
                slider.reset()

            # reset drawon
            for slider, b in zip(self.sliders, bs):
                slider.drawon = b

            # draw the canvas
            self.drawon = thisdrawon
            if self.drawon:
                self.toolfig.canvas.draw()
                self.targetfig.canvas.draw()

        # during reset there can be a temporary invalid state
        # depending on the order of the reset so we turn off
        # validation for the resetting
        validate = self.toolfig.subplotpars.validate
        self.toolfig.subplotpars.validate = False
        self.buttonreset.on_clicked(reset)
        self.toolfig.subplotpars.validate = validate

    def clear_sliders(self):
        """
        Get rid of the sliders...
        """
        try:
            for sl in self.sliders:
                sl.ax.remove()
        except NotImplementedError:
            # fallback for matplotlib versions where Axes.remove() is not
            # implemented: delete the axes through the figure instead
            for sl in self.sliders:
                self.specfit.Spectrum.plotter.figure.delaxes(sl.ax)
        self.specfit.Spectrum.plotter.refresh()

    def set_sliders(self, parlimitdict={}):
        """
        Set the slider properties, actions, and values

        can also reset their limits
        """
        def update(value):
            # Re-evaluate the model from the current slider values and push
            # the new y-data into the plotted model (and component) lines.
            mpp = [slider.val for slider in self.sliders]
            for line in self.specfit.modelplot:
                line.set_ydata(self.specfit.get_model_frompars(line.get_xdata(),mpp))
            # update components too
            for ii,line in enumerate(self.specfit._plotted_components):
                xdata = line.get_xdata()
                modelcomponents = self.specfit.fitter.components(xdata, mpp, **self.specfit._component_kwargs)
                for jj,data in enumerate(modelcomponents):
                    if ii % 2 == jj:
                        # can have multidimensional components
                        if len(data.shape) > 1:
                            for d in (data):
                                line.set_ydata(d)
                        else:
                            line.set_ydata(data)
            self.specfit.Spectrum.plotter.refresh()

        self.sliders = dictlist()
        npars = len(self.specfit.parinfo)
        for param in self.specfit.parinfo:
            name = param['parname']
            value = param['value']
            limited = param['limited']
            limits = param['limits']

            # make one less subplot so that there's room for buttons
            # param['n'] is zero-indexed, subplots are 1-indexed
            ax = self.toolfig.add_subplot(npars+1,1,param['n']+1)
            ax.set_navigate(False)

            if name in parlimitdict:
                limits = parlimitdict[name]
                limited = [True,True]
            # Slider bounds: declared limits when present, otherwise a
            # factor-of-4 window around the value (min/max handles negatives),
            # with [-1, 1] as the fallback for a zero value.
            if limited[0]:
                vmin = limits[0]
            elif value != 0:
                vmin = min([value/4.0,value*4.0])
            else:
                vmin = -1
            if limited[1]:
                vmax = limits[1]
            elif value != 0:
                vmax = max([value/4.0,value*4.0])
            else:
                vmax = 1

            self.sliders[name] = ModifiableSlider(ax,
                name, vmin, vmax, valinit=value)
            self.sliders[-1].on_changed(update)

    def get_values(self):
        """Return the current value of every slider, in creation order."""
        return [s.val for s in self.sliders]
class FitterTools(Widget):
    """
    A tool to monitor and play with :class:`pyspeckit.spectrum.fitter` properties

    --------------------------
    | Baseline range [x,x]   |
    | Baseline order -       |
    | (Baseline subtracted)  |
    |                        |
    | Fitter range [x,x]     |
    | Fitter type ---------  |
    | Fitter Guesses [p,w]   |
    |  ...                   |
    |  ...                   |
    |                        |
    | (Fit) (BL fit) (reset) |
    --------------------------
    """

    def __init__(self, specfit, targetfig, toolfig=None, nsubplots=12):
        """
        *targetfig*
            The figure instance to adjust

        *toolfig*
            The figure instance to embed the subplot tool into. If
            None, a default figure will be created. If you are using
            this from the GUI
        """
        self.targetfig = targetfig
        self.specfit = specfit
        self.baseline = specfit.Spectrum.baseline
        self.plotter = specfit.Spectrum.plotter

        if toolfig is None:
            tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
            matplotlib.rcParams['toolbar'] = 'None'
            self.toolfig = pyplot.figure(figsize=(6,3))
            self.toolfig.canvas.set_window_title("Fit Tools for "+targetfig.canvas.manager.window.title())
            self.toolfig.subplots_adjust(top=0.9,left=0.05,right=0.95)
            matplotlib.rcParams['toolbar'] = tbar
        else:
            self.toolfig = toolfig
            self.toolfig.subplots_adjust(left=0.0, right=1.0)

        #bax = self.toolfig.add_axes([0.6, 0.05, 0.15, 0.075])
        #self.buttonrefresh = Button(bax, 'Refresh')

        # buttons ruin everything.
        # fax = self.toolfig.add_axes([0.1, 0.05, 0.15, 0.075])
        # self.buttonfit = Button(fax, 'Fit')
        #
        # resetax = self.toolfig.add_axes([0.7, 0.05, 0.15, 0.075])
        # self.buttonreset = Button(resetax, 'Reset')

        # resetblax = self.toolfig.add_axes([0.3, 0.05, 0.15, 0.075])
        # self.buttonresetbl = Button(resetblax, 'Reset BL')

        # resetfitax = self.toolfig.add_axes([0.5, 0.05, 0.15, 0.075])
        # self.buttonresetfit = Button(resetfitax, 'Reset fit')

        # The nested handlers below were wired to the (now disabled) buttons;
        # only self.refresh (the method further down) is connected today.
        def refresh(event):
            thisdrawon = self.drawon

            self.drawon = False

            self.update_information()

            # draw the canvas
            self.drawon = thisdrawon
            if self.drawon:
                self.toolfig.canvas.draw()
                self.targetfig.canvas.draw()

        def fit(event):
            self.specfit.button3action(event)

        def reset_fit(event):
            # Clear guesses/peaks and re-enable the full fitting mask.
            self.specfit.guesses = []
            self.specfit.npeaks = 0
            self.specfit.includemask[:] = True
            self.refresh(event)

        def reset_baseline(event):
            self.baseline.unsubtract()
            self.refresh(event)

        def reset(event):
            reset_baseline(event)
            reset_fit(event)
            self.plotter()
            self.refresh(event)

        # during refresh there can be a temporary invalid state
        # depending on the order of the refresh so we turn off
        # validation for the refreshting
        #validate = self.toolfig.subplotpars.validate
        #self.toolfig.subplotpars.validate = False
        #self.buttonrefresh.on_clicked(refresh)
        #self.toolfig.subplotpars.validate = validate

        # these break everything.
        # self.buttonfit.on_clicked(fit)
        # self.buttonresetfit.on_clicked(reset_fit)
        # self.buttonresetbl.on_clicked(reset_baseline)
        # self.buttonreset.on_clicked(reset)

        #menuitems = []
        #for label in ('polynomial','blackbody','log-poly'):
        #    def on_select(item):
        #        print 'you selected', item.labelstr
        #    item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
        #                    on_select=on_select)
        #    menuitems.append(item)

        #menu = Menu(fig, menuitems)

        # One bare (frameless, tickless) subplot row per information line.
        self.axes = [self.toolfig.add_subplot(nsubplots,1,spnum, frame_on=False, navigate=False, xticks=[], yticks=[])
                for spnum in xrange(1,nsubplots+1)]
        #self.axes = self.toolfig.add_axes([0,0,1,1])

        # Rows 3 is skipped to leave a visual gap between the baseline and
        # fitter sections (see the ASCII sketch in the class docstring).
        self.use_axes = [0,1,2,4,5,6,7,8,9,10,11]
        self.labels = dict([(axnum,None) for axnum in self.use_axes])
        self.update_information()

        # Refresh the info panel on any interaction with the target figure.
        self.targetfig.canvas.mpl_connect('button_press_event',self.refresh)
        self.targetfig.canvas.mpl_connect('key_press_event',self.refresh)
        self.targetfig.canvas.mpl_connect('draw_event',self.refresh)

    def refresh(self, event):
        """Rebuild the info rows and redraw; swallows errors after figure close."""
        try:
            thisdrawon = self.drawon

            self.drawon = False

            self.update_information()

            # draw the canvas
            self.drawon = thisdrawon
            if self.drawon:
                self.toolfig.canvas.draw()
        except:
            # ALWAYS fail silently
            # this is TERRIBLE coding practice, but I have no idea how to tell the object to disconnect
            # when the figure is closed
            pass

    def update_information(self, **kwargs):
        """Rebuild the (label, value) rows shown in the tool figure."""
        self.information = [
                ("Baseline Range","(%g,%g)" % (self.baseline.xmin,self.baseline.xmax)),
                ("Baseline Order","%i" % (self.baseline.order)),
                ("Baseline Subtracted?","%s" % (self.baseline.subtracted)),
                ("Fitter Range","(%g,%g)" % (self.specfit.xmin,self.specfit.xmax)),
                ("Fitter Type","%s" % (self.specfit.fittype)),
                ]

        # NOTE(review): the slice advances by 1 per peak, not by 3 -- for
        # 3-parameter peaks one would expect guesses[3*ii:3*ii+3]; verify.
        for ii in xrange(self.specfit.npeaks):
            guesses = tuple(self.specfit.guesses[ii:ii+3])
            if len(guesses) == 3:
                self.information += [("Fitter guesses%i:" % ii , "p: %g c: %g w: %g" % guesses) ]
            else:
                break

        self.show_labels(**kwargs)

    def show_selected_region(self):
        """Highlight the fitter's currently selected region on the target plot."""
        self.specfit.highlight_fitregion()

    def show_label(self, axis, text, xloc=0.0, yloc=0.5, **kwargs):
        """Place a left-aligned label text on *axis*."""
        return axis.text(xloc, yloc, text, **kwargs)

    def show_value(self, axis, text, xloc=0.5, yloc=0.5, **kwargs):
        """Place a value text at the middle of *axis*."""
        return axis.text(xloc, yloc, text, **kwargs)

    def show_labels(self, **kwargs):
        """Create or update one (label, value) text pair per information row."""
        for axnum,(label,text) in zip(self.use_axes, self.information):
            if self.labels[axnum] is not None and len(self.labels[axnum]) == 2:
                labelobject,textobject = self.labels[axnum]
                labelobject.set_label(label)
                textobject.set_text(text)
            else:
                self.labels[axnum] = (self.show_label(self.axes[axnum],label),
                        self.show_value(self.axes[axnum],text))

    def update_info_texts(self):
        # NOTE(review): apparently dead code -- self.information is a list
        # (no .values()) and self.info_texts is never assigned anywhere in
        # this file, so calling this would raise.
        for newtext,textobject in zip(self.information.values(), self.info_texts):
            textobject.set_text(newtext)
#import parinfo
#
#class ParameterButton(parinfo.Parinfo):
# """
# A class to manipulate individual parameter values
# """
# def __init__(self,
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
407c2a6677c326a7a56789bea899851a9a6a5764
|
dda862418770f3885256d96e9bdb13d0759c5f43
|
/codeforces/april-fools-day/is_it_rated.py
|
2f78bf4c3d6d9df72b4d6880be8c4503b3f93453
|
[
"MIT"
] |
permissive
|
bellatrixdatacommunity/data-structure-and-algorithms
|
d56ec485ebe7a5117d4922caeb0cd44c5dddc96f
|
d24c4001a797c12347973263a0f4f98939e86900
|
refs/heads/master
| 2022-12-03T00:51:07.944915
| 2020-08-13T20:30:51
| 2020-08-13T20:30:51
| 270,268,375
| 4
| 0
|
MIT
| 2020-08-13T20:30:53
| 2020-06-07T10:19:36
|
Python
|
UTF-8
|
Python
| false
| false
| 114
|
py
|
"""
[A. Is it rated?](https://codeforces.com/contest/1331/problem/A)
"""
# April Fools contest task: the expected output is the single fixed line below.
print("No") # The contest was not rated
|
[
"adityaraman96@gmail.com"
] |
adityaraman96@gmail.com
|
2846ddd10fe2bc71174ac826856a840867ef6f23
|
8ed9ad7935736bd53c112768e823585d67300773
|
/pipelines/record_only.py
|
64f333cad79b7a1fd045f547a2cb2fae9b60ceb9
|
[] |
no_license
|
danielhertenstein/streaming-object-detection
|
5913da972d72cdd6ce4aedbfb2b83f10de261292
|
3b7aa2923d0f5e4b03ff97ce13e12509f1abcacc
|
refs/heads/master
| 2021-04-27T10:12:51.607914
| 2018-04-28T19:04:39
| 2018-04-28T19:04:39
| 122,532,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
import assembler
from pieces import WebcamCapture, Record
def main():
    """Assemble and run a two-stage pipeline: WebcamCapture feeding Record.

    The capture stage gets no options; the recording stage is configured with
    a 'framerate' option of 60.0.
    """
    pipeline = assembler.Pipeline(
        pieces=[[WebcamCapture], [Record]],
        options=[[{}], [{'framerate': 60.0}]]
    )
    pipeline.run()


if __name__ == '__main__':
    main()
|
[
"daniel.hertenstein@gmail.com"
] |
daniel.hertenstein@gmail.com
|
5e2bc1d085bbd63facebfeafee69c6b6b83a580a
|
c1aae64218b22140097dd9c4047684fcaa61cb6e
|
/code/栈的压入、弹出序列.py
|
8ae1b7115d253d3ed4f61511426741ad471436da
|
[] |
no_license
|
KIM199511/-offer
|
b3b75609d4885b1616db541aa10e54f83655e4ce
|
0ddb5c0d7f32fe3654ab9de7ac132a50c016dd7e
|
refs/heads/master
| 2022-11-08T22:00:02.598019
| 2020-07-05T16:27:29
| 2020-07-05T16:27:29
| 277,333,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/11 19:16
# @Author : XXX
# @title : 栈的压入、弹出序列
# @Site :
# @File : 栈的压入、弹出序列.py
# @Software: PyCharm
class Solution:
    def IsPopOrder(self, pushV, popV):
        """Return True when ``popV`` is a valid pop order for pushing ``pushV``.

        Simulates the stack: each value of ``pushV`` is pushed in order, and
        the top is popped greedily whenever it matches the next expected value
        of ``popV``. The sequence is valid iff every value of ``popV`` gets
        matched this way.

        Fixes over the original implementation:
        * no IndexError when the simulated stack empties mid-sequence
          (e.g. ``IsPopOrder([1, 2], [1, 2])`` used to crash);
        * the input lists are no longer mutated;
        * empty inputs return a boolean instead of falling through to None.
        """
        stack = []
        expected = 0  # index of the next value we expect to pop
        for value in pushV:
            stack.append(value)
            # Pop greedily while the stack top matches the expected value.
            while stack and expected < len(popV) and stack[-1] == popV[expected]:
                stack.pop()
                expected += 1
        return expected == len(popV)
if __name__ == '__main__':
    # Smoke test: [4, 5, 3, 2, 1] is a valid pop order for pushes [1..5].
    A = Solution()
    a = [1,2,3,4,5]
    b = [4,5,3,2,1]
    print(A.IsPopOrder(a,b))
|
[
"15061112157@163.com"
] |
15061112157@163.com
|
731f1ef4f038c7584b72ccae8637f6c4ca8c0302
|
6414ff7510850f898ae791af24bd4daebedd1ed8
|
/Unet_Mobile/train.py
|
e62ad2b35e106fb7c030a35c6630893425aba13a
|
[
"MIT"
] |
permissive
|
Ice833/Semantic-Segmentation
|
00ba943a0e33e34e19cbd579598ef8ac4f081460
|
23d23f6da3b34884c044a2253d65a1e4097adb2d
|
refs/heads/master
| 2022-12-02T02:37:46.145036
| 2020-08-14T02:17:15
| 2020-08-14T02:17:15
| 284,207,399
| 0
| 0
|
MIT
| 2020-08-01T07:05:31
| 2020-08-01T07:05:30
| null |
UTF-8
|
Python
| false
| false
| 4,563
|
py
|
from nets.unet import mobilenet_unet
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
import keras
from keras import backend as K
import numpy as np
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
def generate_arrays_from_file(lines,batch_size):
    """Infinite Keras generator yielding (X, Y) batches from the dataset2 folder.

    Each entry of *lines* is "<jpg name>;<png name>": the jpg is the input
    image (resized to WIDTH x HEIGHT, scaled to [0, 1]) and the png's first
    channel holds per-pixel class ids, one-hot encoded at half resolution and
    flattened to (pixels, NCLASSES).
    """
    # total number of sample lines
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        # assemble one batch_size worth of (image, label) pairs
        for _ in range(batch_size):
            # reshuffle at the start of each full pass through the data
            if i==0:
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            # load the input image
            img = Image.open(r".\dataset2\jpg" + '/' + name)
            img = img.resize((WIDTH,HEIGHT))
            img = np.array(img)
            img = img/255
            X_train.append(img)
            name = (lines[i].split(';')[1]).replace("\n", "")
            # load the label image (class ids read from the first channel)
            img = Image.open(r".\dataset2\png" + '/' + name)
            img = img.resize((int(WIDTH/2),int(HEIGHT/2)))
            img = np.array(img)
            # one-hot encode: channel c marks pixels whose class id equals c
            seg_labels = np.zeros((int(HEIGHT/2),int(WIDTH/2),NCLASSES))
            for c in range(NCLASSES):
                seg_labels[: , : , c ] = (img[:,:,0] == c ).astype(int)
            seg_labels = np.reshape(seg_labels, (-1,NCLASSES))
            Y_train.append(seg_labels)
            # wrap the cursor around after a full pass
            i = (i+1) % n
        yield (np.array(X_train),np.array(Y_train))
def loss(y_true, y_pred):
    """Categorical cross-entropy, wrapped so model.compile can take it by name."""
    # Delegate directly; no intermediate variable (which also shadowed
    # this function's own name in the original).
    return K.categorical_crossentropy(y_true, y_pred)
if __name__ == "__main__":
    log_dir = "logs/"
    # Build the MobileNet-backbone U-Net.
    model = mobilenet_unet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
    # model.summary()
    # Download ImageNet-pretrained MobileNet weights and load what matches
    # (by_name + skip_mismatch: layers that differ are simply skipped).
    BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'
                        'releases/download/v0.6/')
    model_name = 'mobilenet_%s_%d_tf_no_top.h5' % ( '1_0' , 224 )
    weight_path = BASE_WEIGHT_PATH + model_name
    weights_path = keras.utils.get_file(model_name, weight_path )
    print(weight_path)
    model.load_weights(weights_path,by_name=True,skip_mismatch=True)
    # model.summary()
    # Open the dataset's train.txt listing "<jpg>;<png>" pairs per line.
    with open(r".\dataset2\train.txt","r") as f:
        lines = f.readlines()
    # Shuffle the lines (fixed seed for reproducibility, then reset);
    # shuffled data trains better.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    # 90% of the lines for training, 10% for validation.
    num_val = int(len(lines)*0.1)
    num_train = len(lines) - num_val
    # Checkpointing: save best-only weights, checked once per epoch.
    checkpoint_period = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=1
    )
    # Halve the learning rate after 3 epochs without val_loss improvement.
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=3,
        verbose=1
    )
    # Stop training after 10 epochs without val_loss improvement.
    # NOTE(review): early_stopping is created but not included in the
    # callbacks list passed to fit_generator below -- confirm intent.
    early_stopping = EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=10,
        verbose=1
    )
    # Compile with categorical cross-entropy (the `loss` wrapper above).
    model.compile(loss = loss,
            optimizer = Adam(lr=1e-3),
            metrics = ['accuracy'])
    batch_size = 2
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    # Start training from the shuffled split defined above.
    model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
            validation_steps=max(1, num_val//batch_size),
            epochs=50,
            initial_epoch=0,
            callbacks=[checkpoint_period, reduce_lr])
    model.save_weights(log_dir+'last1.h5')
|
[
"noreply@github.com"
] |
Ice833.noreply@github.com
|
510ee037286e9bb98097631e6f4b1e333b3618f9
|
93c58b92803d0467d29fd9c9c4b1a3998bcf283f
|
/transcriptions/models/show_the_model.py
|
c55516278826758fd4b5e249b1086d7d9947b427
|
[
"MIT"
] |
permissive
|
Ipuch/dms-vs-dc
|
ac4ac5d62bca9839cbe7b60f58fd9cb7f96f4416
|
2878e00f4a862c5bcb9064b0c962922af6be00ea
|
refs/heads/main
| 2023-09-04T00:00:22.787777
| 2023-08-21T20:13:19
| 2023-08-21T20:13:19
| 519,305,613
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,598
|
py
|
"""
This file is to display the human model into bioviz
"""
import os
import bioviz
from transcriptions import Models
# model_name = Models.LEG
# model_name = Models.ARM
# model_name = Models.ACROBAT
model_name = Models.UPPER_LIMB_XYZ_VARIABLES
# model_name = Models.HUMANOID_10DOF
export_model = False
background_color = (1, 1, 1) if export_model else (0.5, 0.5, 0.5)
show_gravity_vector = False if export_model else True
show_floor = False if export_model else True
# show_local_ref_frame = False if export_model else True
show_local_ref_frame = True
show_global_ref_frame = False if export_model else True
show_global_ref_frame = False
show_markers = False if export_model else True
show_mass_center = False if export_model else True
show_global_center_of_mass = False if export_model else True
show_segments_center_of_mass = False if export_model else True
def print_all_camera_parameters(biorbd_viz: bioviz.Viz):
print("Camera roll: ", biorbd_viz.get_camera_roll())
print("Camera zoom: ", biorbd_viz.get_camera_zoom())
print("Camera position: ", biorbd_viz.get_camera_position())
print("Camera focus point: ", biorbd_viz.get_camera_focus_point())
if model_name == Models.LEG:
biorbd_viz = bioviz.Viz(model_name.value,
show_gravity_vector=False,
show_floor=False,
show_local_ref_frame=show_local_ref_frame,
show_global_ref_frame=show_global_ref_frame,
show_markers=show_markers,
show_mass_center=show_mass_center,
show_global_center_of_mass=False,
show_segments_center_of_mass=True,
mesh_opacity=1,
background_color=(1, 1, 1),
)
biorbd_viz.resize(1000, 1000)
biorbd_viz.set_camera_roll(-82.89751054930615)
biorbd_viz.set_camera_zoom(2.7649491449197656)
biorbd_viz.set_camera_position(1.266097531449429, -0.6523601622496974, 0.24962580067391163)
biorbd_viz.set_camera_focus_point(0.07447263939980919, 0.025078204682856153, -0.013568198245759833)
if model_name == Models.ARM:
biorbd_viz = bioviz.Viz(
model_name.value,
show_gravity_vector=False,
show_floor=False,
show_local_ref_frame=show_local_ref_frame,
show_global_ref_frame=show_global_ref_frame,
show_markers=show_markers,
show_mass_center=show_mass_center,
show_global_center_of_mass=False,
show_segments_center_of_mass=True,
mesh_opacity=1,
background_color=(1, 1, 1),
)
biorbd_viz.resize(1000, 1000)
biorbd_viz.set_q([-0.15, 0.24, -0.41, 0.21, 0, 0])
biorbd_viz.set_camera_roll(-84.5816885957667)
biorbd_viz.set_camera_zoom(2.112003880097381)
biorbd_viz.set_camera_position(1.9725681105744026, -1.3204979216430117, 0.35790018139336177)
biorbd_viz.set_camera_focus_point(-0.3283876664932833, 0.5733643134562766, 0.018451815011995998)
if model_name == Models.ACROBAT:
biorbd_viz = bioviz.Viz(
model_name.value,
show_gravity_vector=False,
show_floor=False,
show_local_ref_frame=False,
show_global_ref_frame=False,
show_markers=False,
show_mass_center=False,
show_global_center_of_mass=False,
show_segments_center_of_mass=False,
mesh_opacity=1,
background_color=(1, 1, 1),
)
biorbd_viz.set_camera_position(-8.782458942185185, 0.486269131372712, 4.362010279585766)
biorbd_viz.set_camera_roll(90)
biorbd_viz.set_camera_zoom(0.308185240948253)
biorbd_viz.set_camera_focus_point(1.624007185850899, 0.009961251074366406, 1.940316420941989)
biorbd_viz.resize(600, 900)
if model_name == Models.UPPER_LIMB_XYZ_VARIABLES:
biorbd_viz = bioviz.Viz(
model_name.value,
show_gravity_vector=False,
show_floor=False,
show_local_ref_frame=False,
show_global_ref_frame=False,
show_markers=False,
show_mass_center=False,
show_global_center_of_mass=False,
show_segments_center_of_mass=False,
mesh_opacity=1,
background_color=(1, 1, 1),
)
biorbd_viz.resize(1000, 1000)
# biorbd_viz.set_q([-0.15, 0.24, -0.41, 0.21, 0, 0])
biorbd_viz.set_camera_roll(-100.90843467296737)
biorbd_viz.set_camera_zoom(1.9919059008044755)
biorbd_viz.set_camera_position(0.8330547810707182, 2.4792370867179256, 0.1727481994453778)
biorbd_viz.set_camera_focus_point(-0.2584435804313228, 0.8474543937884143, 0.2124670559215174)
# get current path file
# file_name, extension = os.path.splitext(model_name)
# biorbd_viz.snapshot(f"{file_name}/{Models.UPPER_LIMB_XYZ_VARIABLES.name}.png")
if model_name == Models.HUMANOID_10DOF:
biorbd_viz = bioviz.Viz(
model_name.value[0],
show_gravity_vector=False,
show_floor=False,
show_local_ref_frame=False,
show_global_ref_frame=False,
show_markers=False,
show_mass_center=False,
show_global_center_of_mass=False,
show_segments_center_of_mass=False,
mesh_opacity=1,
background_color=(1, 1, 1),
)
biorbd_viz.resize(1000, 1000)
biorbd_viz.set_q([-0.20120228, 0.84597746, -0.12389997, -0.15, 0.41, -0.37, -0.86, 0.36, 0.39, 0.66, -0.58, 0])
biorbd_viz.set_camera_roll(-91.44517177211645)
biorbd_viz.set_camera_zoom(0.7961539827851234)
biorbd_viz.set_camera_position(4.639962934524132, 0.4405891958030146, 0.577705598983718)
biorbd_viz.set_camera_focus_point(-0.2828701273331326, -0.04065388066757992, 0.9759133347931428)
biorbd_viz.exec()
print_all_camera_parameters(biorbd_viz)
print("Done")
|
[
"pierre.puchaud@umontreal.ca"
] |
pierre.puchaud@umontreal.ca
|
dc518d3adbaa5570a85345dacbb2b97213280b09
|
eb35535691c4153ba2a52774f0e40468dfc6383d
|
/hash_table/uncommon_words.py
|
849d39c6b50e9e3e7e62e2067fc6a68f1b0c2178
|
[] |
no_license
|
BJV-git/leetcode
|
1772cca2e75695b3407bed21af888a006de2e4f3
|
dac001f7065c3c5b210024d1d975b01fb6d78805
|
refs/heads/master
| 2020-04-30T19:04:12.837450
| 2019-03-21T21:56:24
| 2019-03-21T21:56:24
| 177,027,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
def uncommon_words(A,B):
    """Return the words that appear exactly once across both sentences.

    A, B: space-separated sentences.  The result preserves first-seen order
    (words of A first, then words of B).
    """
    counts = {}
    for word in A.split(' ') + B.split(' '):
        counts[word] = counts.get(word, 0) + 1
    return [word for word, seen in counts.items() if seen == 1]
|
[
"noreply@github.com"
] |
BJV-git.noreply@github.com
|
2226ec9d9a3aed988198452e5700c9f65c9fc0f1
|
185d97a996e75d153b881d0ebf847ec33e7b049d
|
/primepairs.py
|
1fa54588c0bb0b309897a824aaa8546b59b8aa53
|
[] |
no_license
|
shivank96/pythonbasiclevel
|
88a12d366ed015928cc21b405dcb9ca96d0ade36
|
46623ad75ec176161a780b75651fa80c2d35f107
|
refs/heads/master
| 2023-04-03T13:43:32.974100
| 2021-04-12T13:11:21
| 2021-04-12T13:11:21
| 305,385,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
import collections
def primepairs():
    """Read an integer N from stdin, print every prime pair summing to N,
    then print the list of primes up to N.

    Each pair is printed once: the first member must not exceed ceil(N/2).
    Output format is unchanged from the original implementation.
    """
    n = int(input())
    # Collect all primes <= n by trial division (a prime has exactly
    # two divisors: 1 and itself).
    primes = []
    for candidate in range(2, n + 1):
        divisors = 0
        for j in range(1, candidate + 1):
            if candidate % j == 0:
                divisors += 1
        if divisors == 2:
            primes.append(candidate)
    # Upper bound for the first pair member so each pair prints only once
    # (ceil(n/2): an odd n rounds up).
    if n % 2 == 0:
        response = int(n / 2)
    else:
        response = int(n / 2) + 1
    for l in range(len(primes)):
        for m in range(len(primes)):
            if primes[l] + primes[m] == n and primes[l] <= response:
                print(primes[l], "+", primes[m], "=", n)
    print(primes)
primepairs()
|
[
"shivankg96@gmail.com"
] |
shivankg96@gmail.com
|
244746f59dab7356af77d6b088d09be0109e7eea
|
5e76a420178dcb9008d6e4c12543ad0e3a50c289
|
/python/104.py
|
188ebec7d7866ddc2ac4ab6f887b025327467442
|
[] |
no_license
|
LichAmnesia/LeetCode
|
da6b3e883d542fbb3cae698a61750bd2c99658fe
|
e890bd480de93418ce10867085b52137be2caa7a
|
refs/heads/master
| 2020-12-25T14:22:58.125158
| 2017-07-18T06:44:53
| 2017-07-18T06:44:53
| 67,002,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
# -*- coding: utf-8 -*-
# @Author: Lich_Amnesia
# @Email: alwaysxiaop@gmail.com
# @Date: 2016-09-18 17:38:27
# @Last Modified time: 2016-09-18 17:41:20
# @FileName: 104.py
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 104: maximum depth of a binary tree."""
    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Depth of an empty tree is 0; otherwise 1 plus the deeper subtree.
        """
        if root is None:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return 1 + max(left_depth, right_depth)
|
[
"lich@D-10-157-202-8.dhcp4.washington.edu"
] |
lich@D-10-157-202-8.dhcp4.washington.edu
|
ba877f1d961002ab0c0e83e9928982deed661a66
|
089e5c10201815ff5b44c4b1aa510f5745d43c13
|
/ThreadUtil.py
|
164bb25bfb019c158db26893fbf2b162a4011a33
|
[] |
no_license
|
Remaerdeno/Passer-zhihu
|
ebe1589b44dd388141224d47bcef8cb37239dece
|
8f60d6d3952df90dad92b095a1d6aae520e8eaf9
|
refs/heads/master
| 2021-06-17T15:47:51.668190
| 2017-06-07T13:11:01
| 2017-06-07T13:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
# encoding=utf8
import Util,urllib
from IpProxy import *
class ThreadDeco(object):
    """Decorator class: wraps a worker function, then runs a page-fetch loop.

    ``__call__`` first invokes the wrapped function (whose body is just a
    hook; see ``thread_queue`` below), then consumes URLs until the end
    sentinel is seen.  Expected ``*args`` (inferred from the loop body --
    TODO confirm against callers):
      args[0] -- queue of page URLs to crawl
      args[1] -- queue receiving successful responses (and the end sentinel)
      args[2] -- requests-like session object providing ``.get()``
    """
    def __init__(self,func):
        # The wrapped worker function.
        self._func = func
    def __call__(self,*args):
        self._func(*args)
        proxy = Proxy()
        while True:
            if not args[0].empty():
                page = args[0].get()
                # ENG_FLAG is the end-of-work sentinel: forward it so the
                # downstream consumer also stops, then exit.
                if page == Util.ENG_FLAG:
                    args[1].put(Util.ENG_FLAG)
                    break
                # Retry the same page until it succeeds or is permanently gone.
                while True:
                    try:
                        res = args[2].get(url=page,headers=Util.Default_Headers,timeout=2)
                        status_code = res.status_code
                        if status_code == 200:
                            args[1].put(res)
                            args[0].task_done()
                            break
                        elif status_code == 404:
                            # NOTE(review): task_done() is not called on the
                            # 404/401/410 paths -- a queue.join() elsewhere
                            # would block forever; confirm intended.
                            break
                        elif status_code == 401 or status_code == 410:
                            break
                        else:
                            # Other status (e.g. throttling): rotate proxy, retry.
                            Util.PROXIES['https'] = proxy.archieve_activity_proxy()
                    except Exception as e:
                        # Network error/timeout: rotate proxy and retry.
                        Util.PROXIES['https'] = proxy.archieve_activity_proxy()
def init_thread(url,pageCount,url_queue,s):
    """Seed *url_queue* with one paginated API URL per page of results.

    url: base zhihu API endpoint; its last path segment selects the
        ``include`` field set (see choose_include).
    pageCount: page size (the ``limit`` query parameter).
    url_queue: queue filled with '<url>?<query>' strings, one per page.
    s: requests-like session used to probe the total result count.

    Retries with rotated proxies until a definitive HTTP status arrives.
    """
    ftype = url.split('/')[-1]
    offset,count_page = 1,0
    post_data = {'offset':offset,'limit':pageCount,'include':choose_include(ftype)}
    answer_data = urllib.parse.urlencode(post_data)
    proxy = Proxy()
    while True:
        try:
            r = s.get(url='{}?{}'.format(url,answer_data),headers=Util.Default_Headers,timeout=2)
            status_code = r.status_code
            if status_code == 200:
                # Total pages = ceil(totals / pageCount).
                count_page = (int(r.json()['paging']['totals'])-1)//pageCount + 1
                for page in range(0,count_page):
                    post_data['offset'] = page*pageCount
                    answer_data = urllib.parse.urlencode(post_data)
                    url_queue.put('{}?{}'.format(url,answer_data))
                break
            elif status_code == 404:
                break
            elif status_code == 401 or status_code == 410:
                break
            else:
                # Unexpected status: rotate the proxy and retry.
                Util.PROXIES['https'] = proxy.archieve_activity_proxy()
        except Exception as e:
            # Network failure/timeout: rotate the proxy and retry.
            Util.PROXIES['https'] = proxy.archieve_activity_proxy()
def choose_include(ftype):
    """Return the zhihu API ``include`` query value for an endpoint type.

    Unknown endpoint types yield an empty string.
    """
    follow_fields = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    question_fields = 'data[*].created,answer_count,follower_count,author'
    include_map = {
        'voters': 'data[*].answer_count,articles_count,follower_count,gender,is_followed,is_following,badge[?(type=best_answerer)].topics',
        'followees': follow_fields,
        'followers': follow_fields,
        'favlists': 'data[*].updated_time,answer_count,follower_count,creator,is_following',
        'answers': 'data[*].is_normal,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,mark_infos,created_time,updated_time,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,upvoted_followees;data[*].author.badge[?(type=best_answerer)].topics',
        'following-columns': 'data[*].intro,followers,articles_count,image_url,image_width,image_height,is_following,last_article.created',
        'following-questions': question_fields,
        'questions': question_fields,
        'comments': 'data[*].author,collapsed,reply_to_author,disliked,content,voting,vote_count,is_parent_author,is_author',
    }
    return include_map.get(ftype, '')
@ThreadDeco
def thread_queue(urlqueue,htmlqueue,session):
    # Intentionally empty: decorating with ThreadDeco means calling
    # thread_queue(urlqueue, htmlqueue, session) runs ThreadDeco.__call__,
    # which performs all the work with these arguments as *args.
    pass
|
[
"904727147@qq.com"
] |
904727147@qq.com
|
1af4a2a2ce07a8d901b6177b7776dc669f8f06fe
|
faf50edfb415f2c5232d3279cf9b6d3684d1bb39
|
/src/python/ml/eval_functions.py
|
939d893f862fccee2383548006d15267f6b62b37
|
[] |
no_license
|
ALFA-group/EEG_coma
|
aff66ed15f597bdde8583e11bec7d33210bab224
|
c2e65ab1d6491378c71520bc75827f8c1374715d
|
refs/heads/main
| 2023-04-23T18:42:17.519875
| 2021-05-04T21:22:09
| 2021-05-04T21:22:09
| 364,260,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,344
|
py
|
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
import math
def encode(bursts, masks, encoder):
    """Run padded burst sequences through *encoder*, trimmed to the longest mask.

    bursts: numpy array or torch tensor, shape (batch, pad_len).
    masks: same shape; row sums give each sequence's true length.
    encoder: callable returning (enc_out, hidden, cell) for the trimmed batch.

    Returns (enc_out, hidden, cell) on success; on any failure returns
    (bursts, masks, None) after logging the error.
    """
    try:
        if isinstance(bursts, np.ndarray):
            bursts = torch.Tensor(bursts)
        if isinstance(masks, np.ndarray):
            masks = torch.Tensor(masks)
        bursts = Variable(bursts)
        masks = Variable(masks)
        if torch.cuda.is_available():
            bursts = bursts.cuda()
            masks = masks.cuda()
        burst_lens = masks.sum(dim=1)
        max_seq_length = int(burst_lens.max().data.cpu().numpy()[0])
        # Trim the padding down to the longest real sequence in the batch.
        bursts_trimmed = bursts.narrow(1, 0, max_seq_length)
        enc_out, hidden, cell = encoder(bursts_trimmed)
    except Exception as e:
        # BUG FIX: the original placed the print AFTER the return, so errors
        # were silently swallowed; log first, then fall back.
        print(e)
        return bursts, masks, None
    return enc_out, hidden, cell
def autoencode(inp_array, encoder, decoder, toss_encoder_output=False, reverse=False):
    """Encode a single burst with *encoder*, decode it, return a numpy array.

    inp_array: 1-D numpy array or torch tensor of (padded) burst data.
    toss_encoder_output: if True, decode from all-zero hidden/cell state
        instead of the encoder's state (a baseline ignoring the encoding).
    reverse: if True, flip the decoded sequence before returning.

    Returns a numpy array whose length equals the input's padded length.
    """
    if isinstance(inp_array, np.ndarray):
        inp_array = torch.Tensor(inp_array)
    inp_var = Variable(inp_array)
    if torch.cuda.is_available():
        inp_var = inp_var.cuda()
    pad_length = inp_var.size(0)
    enc_out, hidden, cell = encoder(inp_var.view(1, pad_length))
    # BUG FIX: the original always decoded from the encoder state and, when
    # toss_encoder_output was set, decoded a SECOND time from zero state,
    # discarding the first (expensive) decoder call.  Decode exactly once.
    if toss_encoder_output:
        output = decoder(torch.zeros_like(hidden), torch.zeros_like(cell), pad_length)
    else:
        output = decoder(hidden, cell, pad_length)
    out_array = output.data.cpu().numpy().reshape(pad_length)
    if reverse:
        return np.flip(out_array, axis=0).copy()
    return out_array
def plot_autoencoding(sample, encoder, decoder, toss_encoder_output=False, reverse=False, undownsampled=None):
    """Plot a burst against its autoencoder reconstruction.

    sample: one dataset element (dict with 'burst' and 'mask' tensors).
    undownsampled: same element without downsampling, used to plot the
        original full-resolution burst against the (shorter) output.
    """
    seq_len = int(sample['mask'].sum())
    inp_array = sample['burst'].cpu().numpy()
    out_array = autoencode(inp_array, encoder, decoder, toss_encoder_output, reverse)
    if undownsampled is None:
        plt.plot(inp_array[:seq_len], label='Original burst')
        plt.plot(out_array[:seq_len], label='Autoencoder output')
    else:
        unds_seq_len = int(undownsampled['mask'].sum())
        unds_inp_array = undownsampled['burst'].cpu().numpy()
        out_array_trim = out_array[:seq_len]
        unds_inp_array_trim = unds_inp_array[:unds_seq_len]
        # Downsampling factor between the full and downsampled sequences
        # (0.0+ forces float division under Python 2).
        ds_factor = math.ceil(len(unds_inp_array_trim)/(0.0+len(out_array_trim)))
        # BUG FIX: removed two earlier x-range assignments that were
        # immediately overwritten (dead code); only these two are used.
        undownsampled_xrange = np.arange(0, len(unds_inp_array_trim), 1)
        out_xrange = np.arange(0, len(unds_inp_array_trim), ds_factor)
        plt.plot(undownsampled_xrange, unds_inp_array_trim, label='Original burst')
        # BUG FIX: label typo 'Autoencder' corrected.
        plt.plot(out_xrange, out_array_trim, label='Autoencoder output')
    plt.title('Original vs autoencoded')
    plt.xlabel('Samples')
    plt.ylabel('Signal')
    plt.legend()
    plt.show()
def get_mse(sample, encoder, decoder, toss_encoder_output=False, reverse=False):
    # sample is just an element of the dataset
    """Return the mask-weighted mean squared error between a burst and its
    autoencoder reconstruction (a Python float-like scalar).

    NOTE(review): ``MSELoss(reduce=False)`` and ``.data.cpu().numpy()[0]``
    are old-PyTorch idioms (pre-0.4 Variable API); on modern torch the
    final ``[0]`` on a 0-d array raises IndexError -- confirm the pinned
    torch version before upgrading.
    """
    seq_len = int(sample['mask'].sum())
    inp_array = sample['burst'].cpu().numpy()
    out_array = autoencode(inp_array, encoder, decoder, toss_encoder_output, reverse)
    # Per-element squared errors (no reduction) so the mask can zero padding.
    loss_fn = torch.nn.MSELoss(reduce=False)
    out_var = Variable(torch.Tensor(out_array))
    burst_var = Variable(sample['burst'])
    mask_var = Variable(sample['mask'])
    if torch.cuda.is_available():
        out_var = out_var.cuda()
        burst_var = burst_var.cuda()
        mask_var = mask_var.cuda()
    mses = loss_fn(out_var, burst_var)
    # avg_mses is size [batch_size], giving mse for each elt in batch
    # Masked average: padded positions contribute 0; divide by true length.
    avg_mse = torch.sum(mses * mask_var) / seq_len
    avg_mse = avg_mse.data.cpu().numpy()[0]
    return avg_mse
|
[
"shash@mit.edu"
] |
shash@mit.edu
|
1d898f4d7db5808af12b3e9bd413033060f8403f
|
dfaf6f7ac83185c361c81e2e1efc09081bd9c891
|
/k8sdeployment/k8sstat/python/kubernetes/test/test_v1_local_object_reference.py
|
db02de623a1ffb63d799a47e9d655bb2206d76b9
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
JeffYFHuang/gpuaccounting
|
d754efac2dffe108b591ea8722c831d979b68cda
|
2c63a63c571240561725847daf1a7f23f67e2088
|
refs/heads/master
| 2022-08-09T03:10:28.185083
| 2022-07-20T00:50:06
| 2022-07-20T00:50:06
| 245,053,008
| 0
| 0
|
MIT
| 2021-03-25T23:44:50
| 2020-03-05T02:44:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 994
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1LocalObjectReference(unittest.TestCase):
    """V1LocalObjectReference unit test stubs"""
    # Generated stub (OpenAPI generator); the construction test is still a
    # placeholder -- see the FIXME below.
    def setUp(self):
        # No fixtures required for this stub.
        pass
    def tearDown(self):
        pass
    def testV1LocalObjectReference(self):
        """Test V1LocalObjectReference"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_local_object_reference.V1LocalObjectReference() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"JeffYFHuang@github.com"
] |
JeffYFHuang@github.com
|
ecf6379021503cfa49fc77acfc14f9a1a5136758
|
4ebb06450c79980f5726bba62e1a61dad811c1da
|
/src/excerpt_server.py
|
1ae4afb3b92564a3a532b3c2a7613999be2257cc
|
[
"MIT"
] |
permissive
|
joy13975/covidprof_submission
|
5a2b1d89c0c53f6c11bed8669b865e8ae7209ae8
|
b3c7bb0ebf6fa1557edb8d1ca5d3d41377508e7d
|
refs/heads/main
| 2023-01-31T14:15:51.336935
| 2020-12-08T12:43:58
| 2020-12-08T12:43:58
| 314,133,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
import logging
import json
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
from excerpt_gen import ExcerptGen
from base.config_loader import ConfigLoader
class ExcerptServer(ConfigLoader):
    """HTTP server exposing ExcerptGen over two JSON POST endpoints.

    POST /get_excerpts            {"question": ..., "url": ...}
    POST /get_excerpts_from_docs  {"question": ..., "docs": [...]}

    Host/port and model settings come from the ConfigLoader config and may
    be overridden via keyword arguments to :meth:`run`.
    """
    def run(self, **kwargs):
        """Build the model once, then serve forever (blocking)."""
        # allow command line arguments to overwrite config
        for k, v in kwargs.items():
            setattr(self, k, v)
        eg = ExcerptGen(accelerator=self.accelerator,
                        model_name=self.model_name)
        # Handler is defined inside run() so it closes over the single
        # ExcerptGen instance shared by all requests.
        class RequestHandler(BaseHTTPRequestHandler):
            def _set_response(self):
                # 200 + JSON content-type headers.
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
            def do_POST(self):
                # refuse to receive non-json content
                ctype = self.headers['Content-Type']
                if ctype != 'application/json':
                    logging.info(f'Got Content-Type={ctype}')
                    self.send_response(400)
                    self.end_headers()
                    return
                content_length = int(self.headers['Content-Length'])
                body = self.rfile.read(content_length)
                logging.info(f'POST request,\nPath: {self.path}')
                # Route by path; unknown paths answer 400 with no body.
                if self.path == '/get_excerpts':
                    response = self._get_excerpts(body)
                elif self.path == '/get_excerpts_from_docs':
                    response = self._get_excerpts_from_docs(body)
                else:
                    logging.info(f'Got path={self.path}')
                    self.send_response(400)
                    self.end_headers()
                    return
                self._set_response()
                self.wfile.write(json.dumps(response).encode('utf-8'))
            def _get_excerpts(self, body):
                # Fetch excerpts for a question from a URL (COVID Wikipedia
                # page by default).
                message = json.loads(body)
                question = message.get('question', '')
                url = message.get(
                    'url', 'https://en.wikipedia.org/wiki/COVID-19_pandemic')
                if question:
                    response = \
                        eg.get_excerpts(question, url=url)
                else:
                    response = {'error': 'No question provided'}
                return response
            def _get_excerpts_from_docs(self, body):
                # Fetch excerpts for a question from caller-supplied documents.
                message = json.loads(body)
                question = message.get('question', '')
                docs = message.get('docs', [])
                if question and docs:
                    response = eg.get_excerpts_from_docs(question, docs)
                else:
                    response = {
                        'error': (f'No question (len={len(question)}) or '
                                  f'docs (len={len(docs)}) provided')
                    }
                return response
        httpd = HTTPServer((self.host, self.port), RequestHandler)
        logging.info(f'Listening on {self.host}:{self.port}')
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            logging.info('KeyboardInterrupt')
        finally:
            # Always release the listening socket.
            httpd.server_close()
if __name__ == '__main__':
    # Configure root logging, then start the server; extra CLI arguments of
    # the form key=value override config attributes (e.g. port=8080).
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    s = ExcerptServer()
    s.run(**dict(v.split('=') for v in sys.argv[1:]))
|
[
"joyyeh.tw@gmail.com"
] |
joyyeh.tw@gmail.com
|
6a5c1ea749f8102e0c5b36a0df88242b59dbcb09
|
96a62c5639154d9985148bce698362eb5be19735
|
/svr-2.7/arelle/PrototypeDtsObject.py
|
551bd298f0b3d1e1619e37f3bbadb514a22f77a6
|
[
"Apache-2.0"
] |
permissive
|
sternshus/not_arelle2.7
|
c535d35305af08a7d774be9bac8eeb68ba7c62bf
|
e2315999c514d6d30897168a98e5f343b06520f9
|
refs/heads/master
| 2020-04-14T07:27:43.615174
| 2016-06-20T21:16:02
| 2016-06-20T21:16:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,115
|
py
|
from arelle import XmlUtil, XbrlConst
from arelle.ModelValue import QName
from arelle.XmlValidate import VALID
from collections import defaultdict
import decimal, os
ModelDocument = None
class LinkPrototype(): # behaves like a ModelLink for relationship prototyping
    """Lightweight stand-in for a ModelLink (XLink extended-link element).

    Exposes the subset of the lxml/ModelObject API the relationship code
    touches: iteration over children, ``getparent``, attribute ``get`` and
    subscripting.
    """
    def __init__(self, modelDocument, parent, qname, role):
        self.modelDocument = modelDocument
        self._parent = parent
        self.modelXbrl = modelDocument.modelXbrl
        self.qname = self.elementQname = qname
        self.role = role
        # children are arc and loc elements or prototypes
        self.childElements = []
        self.text = self.textValue = None
        self.attributes = {u"{http://www.w3.org/1999/xlink}type":u"extended"}
        if role:
            self.attributes[u"{http://www.w3.org/1999/xlink}role"] = role
        self.labeledResources = defaultdict(list)
    def clear(self):
        self.__dict__.clear() # dereference here, not an lxml object, don't use superclass clear()
    def __iter__(self):
        return iter(self.childElements)
    def getparent(self):
        return self._parent
    def iterchildren(self):
        return iter(self.childElements)
    def get(self, key, default=None):
        return self.attributes.get(key, default)
    def __getitem__(self, key):
        # BUG FIX: was named "__getitem" (missing trailing underscores), so
        # subscripting a LinkPrototype raised TypeError instead of returning
        # the attribute value.
        return self.attributes[key]
class LocPrototype():
    """Lightweight stand-in for a link:loc locator element.

    ``locObject`` is either a fragment id (string) or a direct object
    pointer; :meth:`dereference` resolves it either way.
    """
    def __init__(self, modelDocument, parent, label, locObject, role=None):
        self.modelDocument = modelDocument
        self._parent = parent
        self.modelXbrl = modelDocument.modelXbrl
        self.qname = self.elementQname = XbrlConst.qnLinkLoc
        self.text = self.textValue = None
        # children are arc and loc elements or prototypes
        self.attributes = {u"{http://www.w3.org/1999/xlink}type":u"locator",
                           u"{http://www.w3.org/1999/xlink}label":label}
        # add an href if it is a 1.1 id
        # NOTE(review): _STR_BASE is not defined in this module's visible
        # imports (it is arelle's string-base alias) -- confirm it is imported
        # elsewhere or this isinstance check raises NameError.
        if isinstance(locObject,_STR_BASE): # it is an id
            self.attributes[u"{http://www.w3.org/1999/xlink}href"] = u"#" + locObject
        if role:
            self.attributes[u"{http://www.w3.org/1999/xlink}role"] = role
        self.locObject = locObject
    def clear(self):
        self.__dict__.clear() # dereference here, not an lxml object, don't use superclass clear()
    @property
    def xlinkLabel(self):
        return self.attributes.get(u"{http://www.w3.org/1999/xlink}label")
    def dereference(self):
        if isinstance(self.locObject,_STR_BASE): # dereference by ID
            return self.modelDocument.idObjects[self.locObject]
        else: # it's an object pointer
            return self.locObject
    def getparent(self):
        return self._parent
    def get(self, key, default=None):
        return self.attributes.get(key, default)
    def __getitem__(self, key):
        # BUG FIX: was "__getitem" (missing trailing underscores); subscript
        # access never worked.
        return self.attributes[key]
class ArcPrototype():
    """Lightweight stand-in for an XLink arc element used when prototyping
    relationships; carries from/to labels, roles and an order."""
    def __init__(self, modelDocument, parent, qname, fromLabel, toLabel, linkrole, arcrole, order=u"1"):
        self.modelDocument = modelDocument
        self._parent = parent
        self.modelXbrl = modelDocument.modelXbrl
        self.qname = self.elementQname = qname
        self.linkrole = linkrole
        self.arcrole = arcrole
        self.order = order
        self.text = self.textValue = None
        # children are arc and loc elements or prototypes
        self.attributes = {u"{http://www.w3.org/1999/xlink}type":u"arc",
                           u"{http://www.w3.org/1999/xlink}from": fromLabel,
                           u"{http://www.w3.org/1999/xlink}to": toLabel,
                           u"{http://www.w3.org/1999/xlink}arcrole": arcrole}
        # must look validated (because it can't really be validated)
        self.xValid = VALID
        self.xValue = self.sValue = None
        self.xAttributes = {}
    @property
    def orderDecimal(self):
        # Order as an exact decimal for sorting relationships.
        return decimal.Decimal(self.order)
    def clear(self):
        self.__dict__.clear() # dereference here, not an lxml object, don't use superclass clear()
    def getparent(self):
        return self._parent
    def get(self, key, default=None):
        return self.attributes.get(key, default)
    def items(self):
        return self.attributes.items()
    def __getitem__(self, key):
        # BUG FIX: was "__getitem" (missing trailing underscores); subscripting
        # an ArcPrototype raised TypeError instead of returning the attribute.
        return self.attributes[key]
class DocumentPrototype():
    """Lightweight stand-in for a ModelDocument during DTS discovery.

    Infers the document type from the referring element (schemaRef /
    linkbaseRef) and resolves the URI through the web cache, but holds no
    parsed content.
    """
    def __init__(self, modelXbrl, uri, base=None, referringElement=None, isEntry=False, isDiscovered=False, isIncluded=None, namespace=None, reloadCache=False, **kwargs):
        global ModelDocument
        # Imported lazily to avoid a circular import with arelle.ModelDocument.
        if ModelDocument is None:
            from arelle import ModelDocument
        self.modelXbrl = modelXbrl
        self.skipDTS = modelXbrl.skipDTS
        self.modelDocument = self
        # Document type follows the element that referenced this document.
        if referringElement is not None:
            if referringElement.localName == u"schemaRef":
                self.type = ModelDocument.Type.SCHEMA
            elif referringElement.localName == u"linkbaseRef":
                self.type = ModelDocument.Type.LINKBASE
            else:
                self.type = ModelDocument.Type.UnknownXML
        else:
            self.type = ModelDocument.Type.UnknownXML
        # Resolve uri against base, map to the cached local file, then
        # re-normalize so self.uri reflects the actual cache location.
        normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(uri, base)
        self.filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(normalizedUri, filenameOnly=True)
        self.uri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(self.filepath)
        self.basename = os.path.basename(self.filepath)
        self.targetNamespace = None
        self.referencesDocument = {}
        self.hrefObjects = []
        self.schemaLocationElements = set()
        self.referencedNamespaces = set()
        self.inDTS = False
        self.xmlRootElement = None
    def clear(self):
        self.__dict__.clear() # dereference here, not an lxml object, don't use superclass clear()
|
[
"ndraper2@gmail.com"
] |
ndraper2@gmail.com
|
d301667e9da5f7d349fdf435dc6c5bdd2dd9d67e
|
46bd3e3ba590785cbffed5f044e69f1f9bafbce5
|
/env/lib/python3.8/site-packages/pip/_vendor/pep517/envbuild.py
|
7e6160fc539bc7bd382d6a660739256889eb380f
|
[] |
no_license
|
adamkluk/casper-getstarted
|
a6a6263f1547354de0e49ba2f1d57049a5fdec2b
|
01e846621b33f54ed3ec9b369e9de3872a97780d
|
refs/heads/master
| 2023-08-13T11:04:05.778228
| 2021-09-19T22:56:59
| 2021-09-19T22:56:59
| 408,036,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2dc493d0c01299c40d2ce16a0cfc43a12d648e4825c7c17a784868049f835a48
size 6112
|
[
"a.klukowski@live.com"
] |
a.klukowski@live.com
|
95606499c7800d62f8e508016262efca1b1262b1
|
92225c51f4d4ccf6330afc83eb8cebc9eda2d767
|
/mach_o/headers/prebind_cksum_command.py
|
1bde67456e66355c17e8e187f9f685e37f328299
|
[
"Apache-2.0"
] |
permissive
|
jeffli678/MachOTool
|
17f120b1dbec0f5b50d56b4bcd450ae5be08dad5
|
469c0fd06199356fcc6d68809c7ba15a12eac1fd
|
refs/heads/master
| 2020-07-02T01:23:21.048970
| 2015-10-31T09:21:59
| 2015-10-31T09:21:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
from utils.header import MagicField, Field
from load_command import LoadCommandHeader, LoadCommandCommand
class PrebindCksumCommand(LoadCommandHeader):
    """Mach-O LC_PREBIND_CKSUM load command (struct prebind_cksum_command):
    cmd, cmdsize, and the prebind checksum."""
    ENDIAN = None
    FIELDS = (
        # BUG FIX: the magic mapping said LC_DYSYMTAB (copy-paste from the
        # dysymtab command); this class parses LC_PREBIND_CKSUM.
        MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_PREBIND_CKSUM']: 'LC_PREBIND_CKSUM'}),
        Field('cmdsize', 'I'),
        Field('cksum', 'I'),
    )
    def __init__(self, bytes_=None, **kwargs):
        # Checksum value; populated by the base header parser.
        self.cksum = None
        super(PrebindCksumCommand, self).__init__(bytes_, **kwargs)
|
[
"henrykwok2000@yahoo.com"
] |
henrykwok2000@yahoo.com
|
d4d5988e623e558b14497efc3dc84f1a4d62bd63
|
bd0a310924a6987250314f995be4d264731e6efa
|
/python_auto/part 2/part2_2.py
|
49740cee76e666254c3d9aa1174d4463242f4493
|
[] |
no_license
|
Maryks44/my_project
|
5696e4e8ba473947c13d48b98ef14a1c358de032
|
9ce7afd6dc4a027635cca803b0c7a09a85942a53
|
refs/heads/master
| 2023-06-24T06:21:30.709721
| 2021-07-30T15:35:19
| 2021-07-30T15:35:19
| 381,142,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
# Print N lines of asterisks: line i is numbered and contains exactly i
# asterisks.  (Comment translated from Russian.)
count = int(input('Укажите число N: '))
for i in range(1, count + 1):
    print(i, '*' * i)
|
[
"maryks44m@yandex.ru"
] |
maryks44m@yandex.ru
|
20e72c8d12bd0c7a49dd9f63a286d85a7d31f4fe
|
15fb5db5c65b06303e3251f11bf2450e7bb1fa74
|
/Mac_address.py
|
5f1f47a3e7b393fc4dc88ed91322a8ce6141cc65
|
[] |
no_license
|
schirrecker/Codewars
|
27d18af79f29cb1b7d8980344f858e1ecb031274
|
e1ee5126488589c1213779480f6b066c85740f74
|
refs/heads/master
| 2022-01-14T09:08:47.617250
| 2022-01-10T05:49:24
| 2022-01-10T05:49:24
| 158,765,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
import time
from time import sleep
import os
import uuid
def get_mac():
    """Return this machine's MAC address formatted as 'XX-XX-XX-XX-XX-XX'.

    BUG FIX: hex() drops leading zeros, so a node value starting with a zero
    byte produced fewer than six groups; zero-pad to 12 hex digits first.
    """
    mac_num = '%012X' % uuid.getnode()
    mac = '-'.join(mac_num[i: i + 2] for i in range(0, 12, 2))
    return mac
# Demo: print the raw 48-bit node value, then the formatted MAC address.
print(uuid.getnode())
print(get_mac())
|
[
"noreply@github.com"
] |
schirrecker.noreply@github.com
|
131cac583fbecae810f6bf5d0f112c5dca2fef27
|
9894fe2e1a1777845f7625fdff21092ba7629c96
|
/src/functions/Preprocess.py
|
0006dce3b6a14abeb294f8ff4cc947b40515b967
|
[] |
no_license
|
Gion-KS/GSSL
|
443ef533779b682e87894109e5946fbde9550dbb
|
90207f29f45512760eb1141b5a484184095e9cff
|
refs/heads/master
| 2020-06-04T00:04:22.364907
| 2019-06-13T14:45:40
| 2019-06-13T14:45:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,586
|
py
|
import re
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import string
from sklearn.datasets import fetch_20newsgroups
from src.classes import Dataset
from sklearn.feature_extraction.text import CountVectorizer
import src.functions.Vocabulary as voc
from src.classes.dataset import Dataset
# Preprocess the 20 Newsgroups data
def process(categories):
    """Run the full 20-Newsgroups preprocessing pipeline for *categories*:
    fetch each category, lemmatize, strip stopwords, write per-category
    files, then prune frequent/infrequent words and emit the v2 files.

    NOTE(review): remove_stopwords, remove_frequent_and_infrequent_words and
    print_v2_test_docs_vocabulary are defined elsewhere in this module --
    confirm availability.
    """
    i = 0
    while i < len(categories):
        # Fetch one category at a time, stripping headers/footers/quotes.
        trainingdata = fetch_20newsgroups(subset='train',
                                          remove=('headers', 'footers', 'quotes'),
                                          categories=[categories[i]])
        testdata = fetch_20newsgroups(subset='test',
                                      remove=('headers', 'footers', 'quotes'),
                                      categories=[categories[i]])
        lemmatize_newsgroup(trainingdata, testdata, categories[i])
        remove_stopwords(trainingdata)
        remove_stopwords(testdata)
        print_docs(trainingdata, testdata, categories[i])
        i += 1
    # Second pass: reload the printed files and prune the vocabulary.
    dataset = Dataset(categories)
    dataset.load_preprocessed_V1(categories)
    remove_frequent_and_infrequent_words(dataset.train)
    print_docs_reduced_feature_count(dataset, categories)
    print_v2_docs(categories)
    print_v2_test_docs_vocabulary(categories)
# Determines how a word shall change to convert it to its base form
def get_wordnet_pos(word):
    """Map a word's Penn-Treebank POS tag (first letter) to the matching
    WordNet POS constant; unknown tags default to NOUN."""
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    mapping = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    return mapping.get(first_letter, wordnet.NOUN)
# Lemmatization of text in documents
def lemmatize_newsgroup(newsgroups_train, newsgroups_test, category):
    """Lower-case and lemmatize every document in both subsets, IN PLACE.

    Each document is tokenized, punctuation tokens are dropped, and each
    remaining token is lemmatized using its POS tag; progress is printed
    per document.
    """
    i = 0
    lemmatizer = WordNetLemmatizer()
    size = len(newsgroups_train.data)
    print("Lemmatization in progress...")
    print(category + " training data: ", i, "/", size)
    while i < len(newsgroups_train.data):
        newsgroups_train.data[i] = newsgroups_train.data[i].lower()
        newsgroups_train.data[i] = (" ".join([lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in
                                              nltk.word_tokenize(newsgroups_train.data[i]) if w not in
                                              string.punctuation]))
        i += 1
        print(category + " training data: ", i, "/", size)
    size = len(newsgroups_test.data)
    i = 0
    print(category + " test data: ", i, "/", size)
    while i < len(newsgroups_test.data):
        newsgroups_test.data[i] = newsgroups_test.data[i].lower()
        newsgroups_test.data[i] = (" ".join([lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in
                                             nltk.word_tokenize(newsgroups_test.data[i]) if w not in
                                             string.punctuation]))
        i += 1
        print(category + " test data: ", i, "/", size)
    print("Lemmatization finished")
# prints the training and test documents of a category to a their respective file.
def print_docs(newsgroups_train, newsgroups_test, category):
    """Write the train and test documents of *category* to their files,
    one document per line.

    NOTE(review): writing ``.encode("utf-8")`` into a text-mode file emits
    the bytes object's repr (b'...') under Python 3 -- confirm the intended
    Python version.  The f.close() calls are redundant after ``with``.
    """
    i = 0
    print("Printing docs...")
    with open('../assets/20newsgroups/train/newsgroups_train_' + category + '.txt', 'w') as f:
        while i < len(newsgroups_train.data):
            f.write("%s\n" % newsgroups_train.data[i].encode("utf-8"))
            i += 1
    f.close()
    i = 0
    with open('../assets/20newsgroups/test/newsgroups_test_' + category + '.txt', 'w') as f:
        while i < len(newsgroups_test.data):
            f.write("%s\n" % newsgroups_test.data[i].encode("utf-8"))
            i += 1
    f.close()
    print("Printing finished...")
# prints the training documents after frequent and infrequent words have been removed
def print_docs_reduced_feature_count(dataset, categories):
    """Group the training documents of *dataset* by category and write each group to its train file.

    Groups with zero or one document are skipped (original behavior).
    Replaces the O(docs * categories) linear category scan with a dict lookup.
    """
    print("Printing docs...")
    buckets = [[] for _ in categories]
    # First occurrence wins, matching the original break-on-first-match scan.
    index_of = {}
    for idx, name in enumerate(categories):
        index_of.setdefault(name, idx)
    for doc, target in zip(dataset.train['data'], dataset.train['target']):
        name = dataset.train['target_names'][target]
        if name in index_of:
            buckets[index_of[name]].append(doc)
    print("Docs sorted")
    for name, docs in zip(categories, buckets):
        if len(docs) > 1:
            with open('../assets/20newsgroups/train/newsgroups_train_' + name + '.txt', 'w') as f:
                for doc in docs:
                    f.write("%s\n" % doc)
    print("Printing finished...")
# prints a new version of the previously printed documents to a new file
# documents without content are not printed
def print_v2_docs(categories):
    """Clean and re-print each category's train/test documents into train2/test2.

    Every line loses its non-word characters, single letters and repeated
    whitespace.  Cleaned lines of 4 characters or fewer are dropped; test
    lines must additionally share at least one word with the training
    vocabulary (CountVectorizer with stop words, max_df=0.5, min_df=10).
    """
    removed_train = 0
    removed_test = 0
    print("Printing docs...")
    # The vocabulary depends only on the full category list, so build it once
    # instead of re-fitting inside the per-category loop (it was loop-invariant).
    dataset = Dataset(categories)
    vectorizer = CountVectorizer(stop_words=get_stopwords(), max_df=0.5, min_df=10)
    vectorizer.fit_transform(dataset.train['data'])
    vocabulary = vectorizer.vocabulary_
    for category in categories:
        with open('../assets/20newsgroups/train2/newsgroups_train_' + category + '.txt', 'w') as f:
            lines = [line.rstrip('\n') for line in open('../assets/20newsgroups/train/newsgroups_train_'
                                                        + category + '.txt')]
            for line in lines:
                line = re.sub(r'[^\w]', " ", line)
                line = re.sub(r'\b[a-zA-Z]\b', " ", line)
                line = re.sub(r'[ \t]+', " ", line)  # remove extra space or tab
                line = line.strip() + "\n"
                if len(line) > 4:
                    f.write(line)
                else:
                    removed_train += 1
        with open('../assets/20newsgroups/test2/newsgroups_test_' + category + '.txt', 'w') as f:
            lines = [line.rstrip('\n') for line in open('../assets/20newsgroups/test/newsgroups_test_'
                                                        + category + '.txt')]
            for line in lines:
                line = re.sub(r'[^\w]', " ", line)
                line = re.sub(r'\b[a-zA-Z]\b', " ", line)
                line = re.sub(r'[ \t]+', " ", line)  # remove extra space or tab
                line = line.strip() + "\n"
                # keep only test lines that contain at least one vocabulary word
                if len(line) > 4 and any(word in vocabulary for word in line.split()):
                    f.write(line)
                else:
                    removed_test += 1
    print("Printing finished")
    print("Removed training doc:", removed_train)
    print("Removed testing doc:", removed_test)
# same function as print_v2_docs but prints a new version of test docs which for when the vocabulary constructed
# using all the training documents are in use
def print_v2_test_docs_vocabulary(categories):
    """Clean and re-print each category's test documents into test2vocabulary/.

    Identical cleaning to print_v2_docs, but lines are kept only when they
    share a word with the vocabulary from voc.get_vocabulary(categories).
    """
    removed_test = 0
    print("Printing docs...")
    # Loop-invariant: the vocabulary is the same for every category, so fit once.
    dataset = Dataset(categories)
    vectorizer = CountVectorizer(vocabulary=voc.get_vocabulary(categories))
    vectorizer.fit_transform(dataset.train['data'])
    vocabulary = vectorizer.vocabulary_
    for category in categories:
        with open('../assets/20newsgroups/test2vocabulary/newsgroups_test_' + category + '.txt', 'w') as f:
            lines = [line.rstrip('\n') for line in open('../assets/20newsgroups/test/newsgroups_test_'
                                                        + category + '.txt')]
            for line in lines:
                line = re.sub(r'[^\w]', " ", line)
                line = re.sub(r'\b[a-zA-Z]\b', " ", line)
                line = re.sub(r'[ \t]+', " ", line)  # remove extra space or tab
                line = line.strip() + "\n"
                # keep only lines that contain at least one vocabulary word
                if len(line) > 4 and any(word in vocabulary for word in line.split()):
                    f.write(line)
                else:
                    removed_test += 1
    print("Printing finished")
    print("Removed testing doc:", removed_test)
# same as print_v2_test_docs_vocabulary but for when the runtime vocabulary are in use
def print_v2_test_docs_vocabulary_labeled(categories):
    """Clean and re-print each category's test documents into test2vocabulary_labeled/.

    Same as print_v2_test_docs_vocabulary, but filters against the vocabulary
    from voc.get_vocabulary_only_labeled(categories).
    """
    removed_test = 0
    print("Printing docs...")
    # Loop-invariant: the vocabulary is the same for every category, so fit once.
    dataset = Dataset(categories)
    vectorizer = CountVectorizer(vocabulary=voc.get_vocabulary_only_labeled(categories))
    vectorizer.fit_transform(dataset.train['data'])
    vocabulary = vectorizer.vocabulary_
    for category in categories:
        with open('../assets/20newsgroups/test2vocabulary_labeled/newsgroups_test_' + category + '.txt', 'w') as f:
            lines = [line.rstrip('\n') for line in open('../assets/20newsgroups/test/newsgroups_test_'
                                                        + category + '.txt')]
            for line in lines:
                line = re.sub(r'[^\w]', " ", line)
                line = re.sub(r'\b[a-zA-Z]\b', " ", line)
                line = re.sub(r'[ \t]+', " ", line)  # remove extra space or tab
                line = line.strip() + "\n"
                # keep only lines that contain at least one vocabulary word
                if len(line) > 4 and any(word in vocabulary for word in line.split()):
                    f.write(line)
                else:
                    removed_test += 1
    print("Printing finished")
    print("Removed testing doc:", removed_test)
# get stopwords from file
def get_stopwords():
    """Return the stop-word list (one word per line) from ../assets/stopwords.txt."""
    # https://github.com/suzanv/termprofiling/blob/master/stoplist.txt
    with open('../assets/stopwords.txt') as f:
        return f.read().split("\n")
# removes words with which occur in less than 10 document and more than 50%
def remove_frequent_and_infrequent_words(newsgroup):
    """Strip too-frequent/too-rare words from every document in *newsgroup*, in place.

    Builds two frequency-ranked vocabularies — one filtered by
    CountVectorizer(max_df=0.5, min_df=10) and one unfiltered — and removes
    every word that appears only in the unfiltered one.
    """
    # Filtered vocabulary: words kept by the document-frequency thresholds.
    vectorizer = CountVectorizer(max_df=0.5, min_df=10)
    vectors = vectorizer.fit_transform(newsgroup['data'])
    vocabulary = voc.get_top_n_words(newsgroup['data'], len(vectorizer.vocabulary_))
    # Unfiltered vocabulary over the same corpus.
    vectorizer = CountVectorizer()
    vectors = vectorizer.fit_transform(newsgroup['data'])
    vocabulary_with_freq_and_infreq = voc.get_top_n_words(newsgroup['data'], len(vectorizer.vocabulary_))
    # get_top_n_words returns (word, count) pairs; keep just the words.
    i = 0
    while i < len(vocabulary_with_freq_and_infreq):
        vocabulary_with_freq_and_infreq[i] = vocabulary_with_freq_and_infreq[i][0]
        if i < len(vocabulary):
            vocabulary[i] = vocabulary[i][0]
        i += 1
    print(len(vocabulary))
    print(len(vocabulary_with_freq_and_infreq))
    # Words present only in the unfiltered vocabulary are the ones to remove.
    remove = []
    i = 0
    while i < len(vocabulary_with_freq_and_infreq):
        if vocabulary_with_freq_and_infreq[i] not in vocabulary:
            remove.append(vocabulary_with_freq_and_infreq[i])
        i += 1
    # Single alternation pattern: one regex pass per document instead of one per word.
    # NOTE(review): words are not re.escape()d — assumes vocabulary words contain
    # no regex metacharacters; confirm.
    remove = "|".join(remove)
    i = 0
    while i < len(newsgroup['data']):
        newsgroup['data'][i] = re.sub(r'\b(' + remove + ')\s', ' ', newsgroup['data'][i])
        i += 1
        print("Document: ", i, "/", len(newsgroup['data']))
    # Dead code kept for reference: the original per-word O(words * docs) version.
    """
    while i < len(vocabulary_with_freq_and_infreq):
        j = 0
        if vocabulary_with_freq_and_infreq[i] not in vocabulary:
            while j < len(newsgroup['data']):
                newsgroup['data'][j] = re.sub(r'\b' + vocabulary_with_freq_and_infreq[i] + '\s', ' ',
                                              newsgroup['data'][j])
                j += 1
        i += 1
        print("Freq/Infreq: ", i, "/", len(vocabulary_with_freq_and_infreq))
    """
# fetches the most frequent words from the documents
def get_top_n_words(documents, nbr_of_top_words=None):
    """Return (word, count) pairs across *documents.data*, most frequent first."""
    fitted = CountVectorizer().fit(documents.data)
    totals = fitted.transform(documents.data).sum(axis=0)
    pairs = [(word, totals[0, idx]) for word, idx in fitted.vocabulary_.items()]
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    return pairs[:nbr_of_top_words]
# removes stopwords from newsgroup
def remove_stopwords(newsgroup):
    """Strip stop words from every document in *newsgroup.data*, in place.

    Runs the regex-based pass first, then removes each word from
    ../assets/stopwords.txt with one re.sub pass per word per document
    (only occurrences followed by whitespace are removed).
    """
    print("Removal in progress...")
    remove_stopwords(newsgroup)
    i = 0
    with open('../assets/stopwords.txt', 'r') as f:
        words = f.read().split("\n")
        while i < len(words):
            j = 0
            while j < len(newsgroup.data):
                # NOTE(review): words[i] is interpolated unescaped into the
                # pattern and may be '' for blank lines — confirm the stop-word
                # file contains no regex metacharacters or trailing blanks.
                newsgroup.data[j] = re.sub(r'\b' + words[i] + '\s', ' ', newsgroup.data[j])
                j += 1
            i += 1
        f.close()  # redundant: the with-block already closes the file
    print("Stopwords removed")
# remove stopwords containing a ' using regex.
def remove_regex_words(newsgroup):
    """Remove apostrophe stop words (from stopwords_regex.txt) and special characters in place.

    For every stop word and every document: first strip special characters
    while keeping commas and apostrophes (so the quoted stop word can still
    match), then remove the stop word, then strip all remaining special
    characters.  The cleanup subs are re-applied on every word iteration.
    """
    i = 0
    with open('../assets/stopwords_regex.txt', 'r') as f:
        words = f.read().split("\n")
        print("Regex in progress...")
        while i < len(words):
            j = 0
            while j < len(newsgroup.data):
                newsgroup.data[j] = re.sub(r'[^\w,\']', " ", newsgroup.data[j]) # removes special characters except '
                # NOTE(review): words[i] is interpolated unescaped into the pattern — confirm
                # the file contains only literal words (apostrophes are fine).
                newsgroup.data[j] = re.sub(r'\b' + words[i] + '\s', ' ', newsgroup.data[j])
                newsgroup.data[j] = re.sub(r'[^\w]', " ", newsgroup.data[j]) # removes special characters
                j += 1
            i += 1
        f.close()  # redundant: the with-block already closes the file
    print("Regex finished")
|
[
"LUCBO@users.noreply.github.com"
] |
LUCBO@users.noreply.github.com
|
622684ec5c306509536391a79ee86352bd8df45f
|
088cc2a6e03aedea5f86372fc23f646836e6b9eb
|
/data/migrations/0008_auto_20201113_1031.py
|
ab1c23632751618ca4963f85913725bff8c55a8a
|
[] |
no_license
|
EliSEstes/Air-Quality-Forum
|
6cd2055151d9523a02de847ab5440f25cb52f128
|
f3fbd95ed0a9711d9a8fe5cc72942d38fec5ebdd
|
refs/heads/main
| 2023-03-31T23:10:00.985683
| 2021-03-23T02:23:36
| 2021-03-23T02:23:36
| 348,881,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
# Generated by Django 3.1.1 on 2020-11-13 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops the min/max CO2 and SO2 columns from
    City and adds n, pm10 and pm25 reading columns instead."""

    dependencies = [
        ('data', '0007_city_humidity'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='city',
            name='maxCo2',
        ),
        migrations.RemoveField(
            model_name='city',
            name='maxSo2',
        ),
        migrations.RemoveField(
            model_name='city',
            name='minCo2',
        ),
        migrations.RemoveField(
            model_name='city',
            name='minSo2',
        ),
        migrations.AddField(
            model_name='city',
            name='n',
            # NOTE(review): string default on an IntegerField; Django coerces
            # '000' to 0, but an int literal would be clearer in the model.
            field=models.IntegerField(default='000'),
        ),
        migrations.AddField(
            model_name='city',
            name='pm10',
            field=models.IntegerField(default='000'),
        ),
        migrations.AddField(
            model_name='city',
            name='pm25',
            field=models.IntegerField(default='000'),
        ),
    ]
|
[
"noreply@github.com"
] |
EliSEstes.noreply@github.com
|
36fff049a68fa9e7fb10cd09b31ccfc987e4d16b
|
7a3d56fac035f2de9ed94ccb852293ea9e643aee
|
/PortfolioGenerator/Library/DataClass.py
|
4374cafd40bd3112d75f2a83086db26f1d4dea99
|
[] |
no_license
|
DT021/MoneyTree
|
612d7d811a2f8109b9e369d906f45920fcc7e935
|
064600fec4c5664dd191c4f203284ca05534640e
|
refs/heads/master
| 2022-11-06T03:47:16.014120
| 2020-06-18T21:27:13
| 2020-06-18T21:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,376
|
py
|
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from typing import List
from enum import Enum
import datetime
import pandas as pd
### Dataclasses used internally for generating portfolios and risk calculations ###
@dataclass()
class Asset():
    """A single tradable asset plus where its historical prices live."""
    Ticker: str #asset ticker symbol
    Name: str #full name
    AssetType: str # type of asset stocks, bonds, crypto ..
    PriceHistoryFile: str #absolute path location of CSV file with historical data of stock
    LastPrice: float # latest price of asset, used for generating portfolio to ensure correct allocation
    AssetData: pd.DataFrame # loaded price history (presumably read from PriceHistoryFile — confirm)
@dataclass
class Universe():
    """The candidate set of assets a portfolio may be generated from."""
    count: int # presumably the number of assets in UniverseSet — confirm
    UniverseSet: List[Asset]
    UniverseStartDate: str
    UniverseEndDate: str
@dataclass()
class Portfolio():
    """A user's portfolio: holdings, allocations and uninvested cash."""
    UserID: int # unique identifier for user to know who's portfolio it is
    BuyingPower: float # Money available for this user that has not being invested yet
    assets: List[Asset] # a list of stocks in this portfolio
    AssetAlloc: dict # weight distribution each asset in the portfolio
    AssetShares: dict # number of shares of each asset in the portfolio
### User Database data classes for messaging ###
class UDRequestType(Enum):
    """Kind of record a user-database request refers to."""
    Holding = 0
    Portfolio = 1
    PortfolioHistory = 2
    User = 3
class UDOperation(Enum):
    """CRUD operation requested against the user database."""
    Insert = 0
    Read = 1
    Update = 2
    Delete = 3
class Risk(Enum):
    """Coarse risk level of a portfolio."""
    Low = 0
    Med = 1
    High = 2
@dataclass_json()
@dataclass()
class UDMHolding():
    """User-database message: one holding inside a portfolio."""
    Id: int # unique ID
    PortfolioId: int
    Name: str # full name of asset
    Abbreviation: str # ticker
    Description: str # notes on asset
    Quantity: int # number of assets currently owned of this asset
@dataclass_json()
@dataclass()
class UDMPortfolio():
    """User-database message: a portfolio and its holdings."""
    Id: int # unique ID
    OwnerId: int
    Active: bool # portfolio live or not in use
    Generated: str # date portfolio was generated by portfolio generator
    InitialValue: float # initial total value of portfolio
    StopValue: float
    DesiredRisk: Risk # low,medium, or high
    Holding: List[UDMHolding] # list of assets in portfolio
@dataclass_json()
@dataclass()
class UDMPortfolioHistory():
    """User-database message: one historical snapshot of a portfolio."""
    Id: int # unique id
    PortfolioId: int
    Date: str # date of instance
    Valuation: float # value of portfolio
    Risk: float # low, medium, or high
    ActionTakenId: int # buy, sell, hold ...
@dataclass_json()
@dataclass()
class UDMUser():
    """User-database message: identifying details of a user."""
    Email: str
    FirstName: str
    LastName: str
    BrokerageAccount: str
@dataclass_json()
@dataclass()
class UDMRequest():
    """Envelope for a user-database request; payload fields depend on RequestType."""
    Email: str
    objectID: int
    RequestType: UDRequestType # request info for database
    Operation: UDOperation # database operation for this portfolio
    Holding: UDMHolding
    Portfolio: List[UDMPortfolio] # list of portfolios to go in request
    PortfolioHistory: List[List[UDMPortfolioHistory]]
    User: UDMUser # User these portfolios are for
#### Portfolio generator data classes for messaging ######
class PGRequestType(Enum):
    """Kind of request sent to the portfolio generator."""
    Generate = 0
    BackTestResults = 1
@dataclass_json()
@dataclass()
class PGRequest():
PGMessageType: int
# back test elements
|
[
"matthewsages@gmail.com"
] |
matthewsages@gmail.com
|
096663cd9cbae0fabe082fe91e8332f3900bd763
|
d95e3c7cd912a2eab479bbe66e747e4709edf2d0
|
/bs_scrape.spec
|
ba36aa3a232605334babf932789b283ca0fb76c4
|
[] |
no_license
|
jaked842/ammoscrape
|
91666e0166156c5a9dc1e3c7f64a3091b05ff3e4
|
e09a01508cc110a8685f075e1ef75460caaaeaff
|
refs/heads/main
| 2023-06-03T22:57:55.615450
| 2021-06-24T18:01:34
| 2021-06-24T18:01:34
| 379,684,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
spec
|
# -*- mode: python ; coding: utf-8 -*-

# PyInstaller build specification for bs_scrape.py.  Run with
# `pyinstaller bs_scrape.spec`; produces a single console executable
# (runtime_tmpdir=None + no COLLECT section = one-file build).

block_cipher = None

# Analysis step: discover the script's imports and data files.
a = Analysis(['bs_scrape.py'],
             pathex=['/Users/admin/Downloads/bsoup'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the pure-Python modules found above.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Final console executable bundling the archive, binaries and data.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='bs_scrape',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
|
[
"admin@MacBook-Pro.local"
] |
admin@MacBook-Pro.local
|
bab5556638f533d64d9c39fd6e03374ca346dcbd
|
9c2e87292aa0892e621c67b8f335f569e457837e
|
/problem_12/main.py
|
c3fd712667a53a7f074c89d5bf13a929d3cffa19
|
[] |
no_license
|
scirelli/dailycodingproblem.com
|
66253198e738ec7dc8f741e3230ffa8fbed1ecf7
|
3f84e5a19dada4fd69c0e8fc02f930c8531193c0
|
refs/heads/master
| 2023-08-05T08:50:59.569552
| 2023-07-20T13:11:48
| 2023-07-20T14:08:05
| 159,087,800
| 0
| 0
| null | 2023-07-20T14:08:06
| 2018-11-26T00:20:24
|
Python
|
UTF-8
|
Python
| false
| false
| 417
|
py
|
#!/usr/bin/env python3


def unique_climbs(N, X={1, 2}):
    """
    Count the distinct ordered ways to climb N steps using step sizes from X.

    Memoized on the remaining height, so it runs in O(N * |X|) instead of the
    exponential time of the naive recursion (fixes the "Brought force" typo
    too).  X is only read, never mutated, so the mutable default is safe.
    """
    memo = {}

    def count(n):
        if n == 0:
            return 1
        if n < 0:
            return 0
        if n not in memo:
            memo[n] = sum(count(n - step) for step in X)
        return memo[n]

    return count(N)


print(unique_climbs(4))
print(unique_climbs(4, {1, 3, 5}))
print(unique_climbs(4, {1}))
print(unique_climbs(5, {1, 2}))
|
[
"stephen.cirelli@capitalone.com"
] |
stephen.cirelli@capitalone.com
|
45be357e7b38ec918dbbec6c0e106c1fd5ccafbb
|
91578595b5aa689b277827860141c60ca26753a4
|
/src/State.py
|
6315ad42a83966bbb8cf9d922bf9347d459a5c60
|
[
"BSD-3-Clause"
] |
permissive
|
lnerit/ktailFSM
|
bfb99378ea2e23d0a392cf88e43e2892c2734a02
|
abed58d97aad87a1b6eb7f062cd42a7256c55306
|
refs/heads/master
| 2021-01-10T08:09:59.412708
| 2016-02-21T05:24:21
| 2016-02-21T05:24:21
| 49,031,698
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
'''
Created on 15/12/2015
@author: lenz
'''
# Global registry of machines; State() falls back to MACHINES['default']
# when no machine argument is given.
MACHINES = dict()

# Default do-nothing callbacks for the state action hooks.
NOOP = lambda: None
NOOP_ARG = lambda arg: None
class FSMError(Exception):
    """Base class for all FSM-related errors."""
    pass
class StateError(FSMError):
    """Raised when a state cannot be attached to its machine or a transition
    target is not a State."""
class State(dict):
    """A single FSM state.

    The state doubles as a mapping from input value to next state: indexing
    (``state[value]``) performs the transition and fires the input, exit,
    transition and entry callbacks, in that order.
    """
    # Node attributes used when rendering — presumably Graphviz/DOT, given the
    # names; confirm against the machine's drawing code.
    DOT_ATTRS = {
        'shape': 'circle',
        'height': '1.2',
    }
    # Node shape used for accepting states when rendering.
    DOT_ACCEPTING = 'doublecircle'

    def __init__(self, name, initial=False, accepting=False, output=None,
                 on_entry=NOOP, on_exit=NOOP, on_input=NOOP_ARG,
                 on_transition=NOOP_ARG, machine=None, default=None):
        """Construct a state.

        name      -- identifier, used in __repr__
        initial   -- if true, register as the machine's init_state
        accepting -- if true, append to the machine's accepting_states
        output    -- default output value, stored as the (None, output) pair
        on_entry/on_exit/on_input/on_transition -- action callbacks
        machine   -- owning machine; falls back to MACHINES['default'] if set
        default   -- stored as default_transition
                     (NOTE(review): not consulted by __getitem__ here)
        """
        dict.__init__(self)
        self.name = name
        self.entry_action = on_entry
        self.exit_action = on_exit
        self.input_action = on_input
        self.transition_action = on_transition
        self.output_values = [(None, output)]
        self.default_transition = default
        if machine is None:
            # Fall back to the globally registered default machine, if any.
            try:
                machine = MACHINES['default']
            except KeyError:
                pass
        if machine:
            machine.states.append(self)
            if accepting:
                try:
                    machine.accepting_states.append(self)
                except AttributeError:
                    # The machine type has no accepting_states list at all.
                    raise StateError('The %r %s does not support accepting '
                                     'states.' % (machine.name,
                                                  machine.__class__.__name__))
            if initial:
                machine.init_state = self

    def __getitem__(self, input_value):
        """Make a transition to the next state."""
        # Look up first: an unknown input raises KeyError before any callback fires.
        next_state = dict.__getitem__(self, input_value)
        self.input_action(input_value)
        self.exit_action()
        self.transition_action(next_state)
        next_state.entry_action()
        return next_state

    def __setitem__(self, input_value, next_state):
        """Set a transition to a new state."""
        if not isinstance(next_state, State):
            raise StateError('A state must transition to another state,'
                             ' got %r instead.' % next_state)
        # An (input, output) tuple key also records the output value for that input.
        if isinstance(input_value, tuple):
            input_value, output_value = input_value
            self.output_values.append((input_value, output_value))
        dict.__setitem__(self, input_value, next_state)

    def __repr__(self):
        """Represent the object in a string."""
        return '<%r %s @ 0x%x>' % (self.name, self.__class__.__name__, id(self))
|
[
"lewelnerit@gmail.com"
] |
lewelnerit@gmail.com
|
7be171b3c6ccd20d4e7c354d4e4620d1a88c649d
|
fa1faa5c480ba249fbec18c0fb79b696d6b4bdf9
|
/4 - Arrays/RemoveKDigits.py
|
2c3dd044de47a9f8f777661c108947dbbc7b6b7f
|
[] |
no_license
|
AbhiniveshP/CodeBreakersCode
|
10dad44c82be352d7e984ba6b7296a7324f01713
|
7dabfe9392d74ec65a5811271b5b0845c3667848
|
refs/heads/master
| 2022-11-14T11:58:24.364934
| 2020-07-11T22:34:04
| 2020-07-11T22:34:04
| 268,859,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
class Solution:
    # Time: O(N) — each digit is pushed and popped at most once
    # Space: O(N)
    def removeKdigits(self, num: str, k: int) -> str:
        """Return the smallest possible number (as a string) after deleting
        exactly k digits from num; '0' when nothing meaningful remains."""
        kept = []
        for ch in num:
            digit = int(ch)
            # keep the retained digits non-decreasing while deletions remain
            while kept and k > 0 and digit < kept[-1]:
                kept.pop()
                k -= 1
            kept.append(digit)
        # digits are now non-decreasing, so leftover deletions come off the end
        while k > 0:
            kept.pop()
            k -= 1
        # drop leading zeros; an empty result means everything was removed
        as_text = ''.join(str(d) for d in kept).lstrip('0')
        return as_text if as_text else '0'
|
[
"pabhinivesh@gmail.com"
] |
pabhinivesh@gmail.com
|
5629815332356a23d2c3641637248dc89de4587e
|
a535ca0344d629837ab8f75b51c7d7e4eba91fca
|
/res/bin/usbtoh.py
|
928ab934251e71fdf57d7dfd5dc08bf9c8b96d82
|
[
"Zlib"
] |
permissive
|
TheHoodedFoot/SpaceLCD
|
f2661433ee3dd917e5bd0d28f58f4a4020d43ef3
|
52a8409c7b83a98c8200bddecbb17e9371349294
|
refs/heads/master
| 2023-01-18T15:39:52.147606
| 2020-12-13T13:53:15
| 2020-12-13T13:53:15
| 320,777,918
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
#!/usr/bin/env python3

# Convert a hex dump read from stdin into C array declarations for an LCD
# bitmap.  Input lines are expected to look like "... 0x00NN: <hex words>";
# NOTE(review): the packet boundary at offset 30 and the 64-byte packet size
# below are assumptions baked into the input format — confirm against the
# capture tool that produces the dump.

import sys
import re

# Output header
out = "static const unsigned char bitmap[] = {"

char_count = 0
line_count = 0
things = ""
things_size = 0

for line in sys.stdin:
    # Capture the two-digit offset and the remaining payload of the line.
    x = re.search(".*0x00(..): (.*)$", line)
    if x is not None:
        buffer = x.group(2) + " "
        # Each 4-hex-digit word becomes two "0xAA, 0xBB" byte literals.
        for word in re.findall(".... ", buffer):
            things += "0x" + word[:2] + ", 0x" + word[2:4] + ", "
            things_size += 2
            # print(things)
            char_count += 2
        # Offset 30 marks the end of a packet: flush the accumulated bytes.
        if int(x.group(1)) == 30:
            # print(buffer)
            out += things[:-2] + ","
            things = ""
            things_size = 0
            line_count += 1
            # buffer = ""

# Footer
out += "};"
print("#define BITMAP_PACKET_SIZE 64")
print("#define BITMAP_PACKETS " + str(line_count))
print(out)
# Any bytes left over after the last full packet go into a separate footer array.
if things != "":
    print("#define BITMAP_THINGS_SIZE " + str(things_size))
    print("static const unsigned char bitmap_footer[] = {")
    print(things)
    print("};")
else:
    print("#define BITMAP_THINGS_SIZE 0")
    print("static const unsigned char bitmap_footer[] = { 0x00 };")
|
[
"thf@thehoodedfoot.com"
] |
thf@thehoodedfoot.com
|
08e5d1a22b5e130fdb0e5d1100450e88632081a4
|
726a25e7bdc6e12645afcd00db693f2b561a4daa
|
/youtubeNet/load_YouTubeNet_model_to_predict.py
|
e2ac6deb62438f08a13aed14eef3517132a6ed62
|
[] |
no_license
|
13483910551/deep_ctr
|
5e6d6a24eac12422118890142b6ecf61a1e8df43
|
239abcb3a93dbd296c90ff827a9f761d0228f708
|
refs/heads/master
| 2022-11-12T06:04:13.557727
| 2020-06-29T10:44:55
| 2020-06-29T10:44:55
| 276,299,940
| 1
| 1
| null | 2020-07-01T06:50:54
| 2020-07-01T06:50:53
| null |
UTF-8
|
Python
| false
| false
| 3,344
|
py
|
#-*- coding:utf-8 -*-
# Load a trained YouTubeNet model and compute user embeddings for the test
# users plus item embeddings for the full item catalogue (ids 0..3706).
import tensorflow as tf
import numpy as np

from tensorflow.keras.models import Model

from YouTubeNet import YouTubeNet
from data_generator import init_output


# 1. Load model
re_model = YouTubeNet()
re_model.load_weights('YouTubeNet_model.h5')


# 2. Load data
# NOTE: the original variable was named `zip`, shadowing the builtin zip();
# renamed to zip_code throughout.
user_id, gender, age, occupation, zip_code, \
hist_movie_id, hist_len, pos_movie_id, neg_movie_id = init_output()

# Each line of test.txt is tab-separated:
# user_id, gender, age, occupation, zip, comma-separated history, hist_len, pos_movie_id
with open("test.txt", 'r') as f:
    for line in f.readlines():
        buf = line.strip().split('\t')
        user_id.append(int(buf[0]))
        gender.append(int(buf[1]))
        age.append(int(buf[2]))
        occupation.append(int(buf[3]))
        zip_code.append(int(buf[4]))
        hist_movie_id.append(np.array([int(i) for i in buf[5].strip().split(",")]))
        hist_len.append(int(buf[6]))
        pos_movie_id.append(int(buf[7]))

user_id = np.array(user_id, dtype='int32')
gender = np.array(gender, dtype='int32')
age = np.array(age, dtype='int32')
occupation = np.array(occupation, dtype='int32')
zip_code = np.array(zip_code, dtype='int32')
hist_movie_id = np.array(hist_movie_id, dtype='int32')
hist_len = np.array(hist_len, dtype='int32')
pos_movie_id = np.array(pos_movie_id, dtype='int32')


# 3. Generate user features for testing and full item features for retrieval
test_user_model_input = [user_id, gender, age, occupation, zip_code, hist_movie_id, hist_len]
all_item_model_input = list(range(0, 3706+1))

user_embedding_model = Model(inputs=re_model.user_input, outputs=re_model.user_embedding)
item_embedding_model = Model(inputs=re_model.item_input, outputs=re_model.item_embedding)

user_embs = user_embedding_model.predict(test_user_model_input)
item_embs = item_embedding_model.predict(all_item_model_input, batch_size=2 ** 12)

print(user_embs.shape)
print(item_embs.shape)

# Flatten the (n, 1, 64) predictions to (n, 64).
user_embs = np.reshape(user_embs, (-1, 64))
item_embs = np.reshape(item_embs, (-1, 64))
print(user_embs[:2])

"""
Sample output:
(6040, 1, 64)
(3707, 1, 64)
[[0.         0.84161407 0.5913373  1.4273984  0.3627409  0.3708319
  0.         0.         1.1993251  2.023305   0.         0.
  0.         0.         1.7670951  0.558543   1.0881244  1.7819335
  0.6492757  2.6123888  0.3125449  0.36506268 0.         1.1256831
  4.410721   1.7535956  0.52042466 1.4845431  0.4248005  0.
  2.1689777  1.296214   1.1852415  0.         0.         0.43460703
  1.927466   5.7313547  0.         0.         0.         0.36566824
  2.012046   0.         0.         1.5223947  3.8016186  0.
  0.34814402 1.909086   1.8206354  0.39664558 1.0465539  0.
  1.8064818  0.         1.3177121  0.5385138  0.         2.6539533
  0.         0.         0.         0.        ]
 [0.8107976  1.1632944  0.         0.53690577 1.0428483  1.2018232
  3.4726145  2.21235    0.         0.1572555  0.97843236 0.
  0.         0.99380946 0.76257807 0.05231025 1.6611706  0.0405544
  0.9629851  1.3969578  1.9982753  0.         0.1676663  0.
  0.         0.07090688 2.1441605  0.5842841  0.09379    0.
  0.         0.         0.49283475 2.134187   0.         0.8167961
  0.         0.         1.8054122  0.         0.         1.266642
  2.730833   0.         0.         0.5958151  0.         1.2587492
  0.08325796 0.         0.22326717 0.6559374  0.54102665 0.
  1.0489423  0.         0.5308376  0.62447524 0.         0.
  2.3295872  0.         2.5632188  1.3600256 ]]
(3707, 64)
"""
|
[
"wangdehua@360buyad.local"
] |
wangdehua@360buyad.local
|
27765d93a89279793b38eb5ecee621db296f5941
|
1ad7addd3cf4d29130a09eb379dbce572a8351b8
|
/Github-Spider/dummy_spider1.py
|
8fec7d7ec4e810cebce2920ef19399e81927d75c
|
[] |
no_license
|
parasKumarSahu/KML
|
9f23e9d02303eba00f95966d95097aa7bf0b001c
|
385370ca4c631ed99d5f352ff31b3fa40c9007ea
|
refs/heads/master
| 2020-04-17T14:01:00.394848
| 2018-11-30T23:04:55
| 2018-11-30T23:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import scrapy
# NOTE(review): module-level output file shared by the spider below; it is
# never closed explicitly — consider an item pipeline or closing it in a
# spider_closed handler.
f = open('haha.txt', "w")

# 'https://github.com/jquery/jquery/commits/master',
# 'https://github.com/codeschool-projects/HelloCodeSchoolProject/commits/master'
class DummySpider(scrapy.Spider):
    """Scrapy spider that walks a GitHub repository's commit-list pages and
    yields one item per commit (URL, timestamp, author, message)."""
    name = "dummy"
    start_urls = [
        'https://github.com/codeschool-projects/HelloCodeSchoolProject/commits/master'
    ]
    # Small delay between requests.
    download_delay = 0.05

    def doit(self, response):
        """Yield the concatenated <span> texts of a commit detail page.

        NOTE(review): currently unused — the response.follow call in parse()
        that targeted it is commented out.
        """
        arr = response.xpath('//tr//span/text()').extract()
        string = ""
        for stri in arr:
            string += stri
        yield {
            'insider': string,
        }

    def parse(self, response):
        """Parse one commit-list page: yield commit items, then follow pagination."""
        for listitem in response.xpath('//li[@class="commit commits-list-item js-commits-list-item table-list-item js-navigation-item js-details-container Details js-socket-channel js-updatable-content"]'):
            rel_url = listitem.xpath('div/div/a/@href').extract_first()
            complete_url = response.urljoin(rel_url)
            yield {
                'RevisionId': complete_url,
                'TimeStamp': listitem.xpath('div/div/div/relative-time/@datetime').extract_first(),
                'Contributors': listitem.xpath('div/div/div/a/text()').extract_first(),
                'EditDetails': listitem.xpath('div/p/a/text()').extract_first(),
            }
            # Writes to the module-level file handle opened at import time.
            f.write(complete_url + '\n')
            #response.follow(rel_url, self.doit)
        # The last matching href is taken to be the "Older" pagination link —
        # presumably; confirm against the page layout.
        next_page = response.xpath('//div/div/div/div/div/div/div/a/@href')[-1].extract()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
|
[
"2016csb1047@iitrpr.ac.in"
] |
2016csb1047@iitrpr.ac.in
|
25265341286876db617a7ceabe8e9353a1421c8d
|
64a82fb1ac6ff3e1ec0c9f9306f4eeafca01837e
|
/users/migrations/0015_auto_20210607_1909.py
|
cf197dc2584939f2a37101c62243b4bcfa80d3d8
|
[] |
no_license
|
DiabCh/Online-Store-Project
|
54b6e1aad7563471b81eade81f1daf2baeec19b8
|
065302fb9e363a1645bff2e516248a22fb49f9db
|
refs/heads/main
| 2023-07-15T01:53:36.611256
| 2021-08-20T10:44:59
| 2021-08-20T10:44:59
| 398,246,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# Generated by Django 3.1.7 on 2021-06-07 19:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration refreshing the Activation field defaults.

    NOTE(review): both defaults were evaluated once at generation time — the
    expiry timestamp and the token are the same constant for every row this
    migration touches; real per-row values must come from application code.
    """

    dependencies = [
        ('users', '0014_auto_20210607_0735'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activation',
            name='expires_at',
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 7, 19, 39, 12, 604098)),
        ),
        migrations.AlterField(
            model_name='activation',
            name='token',
            field=models.CharField(default='49daf8d2be3aafdb41704c2f3851e8e6169ec344814420b22964be7d1a25e072', max_length=64),
        ),
    ]
|
[
"chraifdiab@gmail.com"
] |
chraifdiab@gmail.com
|
b3fcc969062efca70aa4b4866ce7e8189b6d948a
|
8c953249a62367a8f0138eff488ce5d510e65620
|
/cs109/lab1OldClone/rpnc.py
|
5a6a89132f900087c2e0ed5306b2da657c50dcb0
|
[] |
no_license
|
nahawtho/cs109copy
|
5ce87799eb09ac22b38f63b8d77b3878f8b5b001
|
66e2b2be6adf54bf724a437f2a32090a1d834cba
|
refs/heads/master
| 2020-04-05T06:50:35.063576
| 2018-11-08T05:10:56
| 2018-11-08T05:10:56
| 156,653,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
#
# Python3 Reverse Polish Notation (RPN) Calculator
#
# Accepts an single argument in RPN, evaluates it and prints the answer to stdout.
# e.g.  python3 rpnc.py "3 4 + 2 *"   ->   14.0
#
import sys


def evaluate_rpn(expression):
    """Evaluate a whitespace-separated RPN expression and return the result as a float."""
    stack = []
    for token in expression.split():
        if token in ('+', '-', '*', '/'):
            b = stack.pop()
            a = stack.pop()
            if token == '+':
                stack.append(a + b)
            elif token == '-':
                stack.append(a - b)
            elif token == '*':
                stack.append(a * b)
            else:
                stack.append(a / b)
        else:
            stack.append(float(token))
    if len(stack) != 1:
        raise ValueError("malformed RPN expression: %r" % expression)
    return stack[0]


if __name__ == "__main__":
    print(evaluate_rpn(sys.argv[1]))
|
[
"nahawtho@thor.soe.ucsc.edu"
] |
nahawtho@thor.soe.ucsc.edu
|
bfae83b275fbfae46d9026de5ef5217a6f90a547
|
872a6279df8f6c8786002f9012cad3ca2d99bcb3
|
/countries/admin.py
|
0e838f57e0febdd25e4eaac3c191a25a229c68a2
|
[] |
no_license
|
ndanield/countries_list
|
50611cffd22a8c3c59c5e4972c4785db21663e94
|
532334dc6767182479501a189ac797997fb3efbc
|
refs/heads/master
| 2021-01-10T07:05:34.679445
| 2015-12-02T05:19:32
| 2015-12-02T05:19:32
| 47,042,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from django.contrib import admin
from .models import Country
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
    """Admin options for Country: key columns shown in the change list.

    Uses the @admin.register decorator instead of a separate
    admin.site.register(...) call — same effect, idiomatic Django.
    """
    # Columns displayed on the admin list page.
    list_display = ['code', 'name', 'continent', 'region', 'population']
|
[
"Nelson Daniel Durán Morel"
] |
Nelson Daniel Durán Morel
|
20900db7b1b8044e1bf0b27b91907868005a426c
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayInsSceneSellerActivitySignModel.py
|
4ef2bcff18867f0f8ba427a6a7c71a574c386b9c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,623
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneSellerActivitySignModel(object):
    """Request model for the ins.scene.seller.activity.sign API.

    Fields default to None; to_alipay_dict()/from_alipay_dict() convert to and
    from the plain-dict form used by the gateway.
    """

    def __init__(self):
        self._biz_data = None
        self._channel_account_id = None
        self._channel_account_type = None
        self._sp_code = None

    @property
    def biz_data(self):
        return self._biz_data

    @biz_data.setter
    def biz_data(self, value):
        self._biz_data = value

    @property
    def channel_account_id(self):
        return self._channel_account_id

    @channel_account_id.setter
    def channel_account_id(self, value):
        self._channel_account_id = value

    @property
    def channel_account_type(self):
        return self._channel_account_type

    @channel_account_type.setter
    def channel_account_type(self, value):
        self._channel_account_type = value

    @property
    def sp_code(self):
        return self._sp_code

    @sp_code.setter
    def sp_code(self, value):
        self._sp_code = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict for the gateway."""
        params = dict()
        for field in ('biz_data', 'channel_account_id', 'channel_account_type', 'sp_code'):
            value = getattr(self, field)
            if not value:
                continue
            # Nested API objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for an empty input."""
        if not d:
            return None
        o = AlipayInsSceneSellerActivitySignModel()
        for field in ('biz_data', 'channel_account_id', 'channel_account_type', 'sp_code'):
            if field in d:
                setattr(o, field, d[field])
        return o
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
9e2990da4d26978bf83a7eada161aa6270a0f17e
|
8084a7e3289d4ae530ce5808b2c19323b34d6289
|
/invoices/nfe.py
|
df183a009323f01b74ba45110d0bcc130eb28bd1
|
[
"MIT"
] |
permissive
|
lclpsoz/misc-scripts
|
c05f16d128d4b4eee8d7ccf22332831627ee681d
|
7834292011bf47e0f7272485e2f440b68a2c6414
|
refs/heads/main
| 2023-08-28T20:23:42.635825
| 2021-11-13T21:28:53
| 2021-11-13T21:28:53
| 387,044,544
| 0
| 0
|
MIT
| 2021-11-13T21:28:54
| 2021-07-17T22:06:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,761
|
py
|
#%% Request
import requests
from bs4 import BeautifulSoup
import re
import sys

nfe_code = input('Input nf-e code: ')
# Keep only digits of the access key.  Raw string r'\D' fixes the invalid
# escape sequence the old '\D' literal triggered on Python 3.6+.
nfe_code = re.sub(r'\D', '', nfe_code)
url_qrcode = 'http://www.nfce.se.gov.br/portal/qrcode.jsp?p=' + nfe_code + '|2|1|3|bb608455a1c917b0cb910034688a4fa65f851089'
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7",
    "cache-control": "max-age=0",
    "upgrade-insecure-requests": "1"
}
print('Making request...')
try:
    req = requests.post(url=url_qrcode, headers=headers, timeout=2)
    print('\t', req, sep='')
except requests.RequestException:
    # Bug fix: the old bare `except` swallowed the error and the script then
    # crashed with a NameError on the undefined `req`; abort cleanly instead.
    print('\tProblem with the request. Probably nf-e is not available yet.')
    print('\tTry to go directly to the nfe page:', url_qrcode)
    sys.exit(1)

#%% Process
html_doc = req.text
soup = BeautifulSoup(html_doc, 'lxml')
# CSS class names used by the nfce.se.gov.br receipt page for each field.
tags = {
    'name': 'txtTit',
    'id': 'RCod',
    'amount': 'Rqtd',
    'price_unit': 'RvlUnit'
}

# Optional manual mapping of product id -> label, read from stdin as "id|label".
assigned = {}
res = input('Amount of assigned rows to be read, separated by | (0 or empty for no):')
if len(res) > 0 and int(res) > 0:
    for i in range(int(res)):
        row = input()
        id, assigned_str = row.split('|')
        if id in assigned and assigned[id] != assigned_str:
            print('Code already register and different. Old:', assigned[id])
        assigned[id] = assigned_str

total = 0
products = {}
for row in soup.find_all('td'):
    if len(row.find_all('span', {'class': 'txtTit'})) > 0:
        name = row.find_all('span', {'class': tags['name']})[0].string
        # The code span looks like "(Cod: 123)" -- keep only the number.
        id = row.find_all('span', {'class': tags['id']})[0].string.split(' ')[1].replace(')', '')
        # Brazilian decimal comma -> dot before float conversion.
        amount = row.find_all('span', {'class': tags['amount']})[0].text.split(':')[1].replace(',', '.')
        price_unit = row.find_all('span', {'class': tags['price_unit']})[0].text.split('\xa0')[-1].replace(',', '.')
        amount, price_unit = float(amount), float(price_unit)
        assigned_str = ''
        if id in assigned:
            assigned_str = assigned[id]
        if id in products:
            # Same product appearing twice on the receipt: accumulate amount.
            products[id]['amount'] += amount
        else:
            products[id] = {
                'name': name,
                'assigned_str': assigned_str,
                'amount': amount,
                'price_unit': price_unit
            }
        total += amount * price_unit

print('\nTable:')
for id in products:
    prod = products[id]
    name, assigned_str, amount, price_unit = prod['name'], prod['assigned_str'], prod['amount'], prod['price_unit']
    print(name, id, assigned_str, str(amount).replace('.', ','), str(price_unit).replace('.', ','), sep='|')
print('\nTotal = R$', round(total * 100) / 100)
|
[
"lclpsoz@gmail.com"
] |
lclpsoz@gmail.com
|
dea7dc5a83f238f53995a42c85e7a082cf7c8435
|
67c846e50f9179b062bb9d70b637d4ea00b33e2f
|
/venv/Scripts/easy_install-3.6-script.py
|
0c33e21fc36e535c2ec12d6a20db1b8af6367546
|
[] |
no_license
|
feriosch/Hack2019
|
2cdb71f06e4ba8ca8c1246b8b06f43460de14c9c
|
e1a6085b53ab6509c1eb4e5f4056814c1fa78985
|
refs/heads/master
| 2020-05-17T16:30:50.417049
| 2019-04-28T17:07:44
| 2019-04-28T17:07:44
| 183,820,846
| 0
| 0
| null | 2019-04-28T05:57:23
| 2019-04-27T20:55:24
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
#!C:\Users\ferio\PycharmProjects\Banorte\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing '-script.py'/'-script.pyw'/'.exe'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    entry = load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')
    sys.exit(entry())
|
[
"47795404+feriosch@users.noreply.github.com"
] |
47795404+feriosch@users.noreply.github.com
|
7405dcae9c9ff0679f22af578de88618c8ccd5ee
|
42adb09d60cfca14a5beb1f581538d8d730d457c
|
/logic/weapon/Club.py
|
a292e9364387dabb0993e9705d5631aca337e91d
|
[] |
no_license
|
martinKindall/dungeon-game
|
70514e26b245c5208c1e4f4ba0adfd1c9463676f
|
95c6a66ae1c3f39aae5c0d006dbf3df23a61baa4
|
refs/heads/master
| 2022-06-01T15:10:06.675187
| 2020-04-30T01:53:22
| 2020-04-30T01:53:22
| 259,819,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
from logic.weapon.Weapon import Weapon
class Club(Weapon):
    # Basic melee weapon with a fixed attack value.

    def getAttackPoints(self) -> int:
        # Clubs always deal a flat 2 points of damage.
        return 2

    def __str__(self) -> str:
        return "Club"
|
[
"mart256@gmail.com"
] |
mart256@gmail.com
|
d6e1af3c1f70472c05f440c578e0bb66519b95d3
|
205d581673e3960c99e6b8fe1475efb661421cb3
|
/bikeshed/update/main.py
|
1be2b76b3f73f81060b4b4fa57d6141ebd24f5e6
|
[
"CC0-1.0"
] |
permissive
|
TBBle/bikeshed
|
08f9137f7a561d154720297b76ced061cdd6a04a
|
5834a15f311a639c0b59ff2edbf3a060391d15ff
|
refs/heads/master
| 2021-01-12T18:33:43.213471
| 2017-09-29T20:56:24
| 2017-09-29T20:56:24
| 81,327,888
| 0
| 0
| null | 2017-02-08T12:30:22
| 2017-02-08T12:30:21
| null |
UTF-8
|
Python
| false
| false
| 3,886
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import os
from . import updateCrossRefs
from . import updateBiblio
from . import updateCanIUse
from . import updateLinkDefaults
from . import updateTestSuites
from . import updateLanguages
from . import manifest
from .. import config
from ..messages import *
def update(anchors=False, biblio=False, caniuse=False, linkDefaults=False, testSuites=False, languages=False, path=None, dryRun=False, force=False):
    """Refresh Bikeshed's spec-data files.

    Tries the fast manifest-based update first; falls back to (or is forced
    into) a full manual update of the selected data sets. If no individual
    data set is selected, everything is updated.
    """
    if path is None:
        path = config.scriptPath("spec-data")
    # Update via manifest by default, falling back to a full update only if failed or forced.
    if not force:
        if not manifest.updateByManifest(path=path, dryRun=dryRun):
            say("Falling back to a manual update...")
            force = True
    if force:
        selections = [
            (anchors, updateCrossRefs),
            (biblio, updateBiblio),
            (caniuse, updateCanIUse),
            (linkDefaults, updateLinkDefaults),
            (testSuites, updateTestSuites),
            (languages, updateLanguages),
        ]
        # With every flag False, run every updater.
        runAll = not any(flag for flag, _ in selections)
        for flag, updater in selections:
            if flag or runAll:
                updater.update(path=path, dryRun=dryRun)
        manifest.createManifest(path=path, dryRun=dryRun)
def fixupDataFiles():
    '''
    Checks the readonly/ version is more recent than your current mutable data files.
    This happens if I changed the datafile format and shipped updated files as a result;
    using the legacy files with the new code is quite bad!
    '''
    try:
        localVersion = int(open(localPath("version.txt"), 'r').read())
    except IOError:
        # No local version marker yet; treat as out of date.
        localVersion = None
    try:
        remoteVersion = int(open(remotePath("version.txt"), 'r').read())
    except IOError as err:
        # Fix: `except IOError, err` is Python-2-only syntax; the `as` form
        # is valid on both Python 2.6+ and Python 3.
        warn("Couldn't check the datafile version. Bikeshed may be unstable.\n{0}", err)
        return

    if localVersion == remoteVersion:
        # Data files are already in sync; nothing to do.
        return
    # If versions don't match, either the remote versions have been updated
    # (and we should switch you to them, because formats may have changed),
    # or you're using a historical version of Bikeshed (ditto).
    try:
        for filename in os.listdir(remotePath()):
            copyanything(remotePath(filename), localPath(filename))
    except Exception as err:
        warn("Couldn't update datafiles from cache. Bikeshed may be unstable.\n{0}", err)
        return
def updateReadonlyDataFiles():
    '''
    Like fixupDataFiles(), but in the opposite direction --
    copies all my current mutable data files into the readonly directory.
    This is a debugging tool to help me quickly update the built-in data files,
    and will not be called as part of normal operation.
    '''
    try:
        for filename in os.listdir(localPath()):
            # Never copy the readonly directory into itself.
            if filename.startswith("readonly"):
                continue
            copyanything(localPath(filename), remotePath(filename))
    except Exception as err:
        # Fix: `except Exception, err` is Python-2-only syntax; the `as` form
        # is valid on both Python 2.6+ and Python 3.
        warn("Error copying over the datafiles:\n{0}", err)
        return
def copyanything(src, dst):
    """Copy *src* onto *dst*, replacing anything already there.

    Directories are copied recursively; when *src* is a plain file
    (copytree fails with ENOTDIR/EINVAL) fall back to a single-file copy.
    """
    import shutil
    import errno
    # Clear the destination first so copytree never trips on an existing tree.
    shutil.rmtree(dst, ignore_errors=True)
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno not in [errno.ENOTDIR, errno.EINVAL]:
            raise
        shutil.copy(src, dst)
def localPath(*segs):
    # Path into the mutable copy of the data files, under spec-data/.
    return config.scriptPath("spec-data", *segs)


def remotePath(*segs):
    # Path into the shipped read-only reference copy, under spec-data/readonly/.
    return config.scriptPath("spec-data", "readonly", *segs)
|
[
"jackalmage@gmail.com"
] |
jackalmage@gmail.com
|
d7b4a024eaa06f6f3213a1527629e074bed475ed
|
5185954d3e4a076b779522007148b8420ff3d30b
|
/freeze.py
|
c803becf9318a6a9b29946174616d8faf465bde6
|
[] |
no_license
|
wcraft/first-news-app
|
bdb0cec5e159a1cadce739752f4c383bf0ce2c10
|
5cd960ac04f2eda690aac2726be486b4421c84d3
|
refs/heads/master
| 2021-01-10T01:36:32.709958
| 2016-03-24T19:43:41
| 2016-03-24T19:43:41
| 53,678,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from flask_frozen import Freezer
from app import app, get_csv

# Wrap the Flask app so its routes can be rendered out as static files.
freezer = Freezer(app)


@freezer.register_generator
def detail():
    # Yield one URL-argument dict per CSV row so every detail page is frozen.
    for row in get_csv("./static/la-riots-deaths.csv"):
        yield {'row_id': row['id']}


if __name__ == '__main__':
    freezer.freeze()
|
[
"wcraft1204@gmail.com"
] |
wcraft1204@gmail.com
|
93cc65a38af08b3fdc484d18995da04bb407aee2
|
a689be48a5b5d844fb196df95e1df8e91a3ce59e
|
/python/035. Search Insert Position/searchInsert.py
|
781bea8b68f8f46654f9b95b17b0a2820a389989
|
[] |
no_license
|
stevepomp/LeetCode
|
a923cb0f5af42e4449a922d8edabd289f43c5210
|
9b9f37fe573c8a6a6b3a00a34b2a4c9a7835c16e
|
refs/heads/master
| 2020-03-22T21:18:47.708688
| 2018-08-09T03:46:00
| 2018-08-09T03:46:00
| 140,676,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
'''
Given a sorted array and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
Input: [1,3,5,6], 5
Output: 2
Input: [1,3,5,6], 2
Output: 1
Input: [1,3,5,6], 7
Output: 4
Input: [1,3,5,6], 0
Output: 0
'''


class Solution:
    def searchInsert(self, nums, target):
        """
        Return the index of target in the sorted list nums, or the index at
        which it would be inserted to keep nums sorted.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # Fix: the previous version called `bisect.insort` without importing
        # bisect (NameError at runtime), mutated the caller's list, and ran
        # in O(n).  bisect_left finds the leftmost insertion point in
        # O(log n) without modifying nums.
        import bisect  # local import: the module never imported bisect
        return bisect.bisect_left(nums, target)
|
[
"noreply@github.com"
] |
stevepomp.noreply@github.com
|
3183747cd1835046d97a500fd56fc5a714d8f69c
|
f90a30cfafc5d786a3dc269f3ca48dce3fc59028
|
/Payload_Types/apfell/mythic/agent_functions/iterm.py
|
94b35b48c3156d56770b68fba7a567e64efb0415
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
NotoriousRebel/Mythic
|
93026df4a829b7b88de814e805fdce0ab19f3ab9
|
4576654af4025b124edb88f9cf9d0821f0b73070
|
refs/heads/master
| 2022-12-03T01:19:20.868900
| 2020-08-18T03:48:55
| 2020-08-18T03:48:55
| 288,780,757
| 1
| 0
|
NOASSERTION
| 2020-08-19T16:20:19
| 2020-08-19T16:20:18
| null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
from CommandBase import *
import json
class ITermArguments(TaskArguments):
    # The iTerm command takes no arguments, so parsing is a no-op.
    def __init__(self, command_line):
        super().__init__(command_line)
        self.args = {}

    async def parse_arguments(self):
        # Nothing to parse for this command.
        pass
class ITermCommand(CommandBase):
    # Command metadata consumed by the Mythic framework.
    cmd = "iTerm"
    needs_admin = False
    help_cmd = "iTerm"
    description = "Read the contents of all open iTerm tabs if iTerms is open, otherwise just inform the operator that it's not currently running"
    version = 1
    is_exit = False
    is_file_browse = False
    is_process_list = False
    is_download_file = False
    is_remove_file = False
    is_upload_file = False
    author = "@its_a_feature_"
    # MITRE ATT&CK technique IDs this command maps to.
    attackmapping = ["T1139", "T1056"]
    argument_class = ITermArguments

    async def create_tasking(self, task: MythicTask) -> MythicTask:
        # No server-side preparation needed; the agent does the work.
        return task

    async def process_response(self, response: AgentResponse):
        # Agent responses require no post-processing.
        pass
|
[
"codybthomas@gmail.com"
] |
codybthomas@gmail.com
|
18a8a1313433d0b60915c2d10c7992f7f4edbf22
|
b9264524ecfa5e3607ee8d70b8b60eb9090c26d2
|
/.github/scripts/core_checker.py
|
f02f4f7bc5789e2dab38f722d8da08ce8d37b0a8
|
[
"MIT"
] |
permissive
|
kilograham/FreeRTOS
|
884fe99a520616796e29aa4fdb0f0c4f6f641756
|
e117bdcd178c4074fd0d255958487807a7b50032
|
refs/heads/main
| 2023-06-10T21:22:00.915782
| 2021-05-07T22:15:00
| 2021-05-07T22:15:00
| 366,805,094
| 2
| 1
|
MIT
| 2021-05-12T17:57:40
| 2021-05-12T17:57:39
| null |
UTF-8
|
Python
| false
| false
| 5,879
|
py
|
#!/usr/bin/env python3
# python >= 3.4
import os
from common.header_checker import HeaderChecker
#--------------------------------------------------------------------------------------------------
# CONFIG
#--------------------------------------------------------------------------------------------------
# File extensions that are never checked for the FreeRTOS license header.
# Fix: the original list contained '.md' twice; the duplicate is removed.
FREERTOS_IGNORED_EXTENSIONS = [
    '.1', '.ASM', '.C', '.DSW', '.G_C', '.H', '.Hbp', '.IDE', '.LIB',
    '.Opt', '.PC', '.PRM', '.TXT', '.URL', '.UVL', '.Uv2', '.a', '.ac',
    '.am', '.atsln', '.atstart', '.atsuo', '.bash', '.bat', '.bbl', '.bit',
    '.board', '.bsb', '.bsdl', '.bts', '.ccxml', '.cdkproj', '.cdkws',
    '.cfg', '.cgp', '.cmake', '.cmd', '.config', '.cpp', '.cproj', '.crun',
    '.css', '.csv', '.custom_argvars', '.cxx', '.cydwr', '.cyprj', '.cysch',
    '.dat', '.datas', '.db', '.dbgdt', '.dep', '.dni', '.dnx', '.doc',
    '.dox', '.doxygen', '.ds', '.dsk', '.dtd', '.dts', '.elf', '.env_conf',
    '.ewd', '.ewp', '.ewt', '.eww', '.exe', '.filters', '.flash', '.fmt',
    '.ftl', '.gdb', '.gif', '.gise', '.gld', '.gpdsc', '.gui',
    '.h_from_toolchain', '.hdf', '.hdp', '.hex', '.hist', '.history',
    '.hsf', '.htm', '.html', '.hwc', '.hwl', '.hwp', '.hws', '.hzp', '.hzs',
    '.i', '.icf', '.ide', '.idx', '.in', '.inc', '.include', '.index',
    '.inf', '.ini', '.init', '.ipcf', '.ise', '.jlink', '.json', '.la',
    '.launch', '.lcf', '.lds', '.lib', '.lk1', '.lkr', '.lm', '.lo',
    '.lock', '.lsl', '.lst', '.m4', '.mac', '.make', '.map', '.mbt',
    '.mcp', '.mcpar', '.mcs', '.mcw', '.md', '.mdm', '.mem', '.mhs',
    '.mk', '.mk1', '.mmi', '.mrt', '.mss', '.mtpj', '.nav', '.ntrc_log',
    '.opa', '.opb', '.opc', '.opl', '.opt', '.opv', '.out', '.pack',
    '.par', '.patch', '.pbd', '.pdsc', '.pe', '.pem', '.pgs', '.pl',
    '.plg', '.png', '.prc', '.pref', '.prefs', '.prj', '.properties',
    '.ps1', '.ptf', '.py', '.r79', '.rapp', '.rc', '.reggroups',
    '.reglist', '.resc', '.resources', '.rom', '.rprj', '.s79', '.s82',
    '.s90', '.sc', '.scf', '.scfg', '.script', '.sct', '.scvd',
    '.session', '.sfr', '.sh', '.shtml', '.sig', '.sln', '.spec', '.stf',
    '.stg', '.suo', '.sup', '.svg', '.tags', '.tcl', '.tdt', '.template',
    '.tgt', '.tps', '.tra', '.tree', '.tws', '.txt', '.ucf', '.url',
    '.user', '.ut', '.uvmpw', '.uvopt', '.uvoptx', '.uvproj', '.uvprojx',
    '.vcproj', '.vcxproj', '.version', '.webserver', '.wpj', '.wsdt',
    '.wsp', '.wspos', '.wsx', '.x', '.xbcd', '.xcl', '.xise', '.xml',
    '.xmp', '.xmsgs', '.xsl', '.yml', '.zip'
]
# Path regexes exempted from the license-header check.
# Fix: the mbedtls_config.h pattern appeared twice; the duplicate is removed.
FREERTOS_IGNORED_PATTERNS = [
    r'.*\.git.*',
    r'.*mbedtls_config\.h.*',
    r'.*CMSIS.*',
    r'.*/makefile',
    r'.*/Makefile',
    r'.*/trcConfig\.h.*',
    r'.*/trcConfig\.c.*',
    r'.*/trcSnapshotConfig\.h.*'
]
# Individual file names that are exempt from the license-header check.
FREERTOS_IGNORED_FILES = [
    'fyi-another-way-to-ignore-file.txt',
    'mbedtls_config.h',
    'requirements.txt',
    'run-cbmc-proofs.py',
    '.editorconfig',
    'lcovrc',
    'htif.c', 'htif.h'
]
# The exact MIT license header (one string per line, including newlines)
# that every checked source file must start with.
FREERTOS_HEADER = [
    '/*\n',
    ' * FreeRTOS V202104.00\n',
    ' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
    ' *\n',
    ' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
    ' * this software and associated documentation files (the "Software"), to deal in\n',
    ' * the Software without restriction, including without limitation the rights to\n',
    ' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
    ' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
    ' * subject to the following conditions:\n',
    ' *\n',
    ' * The above copyright notice and this permission notice shall be included in all\n',
    ' * copies or substantial portions of the Software.\n',
    ' *\n',
    ' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
    ' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
    ' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
    ' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
    ' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
    ' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
    ' *\n',
    ' * https://www.FreeRTOS.org\n',
    ' * https://github.com/FreeRTOS\n',
    ' *\n',
    ' */\n',
]
def main():
    """Run the FreeRTOS license-header check using the configuration above."""
    args = HeaderChecker.configArgParser().parse_args()

    # Configure the checker, then process the requested paths.
    checker = HeaderChecker(FREERTOS_HEADER)
    checker.ignoreExtension(*FREERTOS_IGNORED_EXTENSIONS)
    checker.ignorePattern(*FREERTOS_IGNORED_PATTERNS)
    checker.ignoreFile(*FREERTOS_IGNORED_FILES)
    # Never flag this checker script itself.
    checker.ignoreFile(os.path.split(__file__)[-1])

    rc = checker.processArgs(args)
    if rc:
        checker.showHelp(__file__)
    return rc


if __name__ == '__main__':
    exit(main())
|
[
"noreply@github.com"
] |
kilograham.noreply@github.com
|
62f15e21cc7da0172f76ec0118796903115796ca
|
4944541b0cd0fa48a01581ffce5e7ce16f5cf8d7
|
/src/Backend/MbkExam/Notification/serializers.py
|
a64b1c49829f6af25ac8f32051e5c5e42e2348cb
|
[] |
no_license
|
aballah-chamakh/the_exam
|
49a5b5c9d28c61b2283f2d42d2b2fb771dd48bf4
|
dbbbdc7a955ca61572f26430a7788407eaf0c632
|
refs/heads/main
| 2023-03-28T13:19:18.148630
| 2021-04-03T22:12:51
| 2021-04-03T22:12:51
| 354,404,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
from rest_framework import serializers
from .models import AdminNotification,StudentNotification
class AdminNotificationSerializer(serializers.ModelSerializer):
    # Flattened student fields pulled through the notification's relations
    # so the frontend does not need extra lookups.
    student_username = serializers.CharField(source='student.user.username')
    student_img = serializers.CharField(source="student.image.url")
    student_slug = serializers.SlugField(source="student.slug")
    student_email = serializers.CharField(source="student.user.email")

    class Meta:
        model = AdminNotification
        fields = ('student_email', 'student_img', 'student_username', "student_slug", 'event_type', 'event_msg', 'event_slug', 'datetime', 'viewed')
class StudentNotificationSerializer(serializers.ModelSerializer):
    # Only the slug is denormalized; the rest comes straight from the model.
    student_slug = serializers.SlugField(source="student.slug")

    class Meta:
        model = StudentNotification
        fields = ('student_slug', 'event_type', 'event_msg', 'event_slug', 'datetime', 'viewed')
|
[
"chamakhabdallah8@gmail.com"
] |
chamakhabdallah8@gmail.com
|
5a1955b494c614d47f8fb98e8dfc46c8d5d321b8
|
18111180983ce2d2c0f3557741efbbc6234f2c36
|
/layers/modules/multibox_loss.py
|
4424e181c5cf8eb8a96a4edf667c880b8fb64947
|
[] |
no_license
|
bmemm/AP-MTL
|
61ba8f72a6fa9b70cf4cd1ed869f0bfa6b26f5f9
|
30470e818442a4383ed778fa164e79499aed9a9c
|
refs/heads/main
| 2023-08-01T22:42:38.400053
| 2021-09-11T00:45:51
| 2021-09-11T00:45:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from data import coco as cfg
#from data import *
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
    """SSD Weighted Loss Function
    Compute Targets:
        1) Produce Confidence Target Indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold parameter
           (default threshold: 0.5).
        2) Produce localization target by 'encoding' variance into offsets of ground
           truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that comes with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, overlap_thresh, prior_for_matching,
                 bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
                 use_gpu=True):
        super(MultiBoxLoss, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh          # min jaccard overlap for a positive match
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos              # negatives kept per positive
        self.neg_overlap = neg_overlap
        # Center/size encoding variances (standard SSD values).
        self.variance = [0.1, 0.2]

    def forward(self, predictions, targets):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
            and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
                priors shape: torch.size(num_priors,4)

            targets (tensor): Ground truth boxes and labels for a batch,
                shape: [batch_size,num_objs,5] (last idx is the label).

        Returns:
            (loss_l, loss_c): localization and confidence losses, each
            already divided by the number of matched (positive) priors.
        """
        loc_data, conf_data, priors = predictions
        num = loc_data.size(0)  # batch size
        priors = priors[:loc_data.size(1), :]
        num_priors = (priors.size(0))
        num_classes = self.num_classes

        # Match priors (default boxes) against ground-truth boxes, filling
        # loc_t (encoded offsets) and conf_t (matched class per prior).
        loc_t = torch.Tensor(num, num_priors, 4)
        conf_t = torch.LongTensor(num, num_priors)
        for idx in range(num):
            truths = targets[idx][:, :-1].data   # gt boxes (x1,y1,x2,y2)
            labels = targets[idx][:, -1].data    # gt class labels
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
        if self.use_gpu:
            loc_t = loc_t.cuda()
            conf_t = conf_t.cuda()
        # Wrap targets; no gradients flow into the matched targets.
        loc_t = Variable(loc_t, requires_grad=False)
        conf_t = Variable(conf_t, requires_grad=False)

        # Positive mask: priors matched to a non-background class.
        pos = conf_t > 0
        num_pos = pos.sum(dim=1, keepdim=True)

        # Localization Loss (Smooth L1) over positive priors only.
        # Shape: [batch,num_priors,4]
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        # NOTE(review): size_average=False is deprecated in newer PyTorch;
        # reduction='sum' is the modern equivalent — confirm target version.
        loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)

        # Per-prior confidence loss used only for ranking hard negatives.
        batch_conf = conf_data.view(-1, self.num_classes)
        # NOTE(review): batch_conf_gat is never used afterwards (dead code).
        batch_conf_gat = batch_conf.gather(1, conf_t.view(-1, 1))
        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))

        # Hard Negative Mining: rank negatives by loss, keep the top
        # negpos_ratio * num_pos of them.
        loss_c = loss_c.view(pos.size()[0], pos.size()[1])
        loss_c[pos] = 0  # exclude positives from the negative ranking
        loss_c = loss_c.view(num, -1)
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)  # rank of each prior by its loss
        num_pos = pos.long().sum(1, keepdim=True)
        num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)
        neg = idx_rank < num_neg.expand_as(idx_rank)

        # Confidence loss over positives plus the selected hard negatives.
        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
        targets_weighted = conf_t[(pos + neg).gt(0)]
        loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)

        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        N = num_pos.data.sum().double()
        loss_l = loss_l.double()
        loss_c = loss_c.double()
        loss_l /= N
        loss_c /= N
        return loss_l, loss_c
|
[
"mobarakol@u.nus.edu"
] |
mobarakol@u.nus.edu
|
4c800d767661ee69f80d462a929fd68be4f8b58f
|
a39dbda2d9f93a126ffb189ec51a63eb82321d64
|
/mongoengine/queryset/__init__.py
|
026a7acdd533719065dcc1c7c1955565b13d6f6f
|
[
"MIT"
] |
permissive
|
closeio/mongoengine
|
6e22ec67d991ea34c6fc96e9b29a9cbfa945132b
|
b083932b755a9a64f930a4a98b0129f40f861abe
|
refs/heads/master
| 2023-04-30T04:04:52.763382
| 2023-04-20T07:13:41
| 2023-04-20T07:13:41
| 5,533,627
| 21
| 5
|
MIT
| 2023-04-20T07:13:42
| 2012-08-23T23:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 525
|
py
|
from mongoengine.errors import (DoesNotExist, MultipleObjectsReturned,
                                InvalidQueryError, OperationError,
                                NotUniqueError)
from mongoengine.queryset.field_list import *
from mongoengine.queryset.manager import *
from mongoengine.queryset.queryset import *
from mongoengine.queryset.transform import *
from mongoengine.queryset.visitor import *

# Re-export the combined public API of every queryset submodule.
__all__ = (field_list.__all__ + manager.__all__ + queryset.__all__ +
           transform.__all__ + visitor.__all__)
|
[
"ross.lawley@gmail.com"
] |
ross.lawley@gmail.com
|
f552c9dfa2e04cea58e9d077a133917bf59a3b5b
|
8a83b2b6e5906b28de898123d8a895727f1dd300
|
/ai/cf_ai/cf_action_eval_ai.py
|
94fd1bc7f3f3a8ffc0d5eea8e1deadf722523779
|
[] |
no_license
|
iamsure89/ticket_to_ride
|
bac86f45e2d1b03b12a90908fa0978a4996f8332
|
59ebcd65707faa4471c367fc7b5ae5852f0ad6d2
|
refs/heads/master
| 2022-06-23T07:53:29.154858
| 2016-12-17T21:45:53
| 2016-12-17T21:45:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
from random import randrange
import game.board as board
from game import Player, Game
from game.actions import *
from game.classes import Colors
from game.methods import find_paths_for_destinations
from cf_base_ai import CFBaseAI
class CFActionEvalAI(CFBaseAI):
    "Evaluate Every Action Based on the cost function"
    # Tuning constants for the cost function.
    Destination_Threshold = 15   # base reluctance to draw destination cards
    Wild_Card_Value = 2          # perceived worth of picking up a wild card
    Wild_Card_Cost = 9           # penalty for spending a wild card
    Threat_Action_Weight = 0     # weight when combined with other cost
    gui_debug = False

    def __init__(self, name):
        CFBaseAI.__init__(self, name)
        # Running score of the edges still needed to finish the current path.
        self.remaining_edge_score = 0
        # Edges an opponent might take from us, with their urgency scores
        # (populated by subclasses that override eval_threatened_edges).
        self.threatened_edges = []
        self.threatened_edges_score = []

    def make_decision(self, game):
        """
        Evaluate every available action and select the best
        :param game:
        :return: the chosen Action
        """
        # Refresh the total score of edges we still need to claim.
        self.remaining_edge_score = 0
        for edge in self.remaining_edge:
            self.remaining_edge_score += board.get_scoring()[edge.cost]
        # evaluate the threatened edges first (no-op in this class)
        self.eval_threatened_edges()
        # decision making part
        if not self.opponent_name:
            self.opponent_name = game.get_opponents_name(self)
        # Score every available action with the cost function.
        values = []
        for action in self.available_actions:
            value = self.eval_action(action)
            values.append(value)
        if self.print_debug:
            self.print_cards_needed()
        # Greedy: take the highest-valued action.
        action = self.available_actions[values.index(max(values))]
        self.action_history.append(action)
        return action

    def eval_threatened_edges(self):
        # Intentionally empty here; implemented by the combined AI subclass.
        pass

    def eval_action(self, action):
        """
        Evaluate action based on path and cost function
        :param action: the action to be evaluated
        :return: the value of the action
        """
        value = 0
        if action.is_connect():
            # Add the value of a threatened edge; in CFAE the weight is 0,
            # so this has no effect (subclasses may change the weight).
            if self.threatened_edges:
                for id, edge in enumerate(self.threatened_edges):
                    if action.edge == edge:
                        value += self.threatened_edges_score[id] * self.Threat_Action_Weight
                        if self.print_debug:
                            print action
                            print '#### Threaten Action #####:', value
            # if we have path, we double reward the action
            if self.path is not None:
                if action.edge in self.remaining_edge:
                    # Intuition: when only a few edges remain, claiming one
                    # of them should score very highly.
                    value += self.Wild_Card_Value + board.get_scoring()[action.edge.cost] \
                             + self.path.score - self.remaining_edge_score
                    if self.print_debug:
                        print "Path action ", action
                        print "Before: ", value
                else:  # edge is not in path
                    # Intuition: never claim routes off the planned path.
                    value += -1
                # Subtract the cost of the cards spent if we have a path.
                for card in action.cards.elements():
                    # if card is Wild card
                    if card == Colors.none:
                        value -= self.Wild_Card_Cost
                    else:
                        # For a gray edge, avoid spending cards needed elsewhere.
                        if action.edge.color == Colors.none:
                            value -= self.cards_needed[card]
            else:  # if path is None
                # Without a plan, prefer the highest-scoring edge available.
                value += board.get_scoring()[action.edge.cost]
            if action.edge in self.remaining_edge or action.edge in self.threatened_edges:
                if self.print_debug:
                    print "After: ", value
            return value
        if action.is_draw_destination():
            if self.info.destinations:
                return -1
            else:  # if we don't have destination card
                # Fewer cars left -> drawing new destinations is riskier.
                value = -self.Destination_Threshold + self.info.num_cars
                return value
        if action.is_draw_deck():
            value += 1
            return value
        if action.is_draw_face_up():
            if action.card == Colors.none:
                value += self.Wild_Card_Value
            else:
                value += self.cards_needed[action.card]
            if self.print_debug:
                print action, " has value ", value
            return value

    def game_ended(self, game):
        """
        end of the game, let's print some shit
        :param game:
        :return:
        """
        if self.print_debug:
            print "%s made decisions as below:" % self.name
            for action in self.action_history:
                print action
            print "########\nDi:To cancel the action print in cf_action_eval_ai.py line 25-26\n#########\n"
|
[
"di.zeng@transcendrobotics.com"
] |
di.zeng@transcendrobotics.com
|
41387b0edba51a241184f0ce0eaf133c66de62b7
|
7c92b1e9faf4f7823373dfecfb5ad3a4e4840961
|
/s11b-rm-ko.py
|
429618006e3dbaa1c40a3d7f7cf3de090141d053
|
[
"BSD-3-Clause"
] |
permissive
|
CardiacModelling/AtrialLowK
|
7fcd9db47c4551db4decc9b8113abac07edcd9bf
|
c73a294de137c416eb6dbe5e57231206bcef9d86
|
refs/heads/main
| 2023-08-10T21:24:45.472769
| 2023-08-02T13:10:34
| 2023-08-02T13:10:34
| 196,734,580
| 0
| 0
|
BSD-3-Clause
| 2023-08-02T13:10:35
| 2019-07-13T15:05:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,921
|
py
|
#!/usr/bin/env python3
#
# Rm calculations with different step sizes and durations
#
import os
import sys

import matplotlib.pyplot as plt
import matplotlib.gridspec
import myokit

import shared

# Get path for figure
fpath = shared.figure_dir()
fname = 'figure-s11b-rm-ko'
debug = 'debug' in sys.argv
shared.splash(fname)

# Create pacing protocol: 0.5 ms stimulus, 50 ms offset, 1 Hz.
cl = 1000
protocol = myokit.pacing.blocktrain(cl, duration=0.5, offset=50)

# Load and prepare model (pre-pacing is skipped in debug mode).
model = shared.model('voigt')
shared.prepare_model(model, protocol, pre_pace=not debug)

# Maximum time to show in plots (ms)
tmax = 800

# Time to pre-pace before logging (ms)
pre_time = 1000

# Create figure: one row per (dt, dv) combination, one column per K level.
fig = plt.figure(figsize=(9, 11.6))
fig.subplots_adjust(0.07, 0.045, 0.92, 0.99, hspace=0.095, wspace=0.09)
grid = matplotlib.gridspec.GridSpec(7, len(shared.ko_levels))
def rm_plot(fig, i, j, model, protocol, simulation, color, t, v, dt, dv):
"""Plot AP and Rm"""
simulation.reset()
d = simulation.run(tmax, log=[t, v]).npview()
ax = fig.add_subplot(grid[i, j])
if i == 6:
ax.set_xlabel('Time (ms)')
else:
ax.set_xticklabels('')
if j == 0:
ax.set_ylabel('V (mV)')
else:
ax.set_yticklabels('')
ax.plot(pre_time + d.time(), d[v], color=color)
ax = ax.twinx()
if j == 3:
ax.set_ylabel('Rm (MOhm)')
ax.yaxis.set_label_position('right')
else:
ax.set_yticklabels('')
ax.set_ylim(0, 3000)
ax.text(0.95, 0.85, f'dt={dt} dv={dv}',
horizontalalignment='right',
fontdict={'size': 8},
transform=ax.transAxes)
if j == 0:
ax.text(0.95, 0.70, model.name(),
horizontalalignment='right',
fontdict={'size': 7},
transform=ax.transAxes)
if not debug:
ts, rs = shared.rm(model, protocol, dt, dv, tmax, simulation)
ax.plot(pre_time + ts, rs, color='tab:green', ls=':')
rs[rs < 0] = float('nan')
ax.plot(pre_time + ts, rs, color='tab:green', label=f'{dt}ms {dv}mV')
# Plot for all
for i, (k, c) in enumerate(zip(shared.ko_levels, shared.ko_colors)):
print(f'Adding plots for {k}mM')
t = model.time()
v = model.label('membrane_potential')
model.labelx('K_o').set_rhs(k)
s = myokit.Simulation(model, protocol)
s.set_tolerance(1e-8, 1e-8)
if pre_time:
s.pre(pre_time)
rm_plot(fig, 0, i, model, protocol, s, c, t, v, 5, 5)
rm_plot(fig, 1, i, model, protocol, s, c, t, v, 10, 5)
rm_plot(fig, 2, i, model, protocol, s, c, t, v, 20, 2)
rm_plot(fig, 3, i, model, protocol, s, c, t, v, 10, 10)
rm_plot(fig, 4, i, model, protocol, s, c, t, v, 0.1, 2)
rm_plot(fig, 5, i, model, protocol, s, c, t, v, 0.1, 5)
rm_plot(fig, 6, i, model, protocol, s, c, t, v, 0.1, 10)
# Show / store
path = os.path.join(fpath, fname)
print('Saving to ' + path)
plt.savefig(path + '.png')
plt.savefig(path + '.pdf')
print('Done')
|
[
"michael.clerx@nottingham.ac.uk"
] |
michael.clerx@nottingham.ac.uk
|
5c6ed42382f1049861f15353853dc3dcd95f30ef
|
bbd77439acd004b394552d59c2a4eb2293a76387
|
/jam_ro/execute.py
|
cf88d32702ac6cca8bb4755154c7a4af3da496a2
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
platipusica/jampy-demo
|
e041833d18df47ab8a3c8193e9a6c8695b686d7c
|
ecd30c028b66ede4672a951c93d6d60aca1bba39
|
refs/heads/master
| 2022-12-08T21:41:47.425379
| 2022-12-07T02:45:15
| 2022-12-07T02:45:15
| 135,378,069
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,682
|
py
|
from __future__ import division
import sys, os
import datetime
import traceback
from werkzeug._compat import string_types
from .common import consts, error_message
def execute_select(cursor, db_module, command):
    """Run a SELECT statement and return the driver-processed rows."""
    try:
        cursor.execute(command)
    except Exception as exc:
        # Log the full traceback, then let the caller handle the failure.
        consts.app.log.exception(error_message(exc))
        raise
    rows = cursor.fetchall()
    return db_module.process_sql_result(rows)
def execute(cursor, command, params=None):
    """Run a statement with optional bound parameters; re-raise on failure."""
    call_args = (command, params) if params else (command,)
    try:
        cursor.execute(*call_args)
    except Exception as exc:
        # Log with traceback; the caller decides how to recover.
        consts.app.log.exception(error_message(exc))
        raise
def execute_command(cursor, db_module, command, params=None, select=False):
    """Dispatch a single SQL string to the SELECT or non-SELECT path."""
    if select:
        return execute_select(cursor, db_module, command)
    return execute(cursor, command, params)
def process_delta(cursor, db_module, delta, master_rec_id, result):
    """
    Apply one dataset change-set (delta) and record what happened.

    ``delta`` is a pair ``(ID, sqls)``; each entry of ``sqls`` bundles the
    main statement, its params, bookkeeping ``info``, a history statement
    and nested detail deltas, which are applied recursively with the newly
    created record id as their master. Outcomes are written into ``result``
    in place; nothing is returned.
    """
    ID, sqls = delta
    result['ID'] = ID
    changes = []
    result['changes'] = changes
    for sql in sqls:
        (command, params, info, h_sql, h_params, h_del_details), details = sql
        if h_del_details:
            # History cleanup: delete history rows for details chosen by d_select.
            for d_select, d_sql, d_params in h_del_details:
                ids = execute_select(cursor, db_module, d_select)
                for i in ids:
                    d_params[1] = i[0]
                    execute(cursor, d_sql, db_module.process_sql_params(d_params, cursor))
        if info:
            rec_id = info.get('pk')
            inserted = info.get('inserted')
            if inserted:
                # Point the inserted detail row at its master record.
                master_pk_index = info.get('master_pk_index')
                if master_pk_index:
                    params[master_pk_index] = master_rec_id
                pk_index = info.get('pk_index')
                gen_name = info.get('gen_name')
                # Pre-fetch the primary key from a sequence for backends
                # without lastrowid support (sequence/generator style DBs).
                if not rec_id and db_module.get_lastrowid is None and gen_name and \
                    not pk_index is None and pk_index >= 0:
                    next_sequence_value_sql = db_module.next_sequence_value_sql(gen_name)
                    if next_sequence_value_sql:
                        cursor.execute(next_sequence_value_sql)
                        rec = cursor.fetchone()
                        rec_id = rec[0]
                        params[pk_index] = rec_id
            if params:
                params = db_module.process_sql_params(params, cursor)
            if command:
                # Optional statements to run immediately before/after the main one.
                before = info.get('before_command')
                if before:
                    execute(cursor, before)
                execute(cursor, command, params)
                after = info.get('after_command')
                if after:
                    execute(cursor, after)
            if inserted and not rec_id and db_module.get_lastrowid:
                rec_id = db_module.get_lastrowid(cursor)
            result_details = []
            if rec_id:
                changes.append({'log_id': info['log_id'], 'rec_id': rec_id, 'details': result_details})
            # Recurse into detail deltas with this record as their master.
            for detail in details:
                result_detail = {}
                result_details.append(result_detail)
                process_delta(cursor, db_module, detail, rec_id, result_detail)
        elif command:
            execute(cursor, command, params)
        if h_sql:
            # Write the change-history row for this record.
            if not h_params[1]:
                h_params[1] = rec_id
            h_params = db_module.process_sql_params(h_params, cursor)
            execute(cursor, h_sql, h_params)
def execute_delta(cursor, db_module, command, params, delta_result):
    # Unwrap the change-set dict and apply it; outcomes land in delta_result.
    # NOTE(review): `params` is accepted but unused here.
    delta = command['delta']
    process_delta(cursor, db_module, delta, None, delta_result)
def execute_list(cursor, db_module, command, delta_result, params, select):
    """Execute a heterogeneous list of commands, returning the last result."""
    res = None
    for item in command:
        kind = type(item)
        if kind in string_types:
            # Plain SQL string.
            res = execute_command(cursor, db_module, item, params, select)
        elif kind == dict:
            # Dataset change-set.
            res = execute_delta(cursor, db_module, item, params, delta_result)
        elif kind == list:
            # Nested command list: recurse.
            res = execute_list(cursor, db_module, item, delta_result, params, select)
        elif kind == tuple:
            # (sql, params) pair with its own parameters.
            res = execute_command(cursor, db_module, item[0], item[1], select)
        elif not item:
            # Empty/None entries are silently skipped.
            pass
        else:
            raise Exception('server_classes execute_list: invalid argument - command: %s' % command)
    return res
def execute_sql_connection(connection, command, params, select, db_module, close_on_error=False, autocommit=True):
    """
    Execute ``command`` on an open connection.

    ``command`` may be a SQL string, a delta dict, or a (nested) list of
    these. Always returns ``(connection, (result, error))``; ``connection``
    comes back as ``None`` when ``close_on_error`` was set and a failure
    closed it.
    """
    delta_result = {}
    result = None
    error = None
    info = ''
    try:
        cursor = connection.cursor()
        command_type = type(command)
        if command_type in string_types:
            result = execute_command(cursor, db_module, command, params, select)
        elif command_type == dict:
            # NOTE(review): assigned to `res` (unused); the real outcome is
            # collected in delta_result and picked up below.
            res = execute_delta(cursor, db_module, command, params, delta_result)
        elif command_type == list:
            result = execute_list(cursor, db_module, command, delta_result, params, select)
        else:
            result = execute_command(cursor, db_module, command, params, select)
        if autocommit:
            # SELECTs have nothing to persist; roll back to release locks.
            if select:
                connection.rollback()
            else:
                connection.commit()
        if delta_result:
            result = delta_result
    except Exception as x:
        try:
            if connection:
                connection.rollback()
                if close_on_error:
                    connection.close()
            error = str(x)
            if not error:
                error = 'SQL execution error'
            traceback.print_exc()
        finally:
            # Signal to the caller that the connection is gone.
            if close_on_error:
                connection = None
    finally:
        result = connection, (result, error)
    return result
def execute_sql(db_module, db_server, db_database, db_user, db_password,
        db_host, db_port, db_encoding, connection, command,
        params=None, select=False):
    """Open a connection if none was given, then run ``command`` on it."""
    if connection is None:
        try:
            connection = db_module.connect(
                db_database, db_user, db_password,
                db_host, db_port, db_encoding, db_server)
        except Exception as exc:
            # Connection failure: log it and report in the standard shape.
            consts.app.log.exception(error_message(exc))
            return None, (None, str(exc))
    return execute_sql_connection(
        connection, command, params, select, db_module, close_on_error=True)
|
[
"dbabic"
] |
dbabic
|
da0f752f37d66f5033607317460320c51b7d99e2
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_f/vpc_create.py
|
72acec139ecc5774ba67c1d8199de44fc116c546
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the project root importable when this file is run directly.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    delete-vpc : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-vpc.html
    describe-vpcs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-vpcs.html
    """
    # Record/prompt the parameters for the "aws ec2 create-vpc" command.
    write_parameter("ec2", "create-vpc")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
eb4921718ea76bd76fd0d09bef6d3040445b07fe
|
bfd6ac084fcc08040b94d310e6a91d5d804141de
|
/PulseSequences2/multi2d_test2.py
|
1609e844e7a2e84b959142d2d35d97635fe46e69
|
[] |
no_license
|
jqwang17/HaeffnerLabLattice
|
3b1cba747b8b62cada4467a4ea041119a7a68bfa
|
03d5bedf64cf63efac457f90b189daada47ff535
|
refs/heads/master
| 2020-12-07T20:23:32.251900
| 2019-11-11T19:26:41
| 2019-11-11T19:26:41
| 232,792,450
| 1
| 0
| null | 2020-01-09T11:23:28
| 2020-01-09T11:23:27
| null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import numpy as np
from common.devel.bum.sequences.pulse_sequence import pulse_sequence
from labrad.units import WithUnit as U
from treedict import TreeDict
from common.client_config import client_info as cl
from multi_test import multi_test
class multi2d_test2(pulse_sequence):
    # 2-D composite scan wrapper around the multi_test sequence;
    # the second dimension scans the background heating time.
    is_2dimensional = True
    is_composite = True
    show_params = ['NSY.pi_time']
    scannable_params = {
        'Heating.background_heating_time': [(0., 5000., 500., 'us'), 'current']
    }
    fixed_params = {'StateReadout.ReadoutMode':'pmt'}
    sequence = multi_test

    @classmethod
    def run_finally(cls, cxn, parameter_dct, all_data, data_x):
        # Value handed back to the scan framework after each 2-D point.
        # NOTE(review): constant placeholder — presumably a test stub.
        return 0.1
|
[
"haeffnerlab@gmail.com"
] |
haeffnerlab@gmail.com
|
b2a641f6cfdf9e6783620ae9de8ec4a54e4811f0
|
562a487181ba6605dd119ba7fb14f3ee7e4b0832
|
/CSE_231/Lab/Lab 9/lab09a.py
|
962322851fca48f296e96809c56cffeb322ec31a
|
[] |
no_license
|
JudeJang7/CSE_231
|
d25930ff9c43e909dfff4fc483cdf4c893286d87
|
a91148d843cecab908357fda8d3ec8fd59faa365
|
refs/heads/master
| 2020-04-05T01:11:14.994327
| 2018-11-06T18:12:49
| 2018-11-06T18:12:49
| 156,426,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
import string
def build_word_set(input_file):
    """Collect every distinct cleaned word found in *input_file*.

    Words are lowercased and stripped of leading/trailing punctuation;
    tokens that are pure punctuation are discarded.
    """
    words = set()
    for line in input_file:
        for raw_token in line.strip().split():
            cleaned = raw_token.lower().strip(string.punctuation)
            if cleaned:
                words.add(cleaned)
    return words
def compare_files(file1, file2):
    """Print word-set statistics for two open text files.

    Reports the size of the union (distinct words across both files,
    counted once) and of the intersection (words present in both).
    """
    words1 = build_word_set(file1)
    words2 = build_word_set(file2)

    all_words = words1 | words2
    print("Total unique words:", len(all_words))

    shared_words = words1 & words2
    print("Unique words that appear in both files:", len(shared_words))
######################################################################
# Driver: compare the two bundled documents.
f1 = open( "document1.txt" )
f2 = open( "document2.txt" )
compare_files( f1, f2 )
f1.close()
f2.close()
|
[
"noreply@github.com"
] |
JudeJang7.noreply@github.com
|
5f9505e1e1b70de95dcb28c6046a8d0e0f7887d4
|
99d268ebdbd2ace4f1d7a77287233f79d3e568a6
|
/ABC.py
|
174dd811d1789206877702fd869b7f2f1feb263a
|
[] |
no_license
|
hakannatayy/KutuphaneSqlite
|
9ed1ec1fdbf08059451f9c9d8ecde7df419e03ba
|
e5dbfd1b49c1edd0967d8395daa2dc3b645012ac
|
refs/heads/master
| 2020-06-19T22:12:50.218842
| 2019-07-14T23:39:15
| 2019-07-14T23:39:15
| 196,893,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Tiny terminal client for the Kitaplar.sqlite library database:
# search the "kitaplik" table either by book title (kitap) or author (yazar).
import sqlite3

db = sqlite3.connect("Kitaplar.sqlite")
imlec = db.cursor()

menu="""
[1] Kitap Ara
[2] Yazar Ara
"""
print(menu)
islem=input("işleminiz: ")

if islem =="1":
    isim = input("Kitap Adı: ")
    # Parameterized query: the original interpolated raw user input into the
    # SQL string with .format(), which allows SQL injection and breaks on
    # titles containing quotes.
    imlec.execute("SELECT * FROM 'kitaplik' WHERE kitap = ?", (isim,))
    veriler = imlec.fetchall()
    for i in veriler:
        print(i)

if islem =="2":
    isim = input("Yazar Adı: ")
    imlec.execute("SELECT * FROM 'kitaplik' WHERE yazar = ?", (isim,))
    veriler = imlec.fetchall()
    for i in veriler:
        print(i)

db.close()
|
[
"noreply@github.com"
] |
hakannatayy.noreply@github.com
|
5fda096a90541b4f8f01c8692ee9f34c6977c70a
|
b40a140a911279f3c61737367ab8f3b7c15fe98b
|
/avakas/get_parameters_file.py
|
6f6976a02b4d1dc3baa10e6796e10d3f55ed8aa2
|
[] |
no_license
|
AurelienNioche/HotellingBathtub
|
80fef9b4106454ec339a6c106c52738f1e95e77b
|
5b370a20b1d2417022fd2a6de8a7a4baeeda321e
|
refs/heads/master
| 2021-05-06T13:02:04.130850
| 2018-02-16T22:47:01
| 2018-02-16T22:47:01
| 113,213,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
import os
def get_parameters_file(i):
    """Return the i-th (alphabetically sorted) file path under "tasks"."""
    task_dir = "tasks"
    candidates = [
        os.path.join(task_dir, entry)
        for entry in os.listdir(task_dir)
        if os.path.isfile(os.path.join(task_dir, entry))
    ]
    return sorted(candidates)[i]
|
[
"nioche.aurelien@gmail.com"
] |
nioche.aurelien@gmail.com
|
5d2b4dfaf7911d6060185e33b0f2355572409db8
|
9137e1b185c5588f6e5f057a2a23f85aaa53ffcb
|
/time.py
|
7d3770849e29989662a33eb6c90de6916c0563f5
|
[] |
no_license
|
jslijb/python3.x
|
e8335078cab08939984fb88261aea96f957f4886
|
5675c2cd23b3b9ac63ac13dffb02a8fa3c41b95b
|
refs/heads/master
| 2021-09-02T23:23:23.109466
| 2018-01-04T03:13:55
| 2018-01-04T03:13:55
| 114,987,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
# Countdown using Tkinter
from Tkinter import *
import time
import tkMessageBox
class App:
    """Minimal Tkinter countdown UI: a seconds entry plus Start/QUIT buttons.

    (Python 2: uses Tkinter / tkMessageBox and the module-level ``root``
    and ``lbl1`` widgets created below.)
    """

    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        self.entryWidget = Entry(frame)
        self.entryWidget["width"] = 15
        self.entryWidget.pack(side=LEFT)
        self.hi_there = Button(frame, text="Start", command=self.start)
        self.hi_there.pack(side=LEFT)
        self.button = Button(frame, text="QUIT", fg="red", command=frame.quit)
        self.button.pack(side=LEFT)

    def start(self):
        # Read the entry; an empty field is silently ignored.
        # NOTE(review): a non-numeric entry raises ValueError here.
        text = self.entryWidget.get().strip()
        if text != "":
            num = int(text)
            self.countDown(num)

    def countDown(self, seconds):
        # Drive the countdown on the global lbl1, blocking with time.sleep()
        # between updates (works, but freezes other UI events while counting).
        lbl1.config(bg='yellow')
        lbl1.config(height=3, font=('times', 20, 'bold'))
        for k in range(seconds, 0, -1):
            lbl1["text"] = k
            root.update()
            time.sleep(1)
        lbl1.config(bg='red')
        lbl1.config(fg='white')
        lbl1["text"] = "Time up!"
        tkMessageBox.showinfo("Time up!", "Time up!")
def GetSource():
    """Open a small dialog asking for a source file path.

    Bug fix: this module does ``from Tkinter import *``, so the original
    ``Tkinter.Toplevel`` / ``Tkinter.Entry`` / ``Tkinter.Button`` references
    raised NameError (no ``Tkinter`` name is bound). Use the star-imported
    names directly.

    NOTE(review): ``source`` and ``update_specs`` are not defined anywhere
    in this file — this function looks like dead code pasted from another
    program and still cannot run until those are supplied.
    """
    get_window = Toplevel(root)
    get_window.title('Source File?')
    Entry(get_window, width=30,
          textvariable=source).pack()
    Button(get_window, text="Change",
           command=lambda: update_specs()).pack()
# Build the main window: a resizable label for the countdown display plus
# the App control row, then enter the Tk event loop.
root = Tk()
root.title("Countdown")
lbl1 = Label()
lbl1.pack(fill=BOTH, expand=1)
app = App(root)
root.mainloop()
|
[
"jslijb@126.com"
] |
jslijb@126.com
|
464e3a6a337dc9407f02ce39e571e1809949a6a1
|
a719b8d93e0517badd0d9c3c61c7e3149fb80b46
|
/exhaustive_search.py
|
38e7219edf5abd2c29d6cd6ef898a61c60c08c02
|
[] |
no_license
|
srishilesh/Optimization_Algorithms
|
01b9a8648b788b87a443ef07299b44ecb74a8791
|
ff688e5a90b7d32844bbef2b7e3fcdd51d33bdc4
|
refs/heads/master
| 2020-08-05T12:40:31.154220
| 2019-10-03T06:17:01
| 2019-10-03T06:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Bracket the minimum of f(x) = x^2 + 54/x over [a, b] in n equal steps.
print("\n *** EXHAUSTIVE SEARCH METHOD *** \n")
a = int(input("Enter the lower bound : "))
b = int(input("Enter the upper bound : "))
n = int(input("Enter the number of steps : "))
def func(x):
    """Objective f(x) = x**2 + 54/x, with a large penalty at x = 0."""
    if x == 0:
        # Undefined at zero: return a huge value so the search skips it.
        return 100000
    return x * x + 54 / x
# Step width and the initial three-point window (x1, x2, x3).
cx = (b-a)/n
x1 = a
x2 = x1 + cx
x3 = x2 + cx
f = 0
while(True):
    # A local minimum is bracketed when the middle point is the lowest.
    if(func(x1)>=func(x2) and func(x2)<=func(x3)):
        f = 1
        break
    else:
        # Slide the window one step to the right.
        # NOTE(review): if no bracket is ever found this loop never
        # terminates — the "Minimum not found" branch below is unreachable.
        f = 0
        x1 = x2
        x2 = x3
        x3 = x2 + cx
if(f==1):
    print("Minimum lies between {} and {} ".format(x1,x3))
else:
    print("Minimum not found")
|
[
"noreply@github.com"
] |
srishilesh.noreply@github.com
|
7c238c319c6f6d8ba62cadcb28faf56b3f32ab3b
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/AtCoder/AtCoder Beginner Contest 247/B.py
|
973864707113b363529868eab237a721c0f7de7b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860
| 2023-07-09T15:30:27
| 2023-07-09T15:30:27
| 3,009,276
| 51
| 23
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
# AtCoder ABC 247 B: every person must be able to pick either their family
# or given name as a nickname without colliding with anyone else's names.
# (Python 2: raw_input / xrange / print statement.)
from collections import defaultdict

n = int(raw_input())

d1 = defaultdict(int)  # occurrences of each family name
d2 = defaultdict(int)  # occurrences of each given name
names = []
for i in xrange(n):
    name1, name2 = raw_input().split()
    d1[name1] += 1
    d2[name2] += 1
    names.append((name1, name2))

flag = True
for (name1, name2) in names:
    if name1 == name2:
        # Both candidate nicknames are the same string: it must not appear
        # anywhere else as a family or given name.
        if d1[name1] > 1 or d2[name1] > 1:
            flag = False
            break
    else:
        # At least one of the two names must be collision-free.
        if ((d1[name1] <= 1 and d2[name1] == 0) or
            (d1[name2] == 0 and d2[name2] <= 1)):
            pass
        else:
            flag = False
            break

if flag:
    print 'Yes'
else:
    print 'No'
'''
^^^^TEST^^^^
3
tanaka taro
tanaka jiro
suzuki hanako
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
aaa bbb
xxx aaa
bbb yyy
-----
No
$$$TEST$$$
^^^^TEST^^^^
2
tanaka taro
tanaka taro
-----
No
$$$TEST$$$
^^^^TEST^^^^
3
takahashi chokudai
aoki kensho
snu ke
-----
Yes
$$$TEST$$$
^^^^TEST^^^^
3
a a
b b
c a
-----
No
$$$TEST$$$
'''
|
[
"noreply@github.com"
] |
Wizmann.noreply@github.com
|
f58ab100b7516d0f0afa1a0daff5254a61b54ada
|
eeb609b8555a4ac6b948c5ce2485a12a88ad2c4b
|
/app_calendar/views.py
|
0c73d3b88588054b4b49ea2e17cbd1509693954f
|
[] |
no_license
|
Karasevgen1205/The_calendar
|
ab99b99ecc358024c0b49c90d457c0c10d04e577
|
c90ab3fbf9f113379b12f6b255ccda100d7241b0
|
refs/heads/master
| 2023-03-12T00:59:22.492732
| 2021-03-01T19:33:56
| 2021-03-01T19:33:56
| 332,513,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
import django_filters
import holidays
from django.http import request
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend, FilterSet, BaseInFilter, CharFilter, RangeFilter
from ics import Calendar, Event
from rest_framework.generics import ListAPIView, UpdateAPIView, CreateAPIView
from rest_framework.viewsets import ModelViewSet
from app_calendar.models import Holiday, Country
from app_calendar.serializer import CountrySerializer, EventSerializer, UserSerializer, \
HolidaySerializerRead, HolidaySerializerWrite
class ListCountries(ListAPIView):
    """Read-only list endpoint returning every Country."""
    serializer_class = CountrySerializer
    # queryset = Country.objects.all()
    # filter_backends = (DjangoFilterBackend)
    # filter_fields = ['name']

    def get_queryset(self):
        # Name-based filtering was prototyped here and left disabled.
        # if self.kwargs.get('name'):
        #     return Country.objects.filter(name=self.kwargs.get('name'))
        return Country.objects.all()
class CountryViewSet(ListAPIView):
    """Country list endpoint with ?name= filtering."""
    serializer_class = CountrySerializer
    queryset = Country.objects.all()
    # Bug fix: filter_backends must be an iterable of backend classes.
    # The original `(DjangoFilterBackend)` is just the class itself (no
    # trailing comma), which DRF then tries to iterate and fails at
    # request time.
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ['name']
# class CharFilterInFilter(BaseInFilter, CharFilter):
# pass
#
# class CountryFilter(FilterSet):
# name = CharFilterInFilter(field_name='country__name', lookup_expr='in')
# id = RangeFilter()
#
# class Meta:
# model = Country
# fields = ['id', 'name']
class ListHolidays(ListAPIView):
    """Read-only list endpoint returning every Holiday."""
    serializer_class = HolidaySerializerRead

    def get_queryset(self):
        # Per-id filtering was prototyped here and left disabled.
        # if self.kwargs.get('id'):
        #     return Holiday.objects.filter(id=self.kwargs.get('id'))
        return Holiday.objects.all()
class ListCountryHolidays(ListAPIView):
    """Holidays for one country (URL kwarg ``id``), or all holidays."""
    serializer_class = HolidaySerializerRead

    def get_queryset(self):
        # Filter by the country pk captured from the URL, when present.
        if self.kwargs.get('id'):
            return Holiday.objects.filter(country=self.kwargs.get('id'))
        return Holiday.objects.all()
class CreateHolidays(CreateAPIView):
    """POST endpoint creating a Holiday via the write serializer."""
    serializer_class = HolidaySerializerWrite
|
[
"Karasevgen1205@yandex.ru"
] |
Karasevgen1205@yandex.ru
|
d0d20fa9e6734a72dee78b47ba0146e83bb3c2c1
|
aa0d747ebd4fe6a0d6ba421df1b55bbf7e56449f
|
/lobby/admin.py
|
1e848b1ebea4a77de45e23bc10fef9a11807e178
|
[] |
no_license
|
cardholder/server-side
|
0dbaca74d1962bd4813f8ca38f0b1ceccfa49019
|
bbe0eda02286d5722882e1d4b4c95a31024176f4
|
refs/heads/master
| 2022-11-14T02:26:25.979819
| 2019-07-14T10:54:58
| 2019-07-14T10:54:58
| 183,203,087
| 0
| 0
| null | 2022-11-04T19:35:10
| 2019-04-24T10:08:01
|
Python
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
from django.contrib import admin

# Star import supplies the Game/Card/CardSet model names registered below.
from .models import *

# Expose the lobby models in the Django admin site.
admin.site.register(Game)
admin.site.register(Card)
admin.site.register(CardSet)
|
[
"stefan.kroeker@fh-bielefeld.de"
] |
stefan.kroeker@fh-bielefeld.de
|
0615c365276b7014f1b2c30106aefa87169532e2
|
20190c7bfbb96819ae707c0c0dd36d4628bd7661
|
/cons.py
|
6a73ceda7a3c7f2a81f2065d832dc58db9f7faf6
|
[] |
no_license
|
Jananisathya/jananisathya
|
f2ccea6da30ee70a780ce8410ec6c2cd4bf90ea0
|
e6b38d860d93690a9176a679a47cd1fa76043412
|
refs/heads/master
| 2020-06-29T20:47:23.069326
| 2020-01-07T04:22:20
| 2020-01-07T04:22:20
| 200,620,459
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
# Classify a single typed character as a vowel or a consonant.
ch = input("Enter Character : ")
# Membership test replaces the original ten-way `or` chain.
if ch in "aeiouAEIOU":
    print("Vowel")
else:
    # Fixed output wording: the original printed "Constant".
    print("Consonant")
|
[
"noreply@github.com"
] |
Jananisathya.noreply@github.com
|
62c4cad48205db17deb8d43ff05d2268ff749191
|
1e60b1b311e4e1ced836f43ef055c65f5e78f7ef
|
/test/functional/feature_block.py
|
1e075691fa322b966b361988062a716df107a1a1
|
[
"MIT"
] |
permissive
|
liufile/BlackHatWallet
|
529bd4b492dbf672aa3d7b1f7dd456e53508fdc4
|
0e6b310fb6cb9bdb3b51a81ab55e606efed891f2
|
refs/heads/master
| 2023-04-24T13:49:07.117712
| 2021-05-01T12:34:50
| 2021-05-01T12:34:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,998
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing.
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
from test_framework.mininode import network_thread_start
import struct
class PreviousSpendableOutput():
    """Reference to a spendable transaction output: (tx, output index n)."""

    def __init__(self, tx=None, n=-1):
        # Bug fix: the old default `tx=CTransaction()` was evaluated once at
        # definition time, so every default-constructed instance shared the
        # same mutable transaction object. Create a fresh one per instance.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
    """A block whose transaction-count varint is deliberately over-long."""

    def __init__(self, header=None):
        super(CBrokenBlock, self).__init__(header)

    def initialize(self, base_block):
        # Copy another block's transactions and recompute the merkle root.
        self.vtx = copy.deepcopy(base_block.vtx)
        self.hashMerkleRoot = self.calc_merkle_root()

    def serialize(self, with_witness=False):
        r = b""
        # Skips CBlock's own serialize (presumably header-only base class
        # output — confirm against the test framework's class hierarchy).
        r += super(CBlock, self).serialize()
        # Malformed length: 0xFF marker + 8-byte count instead of the
        # minimal compact varint encoding.
        r += struct.pack("<BQ", 255, len(self.vtx))
        for tx in self.vtx:
            if with_witness:
                r += tx.serialize_with_witness()
            else:
                r += tx.serialize_without_witness()
        return r

    def normal_serialize(self):
        # Well-formed serialization, for comparison with the broken one.
        r = b""
        r += super(CBrokenBlock, self).serialize()
        return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
def set_test_params(self):
    # One node, fresh chain; all coinbases pay to a fixed test key.
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.block_heights = {}  # block sha256 -> chain height
    self.coinbase_key = CECKey()
    self.coinbase_key.set_secretbytes(b"horsebattery")
    self.coinbase_pubkey = self.coinbase_key.get_pubkey()
    self.tip = None    # block object currently treated as the chain tip
    self.blocks = {}   # test number -> block object
def add_options(self, parser):
    # Extend framework options with a switch for the expensive sub-tests.
    super().add_options(parser)
    parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
    # Drive the comparison TestManager over all generated test instances.
    self.test = TestManager(self, self.options.tmpdir)
    self.test.add_all_connections(self.nodes)
    network_thread_start()
    self.test.run()
def add_transactions_to_block(self, block, tx_list):
    """Rehash every transaction and append them all to block.vtx.

    Fix: the original used a list comprehension purely for its side effect
    (``[tx.rehash() for tx in tx_list]``), building and discarding a list;
    a plain loop states the intent.
    """
    for tx in tx_list:
        tx.rehash()
    block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
    """Build an (unsigned) transaction spending output n of spend_tx."""
    tx = create_transaction(spend_tx, n, b"", value, script)
    return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
    scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
    if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend: empty scriptSig suffices
        tx.vin[0].scriptSig = CScript()
        return
    # Otherwise sign with the test coinbase key (SIGHASH_ALL).
    (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
    tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Convenience: build, sign and rehash a spend of spend_tx's output n."""
    tx = self.create_tx(spend_tx, n, value, script)
    self.sign_tx(tx, spend_tx, n)
    tx.rehash()
    return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
    """Create block `number` on top of the current tip (or genesis).

    When `spend` is given, one satoshi of the spent output is re-spent via
    `script` and the remainder is credited to the coinbase as fees. With
    solve=False the returned block has no valid proof of work yet.
    """
    if self.tip == None:
        base_block_hash = self.genesis_hash
        block_time = int(time.time())+1
    else:
        base_block_hash = self.tip.sha256
        block_time = self.tip.nTime + 1
    # First create the coinbase
    height = self.block_heights[base_block_hash] + 1
    coinbase = create_coinbase(height, self.coinbase_pubkey)
    coinbase.vout[0].nValue += additional_coinbase_value
    coinbase.rehash()
    if spend == None:
        block = create_block(base_block_hash, coinbase, block_time)
    else:
        coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
        self.sign_tx(tx, spend.tx, spend.n)
        self.add_transactions_to_block(block, [tx])
        block.hashMerkleRoot = block.calc_merkle_root()
    if solve:
        block.solve()
    # Track the new block as the tip and remember it by test number.
    self.tip = block
    self.block_heights[block.sha256] = height
    assert number not in self.blocks
    self.blocks[number] = block
    return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
    """Append new_transactions to a stored block, re-solve it, and make it the tip.

    The block's merkle root and proof of work are recomputed; if its hash
    changed, the height bookkeeping is re-keyed accordingly.
    """
    blk = self.blocks[block_number]
    self.add_transactions_to_block(blk, new_transactions)
    prev_hash = blk.sha256
    blk.hashMerkleRoot = blk.calc_merkle_root()
    blk.solve()
    # Mirror the internal bookkeeping done in next_block.
    self.tip = blk
    if blk.sha256 != prev_hash:
        self.block_heights[blk.sha256] = self.block_heights.pop(prev_hash)
    self.blocks[block_number] = blk
    return blk
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure blkcd isn't accepting b26
block(27, spend=out[7])
yield rejected(False)
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure blkcd isn't accepting b28
block(29, spend=out[7])
yield rejected(False)
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# A block with invalid work
tip(44)
b47 = block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target: #changed > to <
b47.nNonce += 1
b47.rehash()
yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64 but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all prevous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
    # Run the full-block acceptance test when executed as a script.
    FullBlockTest().main()
|
[
"contact@blackhatco.in"
] |
contact@blackhatco.in
|
8987a79b8238e079d6527786951d545fffd1ab1c
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/test/menu_sun_integration/infrastructure/aws/sqs/mocks/customer_mock.py
|
a7b78f7010ca6a18c5de255b002fa7e7ea1d8312
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409
| 2020-10-16T03:41:54
| 2020-10-16T03:41:54
| 304,511,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,321
|
py
|
def mock_queue_make_api_call(self, operation_name, kwarg):
    """Stand-in for botocore's low-level API dispatch, returning canned SQS
    responses so tests run without AWS connectivity.

    Handles 'SendMessage', 'ReceiveMessage' and 'DeleteMessage'. Any other
    operation name falls through and implicitly returns None.
    """
    # Canned acknowledgement for a successful SendMessage call.
    if operation_name == 'SendMessage':
        return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                'MessageId': '85e8a505-2ba4-4fa3-a93c-cc30bf5e65e7',
                'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                         'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
                                         'content-type': 'text/xml', 'content-length': '378'},
                                     'RetryAttempts': 0}}
    # One canned message whose Body is the JSON customer-integration payload
    # (seller, payment terms, metafields) the consumer under test parses.
    if operation_name == 'ReceiveMessage':
        return {'Messages': [{'MessageId': '92de7972-f8e5-4998-a182-3977455f8cb0',
                              'ReceiptHandle': 'AQEBWvhuG9mMCVO0LE7k'
                                               '+flexfAzfGFn4yGRI5Xm60pwu1RwlGot4GqWveL1tOYmUTM63bwR+OFj5CL'
                                               '/e1ZchKlZ0DTF6rc9Q+pyNdbIKckaVrfgbYySsZDkr68AtoWzFoIf0U68SUO83ys0ydK'
                                               '+TSHgpw38zKICpupwccqe67HDu2Vve6ATFtjHa10+w3fU6l63NRFnmNeDjuDw'
                                               '/uq86s0puouRFHQmoeNlLg'
                                               '/5wjlT1excIDKxlIvJFBoc420ZgxulvIOcblqUxcGIG6Ah6x3aJw27q14vT'
                                               '+0wRi9aoQ8dG0ys57OeWjlRRG3UII1J5uiShet9F15CKF3GZatNEZOOXkIqdQO'
                                               '+lMHIhwMt7wls2EMtVO4KFIdWokzIFhidzfAHMTANCoAD26gUsp2Z9UyZaA==',
                              'MD5OfBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                              'Body': '{"integration_type": "BRF","seller_id": 1,"seller_code": "ABC",'
                                      '"document": "00005234000121",'
                                      '"cep": "09185030",'
                                      '"credit_limit": "103240.72",'
                                      '"customer_id": "1",'
                                      '"payment_terms":['
                                      '{"deadline": 5,"description": "Payment 5","payment_type": "BOLETO"},'
                                      '{"deadline": 10,"description": "Payment 10","payment_type": "CHEQUE"}],'
                                      '"seller_metafields": [{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_7",'
                                      '"value": "007"},{"namespace": "CODIGO_PAGAMENTO","key": "BOLETO_14",'
                                      '"value": "014"}],'
                                      '"customer_metafields": [{"namespace": "Customer Namespace 1",'
                                      '"key": "Customer Key 1",'
                                      '"value": "Customer VALUE 1"},{"namespace": "Customer Namespace 2",'
                                      '"key": "Customer Key 2","value": "Customer VALUE 2"}]}'},
                             ],
                'ResponseMetadata': {'RequestId': '0ffbdfb3-809f-539e-84dd-899024785f25',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '0ffbdfb3-809f-539e-84dd-899024785f25',
                                         'date': 'Fri, 18 Oct 2019 11:31:51 GMT',
                                         'content-type': 'text/xml',
                                         'content-length': '892'}, 'RetryAttempts': 0}}
    # Canned acknowledgement for DeleteMessage (same metadata shape as send).
    if operation_name == 'DeleteMessage':
        return {'MD5OfMessageBody': 'a836c42e687e8a08e66a794a5dacd8c1',
                'ResponseMetadata': {'RequestId': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                     'HTTPStatusCode': 200,
                                     'HTTPHeaders': {
                                         'x-amzn-requestid': '7313c686-bca3-5d79-9295-90a51d270c9c',
                                         'date': 'Fri, 18 Oct 2019 11:17:24 GMT',
                                         'content-type': 'text/xml', 'content-length': '378'},
                                     'RetryAttempts': 0}}
|
[
"pfpacheco@gmail.com"
] |
pfpacheco@gmail.com
|
ad62e62ad82ee0af317cf8a52a60259baf388df5
|
5be744f908ea25bd5442dfb4cb8a24a0d7941e14
|
/projects/admin.py
|
c8920bf36677cf7e0c9cc134b005263cc4b8b0d0
|
[] |
no_license
|
CalebMuhia/JobsBoard
|
f986d7c4af939dded0a3e2f8305a444f3502bad3
|
66c40dd5151261bc7e4fb8309a6139d11604f215
|
refs/heads/master
| 2022-07-07T22:49:06.733313
| 2022-06-23T20:52:20
| 2022-06-23T20:52:20
| 4,616,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
__author__ = 'caleb'

from django.contrib import admin

# Import the concrete models explicitly rather than via `import *` so the
# registrations below are traceable to their definitions and linters can
# verify the names exist.
from projects.models import Project_categories, Projects

# Expose both project models in the Django admin with the default ModelAdmin.
admin.site.register(Project_categories)
admin.site.register(Projects)
|
[
"clbnjoroge@gmail.com"
] |
clbnjoroge@gmail.com
|
ff41fc1eefe176679341826517f20a6a0712d7c4
|
45b3fb7717c23b10d84efb3cfceb546cf0adaa97
|
/blog/posts/forms.py
|
b45ebd4e45d0c77d2ccca303794400d7874f7088
|
[] |
no_license
|
IvanovVitalii/project_n
|
0a7ca549ff0b28162d9f133b8abd1de63f66c9a6
|
8a12956b7aebdba32129b341c3ad5320ae916b89
|
refs/heads/main
| 2023-06-15T03:40:17.673498
| 2021-07-15T10:25:10
| 2021-07-15T10:25:10
| 382,883,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from django import forms
from posts.models import Post, Product
class LoginForm(forms.Form):
    """Plain (non-model) form collecting credentials for login."""
    username = forms.CharField()
    # PasswordInput renders as <input type="password"> so the value is masked.
    password = forms.CharField(widget=forms.PasswordInput)
class PostForm(forms.ModelForm):
    """ModelForm exposing only the user-editable fields of Post."""
    class Meta:
        model = Post
        fields = ('title', 'content',)
class ProductForm(forms.ModelForm):
    """ModelForm exposing only the user-editable fields of Product."""
    class Meta:
        model = Product
        fields = ('title', 'content',)
|
[
"ivanov.v.v13@gmail.com"
] |
ivanov.v.v13@gmail.com
|
92683c997042e0e8864518890e6c61563e68ef16
|
8cffac7fa29566c2ce8f4881e5cd8ee04a8b3476
|
/sample_pdfminer/table.py
|
cb270c2a18696cb70b634f0ec429619a3ce200d9
|
[] |
no_license
|
tsukko/sample-prg-python
|
c4ca293430adfa7de8579a3fe7b16561a5091fb4
|
82afffd767c504f5560c9bb7979f4a90ec9c6289
|
refs/heads/master
| 2023-03-04T22:58:39.269926
| 2021-02-10T18:23:27
| 2021-02-10T18:23:27
| 189,197,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
from pdfminer.layout import LTCurve
class _LTLabeledCurve(LTCurve):
    """Shared base: an axis-aligned rectangular LTCurve carrying a text
    payload readable via get_text().

    LTTableRect, LTTextBlock and LTBlock previously duplicated this
    constructor and accessor verbatim; they now share one implementation.
    """

    def __init__(self, text, bbox, stroke=False, fill=False, evenodd=False,
                 stroking_color=None, non_stroking_color=None):
        (x0, y0, x1, y1) = bbox
        self.text = text
        # Represent the bbox as the four corner points of a closed rectangle,
        # which is how LTCurve expects its geometry.
        LTCurve.__init__(self, 0, [(x0, y0), (x1, y0), (x1, y1), (x0, y1)],
                         stroke, fill, evenodd, stroking_color,
                         non_stroking_color)

    def get_text(self):
        """Return the text associated with this region."""
        return self.text


# Table region.
class LTTableRect(_LTLabeledCurve):
    pass


# Body-text paragraph block.
class LTTextBlock(_LTLabeledCurve):
    pass


# Generic block; unlike the others its text can be replaced after creation.
class LTBlock(_LTLabeledCurve):
    def set_text(self, text):
        self.text = text
|
[
"tsukko@gmail.com"
] |
tsukko@gmail.com
|
04481c8e9c3a8ab5864fbd9d4073e09189de4c58
|
0953f9aa0606c2dfb17cb61b84a4de99b8af6d2c
|
/python/ray/serve/http_proxy.py
|
e129f5d60cab56228bd2a379ba2a9be0ab162c29
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
oscarknagg/ray
|
da3dc03e24945ff4d5718fd35fc1b3408d8907eb
|
20d47873c9e8f5bbb80fe36e5d16256c337c4db3
|
refs/heads/master
| 2023-09-01T01:45:26.364731
| 2021-10-21T07:46:52
| 2021-10-21T07:46:52
| 382,402,491
| 2
| 1
|
Apache-2.0
| 2021-09-15T12:34:41
| 2021-07-02T16:25:05
|
Python
|
UTF-8
|
Python
| false
| false
| 13,432
|
py
|
import asyncio
import socket
import time
import pickle
from typing import Callable, List, Dict, Optional, Tuple
import uvicorn
import starlette.responses
import starlette.routing
import ray
from ray import serve
from ray.exceptions import RayActorError, RayTaskError
from ray.serve.common import EndpointInfo, EndpointTag
from ray.serve.long_poll import LongPollNamespace
from ray.util import metrics
from ray.serve.utils import logger
from ray.serve.handle import RayServeHandle
from ray.serve.http_util import HTTPRequestWrapper, receive_http_body, Response
from ray.serve.long_poll import LongPollClient
from ray.serve.handle import DEFAULT
# Maximum number of attempts made when the serving replica actor dies
# mid-request before giving up with an HTTP 500.
MAX_REPLICA_FAILURE_RETRIES = 10


async def _send_request_to_handle(handle, scope, receive, send):
    """Forward one ASGI HTTP request to a Serve handle and send the response.

    Retries (with exponential backoff) when the backend replica actor has
    died; responds with HTTP 500 when the retries are exhausted or the task
    raised inside the replica.
    """
    http_body_bytes = await receive_http_body(scope, receive, send)

    # Propagate Serve-specific routing headers to the handle.
    headers = {k.decode(): v.decode() for k, v in scope["headers"]}
    handle = handle.options(
        method_name=headers.get("X-SERVE-CALL-METHOD".lower(), DEFAULT.VALUE),
        shard_key=headers.get("X-SERVE-SHARD-KEY".lower(), DEFAULT.VALUE),
        http_method=scope["method"].upper(),
        http_headers=headers,
    )

    # scope["router"] and scope["endpoint"] contain references to a router
    # and endpoint object, respectively, which each in turn contain a
    # reference to the Serve client, which cannot be serialized.
    # The solution is to delete these from scope, as they will not be used.
    # TODO(edoakes): this can be removed once we deprecate the old API.
    scope.pop("router", None)
    scope.pop("endpoint", None)

    # NOTE(edoakes): it's important that we defer building the starlette
    # request until it reaches the backend replica to avoid unnecessary
    # serialization cost, so we use a simple dataclass here.
    request = HTTPRequestWrapper(scope, http_body_bytes)
    # Perform a pickle here to improve latency. Stdlib pickle for simple
    # dataclasses are 10-100x faster than cloudpickle.
    request = pickle.dumps(request)

    backoff_time_s = 0.05
    for retries in range(MAX_REPLICA_FAILURE_RETRIES):
        object_ref = await handle.remote(request)
        try:
            result = await object_ref
            break
        except RayActorError:
            logger.warning("Request failed due to replica failure. There are "
                           f"{MAX_REPLICA_FAILURE_RETRIES - retries} retries "
                           "remaining.")
            await asyncio.sleep(backoff_time_s)
            backoff_time_s *= 2
    else:
        # BUG FIX: the original fell through here with `result` unbound when
        # every retry failed, raising NameError inside the ASGI app. Send an
        # explicit 500 instead.
        await Response(
            "Request failed due to repeated replica failures.",
            status_code=500).send(scope, receive, send)
        return

    if isinstance(result, RayTaskError):
        error_message = "Task Error. Traceback: {}.".format(result)
        await Response(
            error_message, status_code=500).send(scope, receive, send)
    elif isinstance(result, starlette.responses.Response):
        await result(scope, receive, send)
    else:
        await Response(result).send(scope, receive, send)
class LongestPrefixRouter:
    """Router that performs longest prefix matches on incoming routes."""

    def __init__(self, get_handle: Callable):
        # Function to get a handle given a name. Used to mock for testing.
        self._get_handle = get_handle
        # Routes sorted in order of decreasing length.
        self.sorted_routes: List[str] = list()
        # Endpoints associated with the routes.
        # NOTE: project types are quoted (lazy) so the annotations don't
        # require the names at runtime.
        self.route_info: Dict[str, "EndpointTag"] = dict()
        # Contains a ServeHandle for each endpoint.
        self.handles: Dict[str, "RayServeHandle"] = dict()

    def endpoint_exists(self, endpoint: "EndpointTag") -> bool:
        """Return True if a handle is currently cached for `endpoint`."""
        return endpoint in self.handles

    def update_routes(self,
                      endpoints: Dict["EndpointTag", "EndpointInfo"]) -> None:
        """Replace the route table, reusing handles for surviving endpoints."""
        logger.debug(f"Got updated endpoints: {endpoints}.")

        existing_handles = set(self.handles.keys())
        routes = []
        route_info = {}
        for endpoint, info in endpoints.items():
            # Default case where the user did not specify a route prefix.
            if info.route is None:
                route = f"/{endpoint}"
            else:
                route = info.route

            routes.append(route)
            route_info[route] = endpoint
            if endpoint in self.handles:
                existing_handles.remove(endpoint)
            else:
                self.handles[endpoint] = self._get_handle(endpoint)

        # Clean up any handles that are no longer used.
        for endpoint in existing_handles:
            del self.handles[endpoint]

        # Routes are sorted in order of decreasing length to enable longest
        # prefix matching (key=len replaces the redundant lambda).
        self.sorted_routes = sorted(routes, key=len, reverse=True)
        self.route_info = route_info

    def match_route(self, target_route: str
                    ) -> Tuple[Optional[str], Optional["RayServeHandle"]]:
        """Return the longest prefix match among existing routes for the route.

        Args:
            target_route (str): route to match against.

        Returns:
            (matched_route (str), serve_handle (RayServeHandle)) if found,
            else (None, None).
        """
        for route in self.sorted_routes:
            if target_route.startswith(route):
                matched = False
                # If the route we matched on ends in a '/', then so does the
                # target route and this must be a match.
                if route.endswith("/"):
                    matched = True
                # If the route we matched on doesn't end in a '/', we need to
                # do another check to ensure that either this is an exact match
                # or the next character in the target route is a '/'. This is
                # to guard against the scenario where we have '/route' as a
                # prefix and there's a request to '/routesuffix'. In this case,
                # it should *not* be a match.
                elif (len(target_route) == len(route)
                      or target_route[len(route)] == "/"):
                    matched = True

                if matched:
                    endpoint = self.route_info[route]
                    return route, self.handles[endpoint]

        return None, None
class HTTPProxy:
    """This class is meant to be instantiated and run by an ASGI HTTP server.

    >>> import uvicorn
    >>> uvicorn.run(HTTPProxy(controller_name, controller_namespace))
    """

    def __init__(self, controller_name: str, controller_namespace: str):
        # Set the controller name so that serve will connect to the
        # controller instance this proxy is running in.
        ray.serve.api._set_internal_replica_context(None, None,
                                                    controller_name, None)

        # Used only for displaying the route table.
        self.route_info: Dict[str, EndpointTag] = dict()

        # Factory passed to the router; returns an async, pickled-request
        # handle for the named endpoint (missing_ok so lookups don't raise).
        def get_handle(name):
            return serve.api._get_global_client().get_handle(
                name,
                sync=False,
                missing_ok=True,
                _internal_pickled_http_request=True,
            )

        self.prefix_router = LongestPrefixRouter(get_handle)
        # Subscribe to route-table updates pushed by the controller actor;
        # _update_routes is invoked on this event loop for each change.
        self.long_poll_client = LongPollClient(
            ray.get_actor(controller_name, namespace=controller_namespace), {
                LongPollNamespace.ROUTE_TABLE: self._update_routes,
            },
            call_in_event_loop=asyncio.get_event_loop())
        # Per-route HTTP request counter exported via ray.util.metrics.
        self.request_counter = metrics.Counter(
            "serve_num_http_requests",
            description="The number of HTTP requests processed.",
            tag_keys=("route", ))

    def _update_routes(self,
                       endpoints: Dict[EndpointTag, EndpointInfo]) -> None:
        """Long-poll callback: rebuild the display table and the prefix router."""
        self.route_info: Dict[str, Tuple[EndpointTag, List[str]]] = dict()
        for endpoint, info in endpoints.items():
            # Default route is "/<endpoint>" when none was configured.
            route = info.route if info.route is not None else f"/{endpoint}"
            self.route_info[route] = endpoint

        self.prefix_router.update_routes(endpoints)

    async def block_until_endpoint_exists(self, endpoint: EndpointTag,
                                          timeout_s: float):
        """Poll the local route table until `endpoint` appears.

        Raises:
            TimeoutError: if the endpoint has not propagated within timeout_s.
        """
        start = time.time()
        while True:
            if time.time() - start > timeout_s:
                raise TimeoutError(
                    f"Waited {timeout_s} for {endpoint} to propagate.")
            for existing_endpoint in self.route_info.values():
                if existing_endpoint == endpoint:
                    return
            await asyncio.sleep(0.2)

    async def _not_found(self, scope, receive, send):
        """Send a 404 response listing where the route table can be inspected."""
        current_path = scope["path"]
        response = Response(
            f"Path '{current_path}' not found. "
            "Please ping http://.../-/routes for route table.",
            status_code=404)
        await response.send(scope, receive, send)

    async def __call__(self, scope, receive, send):
        """Implements the ASGI protocol.

        See details at:
            https://asgi.readthedocs.io/en/latest/specs/index.html.
        """
        assert scope["type"] == "http"

        self.request_counter.inc(tags={"route": scope["path"]})

        # Built-in debugging endpoint: dump the current route table as JSON.
        if scope["path"] == "/-/routes":
            return await starlette.responses.JSONResponse(self.route_info)(
                scope, receive, send)

        route_prefix, handle = self.prefix_router.match_route(scope["path"])
        if route_prefix is None:
            return await self._not_found(scope, receive, send)

        # Modify the path and root path so that reverse lookups and redirection
        # work as expected. We do this here instead of in replicas so it can be
        # changed without restarting the replicas.
        if route_prefix != "/":
            assert not route_prefix.endswith("/")
            scope["path"] = scope["path"].replace(route_prefix, "", 1)
            scope["root_path"] = route_prefix

        await _send_request_to_handle(handle, scope, receive, send)
@ray.remote(num_cpus=0)
class HTTPProxyActor:
    """Ray actor that hosts an HTTPProxy ASGI app inside a uvicorn server."""

    def __init__(self,
                 host: str,
                 port: int,
                 controller_name: str,
                 controller_namespace: str,
                 http_middlewares: Optional[List[
                     "starlette.middleware.Middleware"]] = None):  # noqa: F821
        if http_middlewares is None:
            http_middlewares = []

        self.host = host
        self.port = port

        self.setup_complete = asyncio.Event()

        self.app = HTTPProxy(controller_name, controller_namespace)

        # Wrap the proxy app with the configured middlewares, innermost first.
        self.wrapped_app = self.app
        for middleware in http_middlewares:
            self.wrapped_app = middleware.cls(self.wrapped_app,
                                              **middleware.options)

        # Start running the HTTP server on the event loop.
        # This task should be running forever. We track it in case of failure.
        self.running_task = asyncio.get_event_loop().create_task(self.run())

    async def ready(self):
        """Returns when HTTP proxy is ready to serve traffic.
        Or throw exception when it is not able to serve traffic.
        """
        done_set, _ = await asyncio.wait(
            [
                # Either the HTTP setup has completed.
                # The event is set inside self.run.
                self.setup_complete.wait(),
                # Or self.run errored.
                self.running_task,
            ],
            return_when=asyncio.FIRST_COMPLETED)

        # Return None, or re-throw the exception from self.running_task.
        return await done_set.pop()

    async def block_until_endpoint_exists(self, endpoint: EndpointTag,
                                          timeout_s: float):
        # Delegates to the wrapped HTTPProxy's polling wait.
        await self.app.block_until_endpoint_exists(endpoint, timeout_s)

    async def run(self):
        """Bind the listening socket and serve requests forever."""
        sock = socket.socket()
        # These two socket options will allow multiple process to bind the the
        # same port. Kernel will evenly load balance among the port listeners.
        # Note: this will only work on Linux.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, "SO_REUSEPORT"):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

        try:
            sock.bind((self.host, self.port))
        except OSError:
            # The OS failed to bind a socket to the given host and port.
            raise ValueError(
                f"""Failed to bind Ray Serve HTTP proxy to '{self.host}:{self.port}'.
Please make sure your http-host and http-port are specified correctly.""")

        # Note(simon): we have to use lower level uvicorn Config and Server
        # class because we want to run the server as a coroutine. The only
        # alternative is to call uvicorn.run which is blocking.
        config = uvicorn.Config(
            self.wrapped_app,
            host=self.host,
            port=self.port,
            lifespan="off",
            access_log=False)
        server = uvicorn.Server(config=config)
        # TODO(edoakes): we need to override install_signal_handlers here
        # because the existing implementation fails if it isn't running in
        # the main thread and uvicorn doesn't expose a way to configure it.
        server.install_signal_handlers = lambda: None

        self.setup_complete.set()
        await server.serve(sockets=[sock])
|
[
"noreply@github.com"
] |
oscarknagg.noreply@github.com
|
c8dfde3b6b267ec0e287ec27c1758df7672a6ea0
|
6050eb3d2b7833ac918440539b243a3456164de5
|
/start.py
|
f810e05f34956c350b5c783a0deb3bba107bcfe0
|
[] |
no_license
|
Ma-Min-Min/tiide
|
56f1dfe2294d909b5cf03c8f5eed78c21f4be02b
|
c8eb57032579ba442173c388abc55b2e1148cbc3
|
refs/heads/master
| 2020-03-19T12:18:19.091235
| 2018-06-13T16:34:23
| 2018-06-13T16:34:23
| 136,508,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from flask import Flask

# Minimal Flask demo app with two plain-text routes.
myapp = Flask(__name__)


@myapp.route("/")
def hello():
    """Root route: plain greeting."""
    return "Hello World"


@myapp.route("/tiide")
def tiide():
    """Secondary demo route."""
    return "Welcome to TIIDE World"


if __name__ == "__main__":
    # BUG FIX: the original called `app.run()`, but the application object
    # is named `myapp`, so direct execution raised NameError.
    myapp.run()
|
[
"minmin12697@gmail.com"
] |
minmin12697@gmail.com
|
5dbe640a7962de2d943dfb53aed0930dc9248b69
|
1054d2c6ad221bd5e2ec1cbe496679d6eebd1264
|
/app.py
|
73a8e2a8d7dd19e7b933de6f8d4d90bc295a01c8
|
[] |
no_license
|
feng147258/reconment
|
d2e96253716678b994eebd11b103b7ca89db5ea6
|
a1c8119235c6cc329e76f53f1bb0d4ec8e51f292
|
refs/heads/master
| 2023-07-14T12:00:18.878090
| 2021-08-13T08:58:57
| 2021-08-13T08:58:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/3 6:49 PM
# @Author : Yingjun Zhu
# @File : app.py.py
from flask import Flask, jsonify, request
from web.service.pageData import PageData
from dataclean.dao.mysql_db import Mysql
from web.entity.user import UserId
from web.service.logData import LogData
from web.kafka_service import kafka_producter
# Flask application plus the shared service singletons used by the routes
# below (created once at import time).
app = Flask(__name__)
log_data = LogData()  # persists user-behaviour log entries
page_data = PageData()  # paginated article/recommendation data access
@app.route('/reconmend/get_rec_list', methods=['POST', 'GET'])
def bertsimer():
    """Return one page of the recommendation list (POST with JSON body)."""
    # Only POST carries the pagination payload; other methods fall through
    # returning None, exactly like the original handler.
    if request.method != 'POST':
        return None
    payload = request.get_json()
    page_size = payload.get("pageSize")
    page_num = payload.get("pageNum")
    user_id = payload.get("userId")  # currently unused by get_page_data
    types = payload.get("types")  # currently unused
    try:
        page = page_data.get_page_data(page_size=page_size, page_num=page_num)
        return jsonify({"code": 0, "msg": "success", "data": page})
    except Exception as exc:
        print(str(exc))
        return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/likes', methods=['POST', 'GET'])
def likes():
    """Record a 'like' for a content item and notify Kafka."""
    # Non-POST requests fall through (None), matching the original handler.
    if request.method != 'POST':
        return None
    payload = request.get_json()
    title = payload.get("title")
    content_id = payload.get("contentId")
    user_id = payload.get("userId")
    try:
        db = Mysql()
        session = db._DBSession()
        known_user = session.query(UserId.id).filter(
            UserId.id == user_id).count() > 0
        if not known_user:
            return jsonify({"code": 1000, "msg": "success", "data": "用户不存在"})
        # Insert the behaviour log first; only update article stats (and
        # publish to Kafka) when both writes succeed.
        recorded = (log_data.insert_log(user_id, content_id, title, "likes")
                    and log_data.modify_articles_details("key", "likes"))
        if recorded:
            kafka_producter.main("recommend", str.encode(str(content_id) + ":likes"))
            return jsonify({"code": 0, "msg": "success", "data": "喜欢成功"})
        return jsonify({"code": 1001, "msg": "success", "data": "喜欢失败"})
    except Exception as exc:
        print(str(exc))
        return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/read', methods=['POST', 'GET'])
def read():
    """Record a 'read' event for a content item."""
    # Non-POST requests fall through (None), matching the original handler.
    if request.method != 'POST':
        return None
    payload = request.get_json()
    title = payload.get("title")
    content_id = payload.get("contentId")
    user_id = payload.get("userId")
    try:
        db = Mysql()
        session = db._DBSession()
        known_user = session.query(UserId.id).filter(
            UserId.id == user_id).count() > 0
        if not known_user:
            return jsonify({"code": 1000, "msg": "success", "data": "用户不存在"})
        recorded = (log_data.insert_log(user_id, content_id, title, "read")
                    and log_data.modify_articles_details("key", "read"))
        if recorded:
            return jsonify({"code": 0, "msg": "success", "data": "阅读陈宫"})
        return jsonify({"code": 1001, "msg": "success", "data": "阅读失败"})
    except Exception as exc:
        print(str(exc))
        return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/collections', methods=['POST', 'GET'])
def collections():
    """Record a 'collect' (bookmark) event for a content item."""
    # Non-POST requests fall through (None), matching the original handler.
    if request.method != 'POST':
        return None
    payload = request.get_json()
    title = payload.get("title")
    content_id = payload.get("contentId")
    user_id = payload.get("userId")
    try:
        db = Mysql()
        session = db._DBSession()
        known_user = session.query(UserId.id).filter(
            UserId.id == user_id).count() > 0
        if not known_user:
            return jsonify({"code": 1000, "msg": "success", "data": "接口操作出现问题"})
        recorded = (log_data.insert_log(user_id, content_id, title, "collections")
                    and log_data.modify_articles_details("key", "collections"))
        if recorded:
            return jsonify({"code": 0, "msg": "success", "data": "收藏成功"})
        return jsonify({"code": 1001, "msg": "success", "data": "收藏失败"})
    except Exception as exc:
        print(str(exc))
        return jsonify({"code": 1000, "msg": "fail"})
def register():
    # TODO: user registration endpoint is not implemented yet.
    pass


def login():
    # TODO: user login endpoint is not implemented yet.
    pass


if __name__ == '__main__':
    # NOTE(review): threaded=False presumably because the module-level
    # service singletons are not safe for concurrent requests — confirm.
    app.run(host='0.0.0.0', threaded=False, port=8080)
|
[
"yingjun.zhu@esoon.com"
] |
yingjun.zhu@esoon.com
|
84bd69b3aecc431f55e1f816dbfe988f0e2443fc
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/mlxnas004/question1.py
|
615d9525446c91fd8b2b6c646c028b7d0a290c6e
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
#nasha meoli
#mlxnas004
#leap year
x = eval(input("Enter a year:\n"))
condition_1 = x%400
condition_2 = x%4
condition_3 = x%100
if (condition_1 == 0) or ((condition_2 == 0) and (condition_3 >= 1)):
print(x,"is a leap year.")
else:
print(x,"is not a leap year.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
7be1adfbae6084374a99a6320ce4b8a69079caf5
|
cd65010c3142b693013984e1eef232b1a34fecc4
|
/main_project/hom.py
|
fd5263c216197fb6160c440260118ea3b7125d6b
|
[] |
no_license
|
simofane4/tkinter
|
26f3a762807db31790477706249fcbf808bae029
|
628ed26198a5aa093488f0dc0ff8bbe255c2043b
|
refs/heads/master
| 2023-04-03T11:29:57.998185
| 2021-04-20T00:55:35
| 2021-04-20T00:55:35
| 359,479,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
from tkinter import *
import tkinter as tk
from PIL import Image, ImageTk
import time
from tkinter import ttk
from time import strftime
# Root window: 800x800 at offset (350,150), dark navy background,
# width fixed but height resizable.
app = tk.Tk()
app.geometry("800x800+350+150")
app.title("Gestion de Stock des Pièces de Rechange")
app.configure(background='#0b2239')
app.resizable(width=False, height=True)
#localtime = time.strftime("%H:%M:%S %y-%m-%d")
# Hover feedback for the six menu buttons: <Enter> turns the button
# background white; <Leave> restores the panel blue (#164777).
# NOTE(review): each handler references an lbl_img* global that is created
# further down the script, so they must only fire after startup.
def enter(event):
    lbl_img5.config(bg='white')
def leave(event):
    lbl_img5.config(bg='#164777')
def enter_1(event):
    lbl_img6.config(bg='white')
def leave_1(event):
    lbl_img6.config(bg='#164777')
def enter_2(event):
    lbl_img7.config(bg='white')
def leave_2(event):
    lbl_img7.config(bg='#164777')
def enter_3(event):
    lbl_img8.config(bg='white')
def leave_3(event):
    lbl_img8.config(bg='#164777')
def enter_4(event):
    lbl_img4.config(bg='white')
def leave_4(event):
    lbl_img4.config(bg='#164777')
def enter_5(event):
    lbl_img9.config(bg='white')
def leave_5(event):
    lbl_img9.config(bg='#164777')
# Window icon.
icon = tk.PhotoImage(file="icon.png")
app.call("wm", "iconphoto", app._w, icon)
# Header: settings icon + title label. Images are stored as attributes on
# `app` so Tkinter's PhotoImage objects are not garbage-collected (Tk keeps
# no reference of its own).
app.img1 = Image.open("settings.png").resize((80, 80), Image.ANTIALIAS)
app.photo_image1 = ImageTk.PhotoImage(app.img1)
# NOTE(review): chaining .place() makes these names None — harmless here
# since the widgets are never referenced again.
lbl_img1 = Label(image=app.photo_image1, fg="black", bg="#0b2239").place(x=20, y=15, width=80, height=80)
ho_lbl6 = tk.Label(app, text="Gestion de Stock ", fg="white", font=("Times New roman", 16), bg='#0b2239').place(x=115, y=40)
#app.img4 = Image.open("client.png").resize((100, 100), Image.ANTIALIAS)
#app.photo_image4 = ImageTk.PhotoImage(app.img4)
#lbl_img4 = Button(image=app.photo_image4, borderwidth=0, bg="white", activebackground="red").place(x=50, y=150, width=100, height=100)
# Menu button grid (2 rows x 3 columns); each button gets the hover
# handlers defined above bound to <Enter>/<Leave>.
app.img5 = Image.open("group.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image5 = ImageTk.PhotoImage(app.img5)
lbl_img5 = Button(image=app.photo_image5, borderwidth=0, bg="#164777", activebackground="red")
lbl_img5.place(x=150, y=250, width=150, height=150)
lbl_img5.bind('<Enter>' , enter)
lbl_img5.bind('<Leave>', leave)
app.img6 = Image.open("service.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image6 = ImageTk.PhotoImage(app.img6)
lbl_img6 = Button(image=app.photo_image6, borderwidth=0, bg="#164777", activebackground="red")
lbl_img6.place(x=150, y=410, width=150, height=150)
lbl_img6.bind('<Enter>' , enter_1)
lbl_img6.bind('<Leave>', leave_1)
app.img7 = Image.open("tool-box.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image7 = ImageTk.PhotoImage(app.img7)
lbl_img7 = Button(image=app.photo_image7, borderwidth=0, bg="#164777", activebackground="red")
lbl_img7.place(x=310, y=250, width=150, height=150)
lbl_img7.bind('<Enter>' , enter_2)
lbl_img7.bind('<Leave>', leave_2)
app.img8 = Image.open("cart.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image8 = ImageTk.PhotoImage(app.img8)
lbl_img8 = Button(image=app.photo_image8, borderwidth=0, bg="#164777", activebackground="red")
lbl_img8.place(x=310, y=410, width=150, height=150)
lbl_img8.bind('<Enter>' , enter_3)
lbl_img8.bind('<Leave>', leave_3)
app.img4 = Image.open("invoice.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image4 = ImageTk.PhotoImage(app.img4)
lbl_img4 = Button(image=app.photo_image4, borderwidth=0, bg="#164777", activebackground="red")
lbl_img4.place(x=470, y=250, width=150, height=150)
lbl_img4.bind('<Enter>' , enter_4)
lbl_img4.bind('<Leave>', leave_4)
app.img9 = Image.open("warehouse.png").resize((100, 100), Image.ANTIALIAS)
app.photo_image9 = ImageTk.PhotoImage(app.img9)
lbl_img9 = Button(image=app.photo_image9, borderwidth=0, bg="#164777", activebackground="red")
lbl_img9.place(x=470, y=410, width=150, height=150)
lbl_img9.bind('<Enter>' , enter_5)
lbl_img9.bind('<Leave>', leave_5)
# Enter the Tk event loop; blocks until the window is closed.
app.mainloop()
|
[
"simofane4@gmail.com"
] |
simofane4@gmail.com
|
b796b20a4d9e957f27a98c703b071bbc111e9bde
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/ancpvlanrange_58418cab117460d5be96e7c24e4e1bfb.py
|
00c591b0ead7d391ee148ba1bd8b5a0ea079d425
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 12,400
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AncpVlanRange(Base):
"""
The AncpVlanRange class encapsulates a required ancpVlanRange resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'ancpVlanRange'
def __init__(self, parent):
super(AncpVlanRange, self).__init__(parent)
@property
def VlanIdInfo(self):
"""An instance of the VlanIdInfo class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e.VlanIdInfo)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e import VlanIdInfo
return VlanIdInfo(self)
@property
def Enabled(self):
"""Disabled ranges won't be configured nor validated.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def FirstId(self):
"""DEPRECATED The first ID to be used for the first VLAN tag.
Returns:
number
"""
return self._get_attribute('firstId')
@FirstId.setter
def FirstId(self, value):
self._set_attribute('firstId', value)
@property
def IdIncrMode(self):
"""Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Returns:
number
"""
return self._get_attribute('idIncrMode')
@IdIncrMode.setter
def IdIncrMode(self, value):
self._set_attribute('idIncrMode', value)
@property
def Increment(self):
"""DEPRECATED Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('increment')
@Increment.setter
def Increment(self, value):
self._set_attribute('increment', value)
@property
def IncrementStep(self):
"""DEPRECATED Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('incrementStep')
@IncrementStep.setter
def IncrementStep(self, value):
self._set_attribute('incrementStep', value)
@property
def InnerEnable(self):
"""DEPRECATED Enable the inner VLAN.
Returns:
bool
"""
return self._get_attribute('innerEnable')
@InnerEnable.setter
def InnerEnable(self, value):
self._set_attribute('innerEnable', value)
@property
def InnerFirstId(self):
"""DEPRECATED The first ID to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerFirstId')
@InnerFirstId.setter
def InnerFirstId(self, value):
self._set_attribute('innerFirstId', value)
@property
def InnerIncrement(self):
"""DEPRECATED Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('innerIncrement')
@InnerIncrement.setter
def InnerIncrement(self, value):
self._set_attribute('innerIncrement', value)
@property
def InnerIncrementStep(self):
"""DEPRECATED Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('innerIncrementStep')
@InnerIncrementStep.setter
def InnerIncrementStep(self, value):
self._set_attribute('innerIncrementStep', value)
@property
def InnerPriority(self):
"""DEPRECATED The 802.1Q priority to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerPriority')
@InnerPriority.setter
def InnerPriority(self, value):
self._set_attribute('innerPriority', value)
@property
def InnerTpid(self):
"""DEPRECATED The TPID value in the inner VLAN Tag.
Returns:
str
"""
return self._get_attribute('innerTpid')
@InnerTpid.setter
def InnerTpid(self, value):
self._set_attribute('innerTpid', value)
@property
def InnerUniqueCount(self):
"""DEPRECATED Number of unique inner VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('innerUniqueCount')
@InnerUniqueCount.setter
def InnerUniqueCount(self, value):
self._set_attribute('innerUniqueCount', value)
@property
def Name(self):
"""Name of range
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def ObjectId(self):
"""Unique identifier for this object
Returns:
str
"""
return self._get_attribute('objectId')
@property
def Priority(self):
"""DEPRECATED The 802.1Q priority to be used for the outer VLAN tag.
Returns:
number
"""
return self._get_attribute('priority')
@Priority.setter
def Priority(self, value):
self._set_attribute('priority', value)
@property
def Tpid(self):
"""DEPRECATED The TPID value in the outer VLAN Tag.
Returns:
str
"""
return self._get_attribute('tpid')
@Tpid.setter
def Tpid(self, value):
self._set_attribute('tpid', value)
@property
def UniqueCount(self):
"""DEPRECATED Number of unique first VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('uniqueCount')
@UniqueCount.setter
def UniqueCount(self, value):
self._set_attribute('uniqueCount', value)
def update(self, Enabled=None, FirstId=None, IdIncrMode=None, Increment=None, IncrementStep=None, InnerEnable=None, InnerFirstId=None, InnerIncrement=None, InnerIncrementStep=None, InnerPriority=None, InnerTpid=None, InnerUniqueCount=None, Name=None, Priority=None, Tpid=None, UniqueCount=None):
"""Updates a child instance of ancpVlanRange on the server.
Args:
Enabled (bool): Disabled ranges won't be configured nor validated.
FirstId (number): The first ID to be used for the first VLAN tag.
IdIncrMode (number): Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Increment (number): Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
IncrementStep (number): Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerEnable (bool): Enable the inner VLAN.
InnerFirstId (number): The first ID to be used for the inner VLAN tag.
InnerIncrement (number): Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
InnerIncrementStep (number): Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerPriority (number): The 802.1Q priority to be used for the inner VLAN tag.
InnerTpid (str): The TPID value in the inner VLAN Tag.
InnerUniqueCount (number): Number of unique inner VLAN IDs to use.
Name (str): Name of range
Priority (number): The 802.1Q priority to be used for the outer VLAN tag.
Tpid (str): The TPID value in the outer VLAN Tag.
UniqueCount (number): Number of unique first VLAN IDs to use.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2:list, Arg3:enum)
Args:
args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to disable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to enable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
3781aec365f284490c5bfb10dcd9c409d8e70233
|
69f5b9375c4ae3908cfbd29c0b6d10117de31e5d
|
/predict_frete.py
|
0f394a310a71fbbfcc1e22f6c47df92383025ca9
|
[] |
no_license
|
deepsideoflearning/freight_predictive_model
|
f3a898eb87bf8f03ab15bce065b7945c060b0655
|
11965ce729d406a2748d81759e2dc686769d130f
|
refs/heads/master
| 2021-09-14T20:34:46.784241
| 2018-05-18T19:45:03
| 2018-05-18T19:45:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
from util import *
import pickle
import numpy as np
df = pd.read_excel('novas_rotas.xlsx')
data = select_cols(df)
data = make_dummies(data)
data = complete_cols(data)
norm_data = np.asarray(data)
X = norm_data[:, 1:]
y_norm = pickle.load(open('y_norm.pkl', 'rb'))
X_norm = pickle.load(open('X_norm.pkl', 'rb'))
X = X_norm.transform(X)
model = rnn_model(X)
model.load_weights('model_frete_weights.hdf5')
result = model.predict(X)
result = y_norm.inverse_transform(result)
df['Frete por kg'] = result
df.to_csv('predict.csv', encoding='latin1')
|
[
"br_aquino@yahoo.com.br"
] |
br_aquino@yahoo.com.br
|
fc11ed2f1b37ee77de7206c64314dedf713c76d6
|
ac5d3907a4e1333dc89c8d037b555113fc950db3
|
/MainPage/migrations/0002_remove_post_preview_image.py
|
28ae2721e90c28c5d54196102ce4a980806528c4
|
[] |
no_license
|
Shkuin/CyberWorld
|
29e12cf56c7aa58079d44a7d008efeed05fee872
|
153d834cc7e33b75e46c534bebfd187ee00a4852
|
refs/heads/master
| 2022-11-10T09:12:34.529883
| 2020-06-28T17:02:10
| 2020-06-28T17:02:10
| 273,578,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Generated by Django 3.0.5 on 2020-04-17 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('MainPage', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='preview_image',
),
]
|
[
"bioniklsn123@mail.ru"
] |
bioniklsn123@mail.ru
|
c4813a92e720fa53d9eefd406d9c0a0a181b36c8
|
dead387c1bd3f3193d0f8ec980b9d8103cfc3113
|
/process_create.py
|
02daa051df5a1258131d48327e0473f9dcfc9986
|
[] |
no_license
|
purndaum/web3
|
c593eaccbff50895e729acb2c342ee961345932f
|
96d57e8e7ff635113c237fd55bf7929de7fad281
|
refs/heads/master
| 2021-05-22T18:19:22.350482
| 2020-04-04T15:51:20
| 2020-04-04T15:51:20
| 251,065,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
#!python
import cgi
form = cgi.FieldStorage()
title = form["title"].value
description = form['description'].value
opened_file = open('data/'+title, 'w')
opened_file.write(description)
opened_file.close()
#Redirection
print("Location: index.py?id="+title)
print()
|
[
"noreply@github.com"
] |
purndaum.noreply@github.com
|
e4275df4e69cf6565d2afddbef18539b2d4d99f3
|
4f875744ccae8fa9225318ce16fc483b7bf2735e
|
/google/findDuplicate.py
|
44e01dd1b67af92eaf0af5a61e728e840331fdcb
|
[] |
no_license
|
nguyenngochuy91/companyQuestions
|
62c0821174bb3cb33c7af2c5a1e83a60e4a29977
|
c937fe19be665ba7ac345e1729ff531f370f30e8
|
refs/heads/master
| 2020-07-27T05:58:36.794033
| 2020-04-10T20:57:15
| 2020-04-10T20:57:15
| 208,893,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 20:34:41 2020
@author: huyn
"""
#609. Find Duplicate File in System
from typing import List
class Solution:
def findDuplicate(self, paths: List[str]) -> List[List[str]]:
d = {}
for path in paths:
item = path.split()
root = item[0]
for file in item[1:]:
file = file.split("(")
fileName = file[0]
content = file[1].split(")")[0]
if content not in d:
d[content] = []
d[content].append(root+"/"+fileName)
return [d[key] for key in d if len(d[key])>=2]
|
[
"huyn@cvm6h4zv52.cvm.iastate.edu"
] |
huyn@cvm6h4zv52.cvm.iastate.edu
|
057960e15bf592de3eaac0311f6e861f90dda900
|
2562c465fbf059b8846acbcb13442347c5fd058d
|
/src/pms7003.py
|
56b466c6621cbbd6345763080b9c6dc1bceeaac2
|
[] |
no_license
|
dawncold/raspberry_pms7003
|
3d343836cb49851483eec6babe4bec62cd3260a4
|
f45571f2a7ad5c38fad86c873434188e1f582a5e
|
refs/heads/master
| 2021-04-27T00:12:18.906348
| 2019-08-04T13:37:18
| 2019-08-04T13:37:18
| 123,770,113
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
#! coding: utf-8
from __future__ import unicode_literals, print_function, division
import serial
SERIAL_DEVICE = '/dev/ttyAMA0'
HEAD_FIRST = 0x42
HEAD_SECOND = 0x4d
DATA_LENGTH = 32
BODY_LENGTH = DATA_LENGTH - 1 - 1
P_CF_PM10 = 2
P_CF_PM25 = 4
P_CF_PM100 = 6
P_C_PM10 = 8
P_C_PM25 = 10
P_C_PM100 = 12
P_C_03 = 14
P_C_05 = 16
P_C_10 = 18
P_C_25 = 20
P_C_50 = 22
P_C_100 = 24
DATA_DESC = [
(P_CF_PM10, 'CF=1, PM1.0', 'μg/m3'),
(P_CF_PM25, 'CF=1, PM2.5', 'μg/m3'),
(P_CF_PM100, 'CF=1, PM10', 'μg/m3'),
(P_C_PM10, 'PM1.0', 'μg/m3'),
(P_C_PM25, 'PM2.5', 'μg/m3'),
(P_C_PM100, 'PM10', 'μg/m3'),
(P_C_03, '0.1L, d>0.3μm', ''),
(P_C_05, '0.1L, d>0.5μm', ''),
(P_C_10, '0.1L, d>1μm', ''),
(P_C_25, '0.1L, d>2.5μm', ''),
(P_C_50, '0.1L, d>5.0μm', ''),
(P_C_100, '0.1L, d>10μm', ''),
]
def get_frame(_serial):
while True:
b = _serial.read()
if b != chr(HEAD_FIRST):
continue
b = _serial.read()
if b != chr(HEAD_SECOND):
continue
body = _serial.read(BODY_LENGTH)
if len(body) != BODY_LENGTH:
continue
return body
def get_frame_length(_frame):
h8 = ord(_frame[0])
l8 = ord(_frame[1])
return int(h8 << 8 | l8)
def get_version_and_error_code(_frame):
return _frame[-4], _frame[-3]
def valid_frame_checksum(_frame):
checksum = ord(_frame[-2]) << 8 | ord(_frame[-1])
calculated_checksum = HEAD_FIRST + HEAD_SECOND
for field in _frame[:-2]:
calculated_checksum += ord(field)
return checksum == calculated_checksum
def decode_frame(_frame):
data = {}
for item in DATA_DESC:
start, desc, unit = item
value = int(ord(_frame[start]) << 8 | ord(_frame[start + 1]))
data[str(start)] = (desc, value, unit)
return data
def read_data():
ser = serial.Serial(port=SERIAL_DEVICE, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE)
try:
frame = get_frame(ser)
except Exception as e:
print('get frame got exception: {}'.format(e.message))
else:
if not valid_frame_checksum(frame):
print('frame checksum mismatch')
return
data = {'data': decode_frame(frame)}
version, error_code = get_version_and_error_code(frame)
data['version'] = version
data['errcode'] = error_code
return data
finally:
ser.close()
if __name__ == '__main__':
data = read_data()
if not data:
print('no data')
exit(0)
if data['errcode'] != '\0':
print('got error: {}'.format(data['errcode']))
exit(-1)
for k in sorted(data['data'], key=lambda x: int(x)):
v = data['data'][k]
print('{}: {} {}'.format(v[0], v[1], v[2]))
|
[
"loooseleaves@gmail.com"
] |
loooseleaves@gmail.com
|
d65a31c823fa8efead544ec0a4f9c5345bc0530f
|
e6f0ebccf689ca4a3eb1b2349f0d9a8aa1af081e
|
/iblog/blog/models.py
|
0ca0d664c5dbb86c34d112017f146c6a8b048cb5
|
[] |
no_license
|
pragy540/IBlog
|
d5175b485903b8c454b9ba6ea29b34f9af97ade3
|
1615972d2ccdaf2cb586486456e210f1c86fa355
|
refs/heads/master
| 2022-07-09T00:30:35.227168
| 2020-05-17T14:14:44
| 2020-05-17T14:14:44
| 264,673,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from django.db import models
from django.utils.timezone import now
# Create your models here.
class Post(models.Model):
post_id=models.AutoField(primary_key= True)
author=models.CharField(max_length=100)
title=models.CharField(max_length=150)
content=models.TextField()
timeStamp=models.DateTimeField(default=now)
def __str__(self):
return self.title+" by "+ self.author
|
[
"pragyaptl131996@gmail.com"
] |
pragyaptl131996@gmail.com
|
c07ba76a6ce1700bed5939dd56790525d85ad59a
|
3e64d1fb4998fae24a4178d0925e0f30e30b00e7
|
/venv/lib/python3.8/encodings/utf_7.py
|
fabe5e915e16c26cdb4c57b6fa50ed8570d0dee2
|
[] |
no_license
|
viraatdas/Model-Rest-API
|
a39e150c484c7136141f462932d741de5b45e044
|
a08500a28e4ad32094de6f88223088b9a9081d69
|
refs/heads/master
| 2022-11-12T15:33:06.624474
| 2020-07-05T05:04:50
| 2020-07-05T05:04:50
| 257,821,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/encodings/utf_7.py
|
[
"viraat.laldas@gmail.com"
] |
viraat.laldas@gmail.com
|
0fdb7a7c501f03fb7f776e4965cd4da3243f4ed9
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/squareroot/7ab7bec6-576b-4910-98d1-ec30c84244ab__calculate_square.py
|
0bf1d0137076df117eaec3d77052d26dce255f54
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621
| 2020-11-20T23:50:11
| 2020-11-20T23:50:11
| 60,889,742
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
# calculate_square.py
from Tkinter import *
import ttk
def calculate_square(*args):
value_in = float(number_in.get())
number_out.set(value_in * value_in)
root = Tk()
root.title('Calculate square')
mainframe = ttk.Frame(root)
mainframe.grid(column=1, row=1, sticky=(N, E, S, W))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
number_in = StringVar()
number_out = StringVar()
square_of_string_label = ttk.Label(mainframe, text='The square of')
square_of_string_label.grid(column=1, row=1, sticky=E)
number_in_entry = ttk.Entry(mainframe, width=5, textvariable=number_in)
number_in_entry.grid(column=2, row=1, sticky=(E, W))
is_string_label = ttk.Label(mainframe, text='is')
is_string_label.grid(column=1, row=2, sticky=E)
number_out_label = ttk.Label(mainframe, textvariable=number_out)
number_out_label.grid(column=2, row=2, sticky=W)
go_button = ttk.Button(mainframe, text='Go!', command=calculate_square)
go_button.grid(column=2, row=3, sticky=W)
for child in mainframe.winfo_children():
child.grid_configure(padx=2, pady=2)
number_in_entry.focus()
root.bind('<Return>', calculate_square)
root.mainloop()
|
[
"erickmiller@gmail.com"
] |
erickmiller@gmail.com
|
6f7d487f3b03c0c24d82708a38a6e037e81955a4
|
a8dfac05ab2726f00db4630a47741768174e3e96
|
/Information-Extraction/QANet/config.py
|
400ebd2a5f4b68c3390c842208e7b0e8622f0754
|
[] |
no_license
|
mohitsshah/documents-caf
|
0ba33a565ce272c3fb19a5cd388e8508825d15ba
|
7e457a2fc1f942de7ef2606feb860e6a952b41ef
|
refs/heads/master
| 2020-03-11T01:28:15.811371
| 2018-05-31T09:15:04
| 2018-05-31T09:15:04
| 129,692,354
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,809
|
py
|
import os
import tensorflow as tf
import json
# from prepro import prepro
# from main import train, test, demo
flags = tf.flags
home = os.path.expanduser("~")
train_file = os.path.join(home, "data", "squad", "train-v1.1.json")
dev_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
test_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
glove_word_file = os.path.join(home, "data", "glove", "glove.840B.300d.txt")
train_dir = "models"
model_name = "FRC"
dir_name = os.path.join(train_dir, model_name)
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(os.path.join(os.getcwd(),dir_name)):
os.mkdir(os.path.join(os.getcwd(),dir_name))
target_dir = "data"
log_dir = os.path.join(dir_name, "event")
save_dir = os.path.join(dir_name, "model")
answer_dir = os.path.join(dir_name, "answer")
train_record_file = os.path.join(target_dir, "train.tfrecords")
dev_record_file = os.path.join(target_dir, "dev.tfrecords")
test_record_file = os.path.join(target_dir, "test.tfrecords")
word_emb_file = os.path.join(target_dir, "word_emb.json")
char_emb_file = os.path.join(target_dir, "char_emb.json")
train_eval = os.path.join(target_dir, "train_eval.json")
dev_eval = os.path.join(target_dir, "dev_eval.json")
test_eval = os.path.join(target_dir, "test_eval.json")
dev_meta = os.path.join(target_dir, "dev_meta.json")
test_meta = os.path.join(target_dir, "test_meta.json")
word_dictionary = os.path.join(target_dir, "word_dictionary.json")
char_dictionary = os.path.join(target_dir, "char_dictionary.json")
answer_file = os.path.join(answer_dir, "answer.json")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if not os.path.exists(answer_dir):
os.makedirs(answer_dir)
flags.DEFINE_string("mode", "train", "Running mode train/debug/test")
flags.DEFINE_string("target_dir", target_dir, "Target directory for out data")
flags.DEFINE_string("log_dir", log_dir, "Directory for tf event")
flags.DEFINE_string("save_dir", save_dir, "Directory for saving model")
flags.DEFINE_string("train_file", train_file, "Train source file")
flags.DEFINE_string("dev_file", dev_file, "Dev source file")
flags.DEFINE_string("test_file", test_file, "Test source file")
flags.DEFINE_string("glove_word_file", glove_word_file, "Glove word embedding source file")
flags.DEFINE_string("train_record_file", train_record_file, "Out file for train data")
flags.DEFINE_string("dev_record_file", dev_record_file, "Out file for dev data")
flags.DEFINE_string("test_record_file", test_record_file, "Out file for test data")
flags.DEFINE_string("word_emb_file", word_emb_file, "Out file for word embedding")
flags.DEFINE_string("char_emb_file", char_emb_file, "Out file for char embedding")
flags.DEFINE_string("train_eval_file", train_eval, "Out file for train eval")
flags.DEFINE_string("dev_eval_file", dev_eval, "Out file for dev eval")
flags.DEFINE_string("test_eval_file", test_eval, "Out file for test eval")
flags.DEFINE_string("dev_meta", dev_meta, "Out file for dev meta")
flags.DEFINE_string("test_meta", test_meta, "Out file for test meta")
flags.DEFINE_string("answer_file", answer_file, "Out file for answer")
flags.DEFINE_string("word_dictionary", word_dictionary, "Word dictionary")
flags.DEFINE_string("char_dictionary", char_dictionary, "Character dictionary")
flags.DEFINE_integer("glove_char_size", 94, "Corpus size for Glove")
flags.DEFINE_integer("glove_word_size", int(2.2e6), "Corpus size for Glove")
flags.DEFINE_integer("glove_dim", 300, "Embedding dimension for Glove")
flags.DEFINE_integer("char_dim", 64, "Embedding dimension for char")
flags.DEFINE_integer("para_limit", 400, "Limit length for paragraph")
flags.DEFINE_integer("ques_limit", 50, "Limit length for question")
flags.DEFINE_integer("ans_limit", 30, "Limit length for answers")
flags.DEFINE_integer("test_para_limit", 1000, "Limit length for paragraph in test file")
flags.DEFINE_integer("test_ques_limit", 100, "Limit length for question in test file")
flags.DEFINE_integer("char_limit", 16, "Limit length for character")
flags.DEFINE_integer("word_count_limit", -1, "Min count for word")
flags.DEFINE_integer("char_count_limit", -1, "Min count for char")
flags.DEFINE_integer("capacity", 15000, "Batch size of dataset shuffle")
flags.DEFINE_integer("num_threads", 4, "Number of threads in input pipeline")
flags.DEFINE_boolean("is_bucket", False, "build bucket batch iterator or not")
flags.DEFINE_list("bucket_range", [40, 401, 40], "the range of bucket")
flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_integer("num_steps", 60000, "Number of steps")
flags.DEFINE_integer("checkpoint", 1000, "checkpoint to save and evaluate the model")
flags.DEFINE_integer("period", 100, "period to save batch loss")
flags.DEFINE_integer("val_num_batches", 150, "Number of batches to evaluate the model")
flags.DEFINE_float("dropout", 0.1, "Dropout prob across the layers")
flags.DEFINE_float("grad_clip", 5.0, "Global Norm gradient clipping rate")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_float("decay", 0.9999, "Exponential moving average decay")
flags.DEFINE_float("l2_norm", 3e-7, "L2 norm scale")
flags.DEFINE_integer("hidden", 96, "Hidden size")
flags.DEFINE_integer("num_heads", 1, "Number of heads in self attention")
flags.DEFINE_boolean("q2c", True, "Whether to use query to context attention or not")
# Extensions (Uncomment corresponding code in download.sh to download the required data)
glove_char_file = os.path.join(home, "data", "glove", "glove.840B.300d-char.txt")
flags.DEFINE_string("glove_char_file", glove_char_file, "Glove character embedding source file")
flags.DEFINE_boolean("pretrained_char", False, "Whether to use pretrained character embedding")
fasttext_file = os.path.join(home, "data", "fasttext", "wiki-news-300d-1M.vec")
flags.DEFINE_string("fasttext_file", fasttext_file, "Fasttext word embedding source file")
flags.DEFINE_boolean("fasttext", False, "Whether to use fasttext")
def main(_):
config = flags.FLAGS
params = config.flag_values_dict()
del params["mode"]
with open("config.json", "w") as fi:
fi.write(json.dumps(params))
# if config.mode == "train":
# train(config)
# elif config.mode == "prepro":
# prepro(config)
# elif config.mode == "debug":
# config.num_steps = 2
# config.val_num_batches = 1
# config.checkpoint = 1
# config.period = 1
# train(config)
# elif config.mode == "test":
# test(config)
# elif config.mode == "demo":
# demo(config)
# else:
# print("Unknown mode")
# exit(0)
if __name__ == "__main__":
tf.app.run()
|
[
"mohit@Sandeeps-MacBook-Air.local"
] |
mohit@Sandeeps-MacBook-Air.local
|
201ec0e778d39c619ca7d2db0f6caee17ddd1f95
|
d7363da78e6f1e8ae2c6abca3f845853756165d4
|
/src/adafruit_blinka/board/dragonboard_410c.py
|
a627309d6c32ff8ab6a13dc5b5cc9a989804b538
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Blinka
|
7a9ed88f39ff12082d1b46647fa8869b541fba49
|
009b352a3234339000c32d2e61e830455cf389fa
|
refs/heads/main
| 2023-08-09T06:25:02.178935
| 2023-07-28T16:45:40
| 2023-07-28T16:45:40
| 120,540,744
| 398
| 331
|
MIT
| 2023-09-14T20:32:23
| 2018-02-07T00:25:03
|
Python
|
UTF-8
|
Python
| false
| false
| 972
|
py
|
# SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the Dragonboard 410c."""
from adafruit_blinka.microcontroller.snapdragon.apq8016 import pin
GPIO_A = pin.GPIO_36
GPIO_B = pin.GPIO_12
GPIO_C = pin.GPIO_13
GPIO_D = pin.GPIO_69
GPIO_E = pin.GPIO_115
GPIO_F = pin.PM_MPP_4
GPIO_G = pin.GPIO_24
GPIO_H = pin.GPIO_25
GPIO_I = pin.GPIO_35
GPIO_J = pin.GPIO_34
GPIO_K = pin.GPIO_28
GPIO_L = pin.GPIO_33
GPIO_36 = pin.GPIO_36
GPIO_12 = pin.GPIO_12
GPIO_13 = pin.GPIO_13
GPIO_69 = pin.GPIO_69
GPIO_115 = pin.GPIO_115
GPIO_4 = pin.PM_MPP_4
GPIO_24 = pin.GPIO_24
GPIO_25 = pin.GPIO_25
GPIO_35 = pin.GPIO_35
GPIO_34 = pin.GPIO_34
GPIO_28 = pin.GPIO_28
GPIO_33 = pin.GPIO_33
SDA = pin.I2C0_SDA
SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
I2C0_SCL = pin.I2C0_SCL
I2C1_SDA = pin.I2C1_SDA
I2C1_SCL = pin.I2C1_SCL
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_MOSI
MISO = pin.SPI0_MISO
SPI_CS = pin.SPI0_CS
|
[
"melissa@adafruit.com"
] |
melissa@adafruit.com
|
c3c919f2d48788afdacf95e58dcf569a9f7e7671
|
15611d5e76b1f25e04755bc9370390cd11ee004c
|
/revision/data/awardfix.py
|
e2b6ce6b7c2c474cda9ce32b2e4c39df885e0d86
|
[] |
no_license
|
paultopia/paulgowdercom
|
dd3d09fb027142188bf49ff3d65bbac600593ff1
|
b9583c4bae6b2db7274ebdaee799d7c675866160
|
refs/heads/master
| 2020-04-15T14:30:01.907498
| 2017-07-25T19:25:39
| 2017-07-25T19:25:39
| 51,056,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
import json
with open('awards.json') as aj:
old = json.load(aj)
new = [{"award": x["name"], "year": x["year"]} for x in old]
with open('awards2.json', 'w') as aj2:
json.dump(new, aj2)
|
[
"paul.gowder@gmail.com"
] |
paul.gowder@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.