blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5778be46b1c38b2fd8acfee1a06f2bd935fbd86 | 02e930875e95713a387c60b7de816a3a354ceb3d | /bids/variables/entities.py | b5dc02b76d6a3ff0ad3e12c9c2677fdeba6a467d | [
"MIT"
] | permissive | Islast/pybids | 6a7899cbef03ec3579a21e1cbe85deb5d180ca1e | 3e80e617e6bd6258e027c9937ccedafe1c8e6b14 | refs/heads/master | 2020-03-23T22:00:14.970425 | 2018-07-24T14:14:08 | 2018-07-24T14:14:08 | 142,146,832 | 0 | 0 | MIT | 2018-07-24T11:06:51 | 2018-07-24T11:06:51 | null | UTF-8 | Python | false | false | 6,874 | py | from itertools import chain
from collections import namedtuple
from . import kollekshuns as clc
import pandas as pd
class Node(object):
    ''' Base class for objects that represent a single object in the BIDS
    hierarchy.
    Args:
        level (str): The level of analysis this Node sits at (e.g., 'run',
            'session', 'subject', or 'dataset').
        entities (dict): Dictionary of entities associated with this Node,
            typically extracted from the filename via grabbids.
    '''

    def __init__(self, level, entities):
        self.level = level
        self.entities = entities
        # Maps variable name -> BIDSVariable instance.
        self.variables = {}

    def add_variable(self, variable):
        ''' Adds a BIDSVariable to the current Node's list.
        Args:
            variable (BIDSVariable): The Variable to add to the list.
        '''
        # Keyed by name: adding a variable with an existing name silently
        # replaces the previous one.
        self.variables[variable.name] = variable
class RunNode(Node):
    ''' Represents a single Run in a BIDS project.
    Args:
        entities (dict): Dictionary of entities for this Node.
        image_file (str): The full path to the corresponding nifti image.
        duration (float): Duration of the run, in seconds.
        repetition_time (float): TR for the run.
    '''

    def __init__(self, entities, image_file, duration, repetition_time):
        self.image_file = image_file
        self.duration = duration
        self.repetition_time = repetition_time
        # The level of a RunNode is always 'run'.
        super(RunNode, self).__init__('run', entities)

    def get_info(self):
        ''' Return a RunInfo namedtuple summarizing this run. '''
        return RunInfo(self.entities, self.duration, self.repetition_time,
                       self.image_file)
# Stores key information for each Run: its entities dict, duration in
# seconds, repetition time (TR) and the path to its image file.
RunInfo = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image'])
class NodeIndex(Node):
    ''' Represents the top level in a BIDS hierarchy and indexes all Nodes.

    Keeps a flat list of Nodes plus a pandas DataFrame with one row per
    Node (entity columns plus 'node_index' and 'level') that is used for
    fast entity-based lookup.
    '''

    def __init__(self):
        self.index = pd.DataFrame()
        self.nodes = []

    def get_collections(self, unit, names=None, merge=False,
                        sampling_rate=None, **entities):
        ''' Retrieve variable data for a specified level in the Dataset.
        Args:
            unit (str): The unit of analysis to return variables for. Must be
                one of 'run', 'session', 'subject', or 'dataset'.
            names (list): Optional list of variable names to return. If
                None, all available variables are returned.
            merge (bool): If True, variables are merged across all
                observations of the current unit. E.g., if unit='subject',
                variables from all subjects will be merged into a single
                collection. If False, each observation is handled
                separately, and the result is returned as a list.
            sampling_rate (int, str): If unit='run', the sampling rate to
                pass onto the returned BIDSRunVariableCollection.
            entities: Optional constraints used to limit what gets returned.
        Returns:
            A single BIDSVariableCollection (or subclass) if merge=True
            (None when nothing matched), otherwise a list of collections,
            one per matching Node.
        '''
        nodes = self.get_nodes(unit, entities)
        var_sets = []

        for n in nodes:
            var_set = list(n.variables.values())
            var_set = [v for v in var_set if v.matches_entities(entities)]
            if names is not None:
                var_set = [v for v in var_set if v.name in names]
            # Additional filtering on Variables past run level, because their
            # contents are extracted from TSV files containing rows from
            # multiple observations
            if unit != 'run':
                var_set = [v.filter(entities) for v in var_set]
            var_sets.append(var_set)

        if merge:
            var_sets = [list(chain(*var_sets))]

        results = []
        for vs in var_sets:
            if not vs:
                continue
            if unit == 'run':
                vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
            else:
                vs = clc.BIDSVariableCollection(vs)
            results.append(vs)

        if merge:
            # Guard the empty case: the original indexed results[0]
            # unconditionally and raised IndexError when nothing matched.
            return results[0] if results else None

        return results

    def get_nodes(self, level=None, entities=None, strict=False):
        ''' Return all Nodes matching the given level and entity constraints.
        Args:
            level (str): Optional level to filter on.
            entities (dict): Optional entity constraints; values may be
                scalars or lists/tuples of admissible values.
            strict (bool): If True, raise ValueError when any requested
                entity is not a column in the index.
        '''
        entities = {} if entities is None else entities.copy()

        if level is not None:
            entities['level'] = level

        # No constraints at all: every Node matches.
        if not entities:
            return self.nodes

        match_ents = set(entities.keys())
        # Fixed: common_cols must be a set; the original built a list and
        # then evaluated `match_ents - common_cols`, which raises TypeError
        # (set minus list) whenever strict=True.
        common_cols = match_ents & set(self.index.columns)

        if strict and match_ents - common_cols:
            raise ValueError("Invalid entities: ", match_ents - common_cols)

        if not common_cols:
            return self.nodes

        # Construct query string that handles both single values and iterables
        query = []
        for col in common_cols:
            oper = 'in' if isinstance(entities[col], (list, tuple)) else '=='
            q = '{name} {oper} {val}'.format(name=col, oper=oper,
                                             val=repr(entities[col]))
            query.append(q)
        query = ' and '.join(query)

        rows = self.index.query(query)
        if rows.empty:
            return []

        # Sort canonically (subject/session/task/run first, then the rest).
        sort_cols = ['subject', 'session', 'task', 'run']
        sort_cols = [sc for sc in sort_cols if sc in set(rows.columns)]
        sort_cols += list(set(rows.columns) - set(sort_cols))
        rows = rows.sort_values(sort_cols)
        inds = rows['node_index'].astype(int)
        return [self.nodes[i] for i in inds]

    def get_or_create_node(self, level, entities, *args, **kwargs):
        ''' Retrieves a child Node based on the specified criteria, creating a
        new Node if necessary.
        Args:
            level (str): The level for the Node; 'run' creates a RunNode,
                anything else a plain Node.
            entities (dict): Dictionary of entities specifying which Node to
                return.
            args, kwargs: Optional positional or named arguments to pass onto
                class-specific initializers. These arguments are only used if
                a Node that matches the passed entities doesn't already exist,
                and a new one must be created.
        Returns:
            A Node instance.
        Raises:
            ValueError: If more than one existing Node matches.
        '''
        result = self.get_nodes(level, entities)

        if result:
            if len(result) > 1:
                raise ValueError("More than one matching Node found! If you're"
                                 " expecting more than one Node, use "
                                 "get_nodes() instead of get_or_create_node()."
                                 )
            return result[0]

        # Create Node
        if level == 'run':
            node = RunNode(entities, *args, **kwargs)
        else:
            node = Node(level, entities)

        # Register the new Node in both the flat list and the lookup frame.
        entities = dict(entities, node_index=len(self.nodes), level=level)
        self.nodes.append(node)
        node_row = pd.Series(entities)
        self.index = self.index.append(node_row, ignore_index=True)
        return node
| [
"tyarkoni@gmail.com"
] | tyarkoni@gmail.com |
ab9d39808661bb3098b6747960768e9d011d9399 | 42516b0348936e257d04113c2e632dc72ba58e91 | /test_env/test_suit_stress_test_camera1/test_suit_stress_test_camera1_case000017.py | 79f22f009c1b436dedff6bac1f89f0baac4b734c | [] | no_license | wwlwwlqaz/Qualcomm | 2c3a225875fba955d771101f3c38ca0420d8f468 | a04b717ae437511abae1e7e9e399373c161a7b65 | refs/heads/master | 2021-01-11T19:01:06.123677 | 2017-04-05T07:57:21 | 2017-04-05T07:57:21 | 79,292,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,250 | py | '''
@author: c_caijie
'''
import fs_wrapper
import settings.common as SC
from case_utility import *
from logging_wrapper import log_test_case, save_fail_log, print_report_line
from test_case_base import TestCaseBase
from qrd_shared.case import *
from qrd_shared.mms.Mms import *
class test_suit_stress_test_camera1_case000017(TestCaseBase):
    '''
    @see: L{TestCaseBase <TestCaseBase>}
    '''
    # Original author's test plan, kept verbatim:
    '''
    "Procedure:
    Step1:Launch Camera and sleep 1s;
    Step2:press home,sleep 1s re-launch camera;
    Step3: repeat step2 for 200 time
    Verification:
    ER2:No crash,No ANR
    '''

    def test_case_main(self, case_results):
        # Results are shared with test_case_end() through module globals.
        global case_flag , TAG, testresult, success_flag, i, success_time
        case_flag = False       # overall verdict for the whole case
        testresult = []         # per-iteration 'N.Pass' / 'N.Fail' strings
        success_time = 0        # count of successful iterations
        fail_time = 0
        iterationNum = 200      # HOME/relaunch cycles to run
        TAG = "Dev-ci cases: Camera "
        log_test_framework(TAG, self.name+" -Start")
        """
        cases contents you need to add
        """
        # Dismiss any leftover crash dialog before starting.
        if search_text("Close app", searchFlag=TEXT_CONTAINS):
            click_button_by_text("Close app", searchFlag=TEXT_CONTAINS)
            sleep(2)
        start_activity('org.codeaurora.snapcam','com.android.camera.CameraLauncher')
        # First launch may show runtime-permission and confirmation dialogs.
        if wait_for_fun(lambda:search_view_by_id('permission_allow_button'), True, 10):
            click_button_by_id('permission_allow_button')
        if wait_for_fun(lambda:search_text('OK'), True, 5):
            click_textview_by_text('OK')
        for i in range(iterationNum):
            print_log_line('This is iteration %d'%(i+1))
            success_flag = False
            # Step 1: camera is considered up when the shutter button shows.
            if wait_for_fun(lambda:search_view_by_id('shutter_button'), True, 10):
                log_test_framework("step1:", "Launch camera pass")
                sleep(1)
                # Step 2: go HOME, then bring the camera back.
                send_key(KEY_HOME)
                sleep(1)
                start_activity('org.codeaurora.snapcam','com.android.camera.CameraLauncher')
                if wait_for_fun(lambda:search_view_by_id('shutter_button'), True, 8):
                    log_test_framework("step2:", "press home and re-launch camera pass")
                    success_flag = True
            if success_flag == True:
                testresult.append('%d.Pass'%(i+1))
                success_time=success_time+1
                print_log(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], '%d.Pass'%(i+1))
            else:
                testresult.append('%d.Fail'%(i+1))
                take_screenshot()   # keep evidence of the failure
                fail_time=fail_time+1
                print_log(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], '%d.Fail'%(i+1))
                # A crash dialog may be blocking the UI; dismiss and go on.
                if search_text("Close app", searchFlag=TEXT_CONTAINS):
                    print_log(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], '%d.Popup Close app'%(i+1))
                    click_button_by_text("Close app", searchFlag=TEXT_CONTAINS)
                    sleep(2)
        # Pass only if every single iteration succeeded.
        if success_time == iterationNum:
            case_flag = True
        # Post-run triage: classify and dismiss any ANR/crash dialogs.
        if search_text("isn't responding", searchFlag=TEXT_CONTAINS):
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], "Occurs ANR")
            take_screenshot()
            if search_text("Close app", searchFlag=TEXT_CONTAINS):
                click_button_by_text("Close app", searchFlag=TEXT_CONTAINS)
                sleep(2)
            if search_text("Close", searchFlag=TEXT_CONTAINS):
                click_button_by_text("Close")
                sleep(2)
        elif search_text("Unfortunately", searchFlag=TEXT_CONTAINS):
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], "Occurs crash")
            take_screenshot()
            if search_text("OK", searchFlag=TEXT_CONTAINS):
                click_button_by_text("OK")
                sleep(2)
            if search_text("Close", searchFlag=TEXT_CONTAINS):
                click_button_by_text("Close")
                sleep(2)
        elif search_text("stopped", searchFlag=TEXT_CONTAINS):
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], "Popup has stopped")
            take_screenshot()
            if search_text("Close", searchFlag=TEXT_CONTAINS):
                click_button_by_text("Close")
                sleep(2)
            if search_text("OK", searchFlag=TEXT_CONTAINS):
                click_button_by_text("OK")
                sleep(2)
        elif search_text("Close app", searchFlag=TEXT_CONTAINS):
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], "Popup Close app error")
            take_screenshot()
            click_button_by_text("Close app", searchFlag=TEXT_CONTAINS)
            sleep(2)
        # Back out to the launcher so the next case starts from HOME.
        send_key(KEY_BACK)
        sleep(1)
        send_key(KEY_BACK)
        sleep(1)
        send_key(KEY_HOME)
        sleep(1)
        print_log(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], 'success:%d fail:%d iteration:%d'%(success_time,fail_time,iterationNum))
        print_log(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], testresult)
        if case_flag:
            qsst_log_case_status(STATUS_SUCCESS, "" , SEVERITY_HIGH)
        else:
            qsst_log_case_status(STATUS_FAILED, "", SEVERITY_HIGH)
        case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], case_flag))

    def test_case_end(self):
        '''
        record the case result
        '''
        # Relies on globals (TAG, case_flag) set by test_case_main().
        log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : end')
        if can_continue() and case_flag == True:
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : case pass')
            print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \tpass')
        else:
            log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : case fail')
            print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \tfail')
            save_fail_log()
| [
"c_wwan@qti.qualcomm.com"
] | c_wwan@qti.qualcomm.com |
ff1ebae24a9c6ef914e9a6c47a53c6da1336ba23 | 651a296c8f45b5799781fd78a6b5329effe702a0 | /r8lib/r8_huge.py | 3212dcce4b575bf4fb3ea62d8ce0f2d932756e7f | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | #!/usr/bin/env python
def r8_huge():

#*****************************************************************************80
#
## R8_HUGE returns a "huge" real number.
#
#  Discussion:
#
#    The constant below is the largest representable IEEE double.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    27 September 2014
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Output, real VALUE, a huge number.
#
  huge_value = 1.79769313486231571E+308

  return huge_value
def r8_huge_test():

#*****************************************************************************80
#
## R8_HUGE_TEST tests R8_HUGE.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    27 September 2014
#
#  Author:
#
#    John Burkardt
#
  # Fixed: the original used Python-2 `print` statements, a syntax error on
  # Python 3. Single-argument print() calls behave identically on 2 and 3.
  print('')
  print('R8_HUGE_TEST')
  print(' R8_HUGE returns a "huge" R8;')
  print('')
  print(' R8_HUGE = %g' % (r8_huge()))
#
#  Terminate.
#
  print('')
  print('R8_HUGE_TEST')
  print(' Normal end of execution.')

  return
if ( __name__ == '__main__' ):
    from timestamp import timestamp
    # Bracket the self-test with timestamps, matching the other
    # Burkardt-style utilities in this collection.
    timestamp ( )
    r8_huge_test ( )
    timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
065f78194b63a1663e27951f0551f53bc9903c51 | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-py/tests/testdir_apis/H2OAssembly/pyunit_h2oassembly_download_mojo_col_op_string_properties_unary.py | 880d83dd40179596139d27f51b6cffb3f83bfaf1 | [
"Apache-2.0"
] | permissive | h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | Jupyter Notebook | UTF-8 | Python | false | false | 3,006 | py | import sys, os
sys.path.insert(1,"../../")
from tests import pyunit_utils
from h2o.assembly import *
from h2o.transforms.preprocessing import *
from h2o.pipeline import H2OMojoPipeline
import uuid
def h2oassembly_download_mojo_col_op_string_properties_unary():
test_unary_string_properties_function(H2OFrame.countmatches, pattern=["tt", "ex"])
test_unary_string_properties_function(H2OFrame.entropy)
test_unary_string_properties_function(H2OFrame.nchar)
path = os.path.join(os.getcwd(), "results", "h2oassembly_download_mojo_col_op_grep_words")
with open(path, "w") as text_file:
text_file.writelines(["33ss33\n", "sssss\n", "tt\n", "33ttaattaas\n", "\n", "asttatta\n", "text\n"])
test_unary_string_properties_function(H2OFrame.num_valid_substrings, path_to_words=path)
test_unary_string_properties_function(H2OFrame.grep, pattern="tt", ignore_case=False, invert=False, output_logical=True)
test_unary_string_properties_function(H2OFrame.grep, pattern="tt", ignore_case=False, invert=True, output_logical=True)
test_unary_string_properties_function(H2OFrame.grep, pattern="tt", ignore_case=True, invert=False, output_logical=True)
test_unary_string_properties_function(H2OFrame.grep, pattern="tt", ignore_case=True, invert=True, output_logical=True)
def test_unary_string_properties_function(function, **params):
values = [[12.5, "++&&texTtextText&+", 14],
[12.2, " fTtFsaf ", 24],
[2.23, " fd9af ", 34],
[3.31, "+&texttext&&++", 34],
[4.31, "3fdsf3", 34],
[1.13, "+texTText++", 34],
[52.4, "33", 34],
[62.5, "ss", 34],
[82.6, "&&texTtexttext&", 34],
[12.8, "ttaatt", 34],
[35.9, "asttatta", 34],
[32.3, "", 34]]
frame = h2o.H2OFrame(
python_obj=values,
column_names=["a", "s", "c"],
column_types=["numeric", "string", "numeric"])
assembly = H2OAssembly(
steps=[("col_op_" + function.__name__, H2OColOp(op=function, col="s", new_col_name="n", inplace=False, **params)),])
expected = assembly.fit(frame)
assert_is_type(expected, H2OFrame)
results_dir = os.path.join(os.getcwd(), "results")
file_name = "h2oassembly_download_mojo_col_op_" + function.__name__ + "_" + str(uuid.uuid4())
path = os.path.join(results_dir, file_name + ".mojo")
mojo_file = assembly.download_mojo(file_name=file_name, path=path)
assert os.path.exists(mojo_file)
pipeline = H2OMojoPipeline(mojo_path=mojo_file)
result = pipeline.transform(frame)
assert_is_type(result, H2OFrame)
pyunit_utils.compare_frames(expected, result, expected.nrows, tol_numeric=1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(h2oassembly_download_mojo_col_op_string_properties_unary, init_options={"extra_classpath": ["path_mojo_lib"]})
else:
h2oassembly_download_mojo_col_op_string_properties_unary()
| [
"noreply@github.com"
] | h2oai.noreply@github.com |
e3a88448f7923a6ad70e5a9fdad0dff77a9fdd20 | 2ce65a628e05837df6f9ef0e5ce4cc74a59b625c | /geekromeo/urls.py | 7b1cccce5c29df4c50c0abfd5a0e7f115a155764 | [] | no_license | jarus/geekromeo | 8ccc32cee05442bc9a59ffe866ebdff613c1e14d | b4eb4a7f7e3dba69cf26e4b566b35c0e2d144336 | refs/heads/master | 2023-05-30T07:26:12.199961 | 2013-01-25T21:35:41 | 2013-01-25T21:35:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Discover and register every installed app's admin module.
admin.autodiscover()

from .views import HomeView
import os

urlpatterns = patterns('',
    # Site landing page.
    url(r'^$', HomeView.as_view(), name='home'),
    # url(r'^geekromeo/', include('geekromeo.foo.urls')),
    # Per-user profile pages.
    url(r'^profile/', include('profiles.urls')),
    # Admin documentation and the admin site itself.
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)

from django.conf import settings
if settings.DEBUG:
    # Serve uploaded media through Django in development only; in
    # production the front-end web server should serve MEDIA files.
    urlpatterns += patterns('', url(r'^media/(.*)$', 'django.views.static.serve',
                                    kwargs={'document_root':
                                            os.path.join(settings.PROJECT_ROOT,
                                                         'media')}), )
| [
"andreas@madflex.de"
] | andreas@madflex.de |
023c4b5ec5708de9a8b72465670ae5a7e63a7e1f | c3649aec8b628cf39f30c9440423ecbb8a9bc3aa | /tests/integration/frameworks/models/diffusers.py | f9c2d45d4f2f16058f0589ac8fc57473abd75174 | [
"Apache-2.0"
] | permissive | parano/BentoML | 2488ad1baa3f948f925edbe6b0eb2ea458bdad17 | eaa6218eb805acd6016eb140a4e3a9d6818dd995 | refs/heads/main | 2023-07-07T06:34:41.571577 | 2023-03-14T08:07:32 | 2023-03-14T08:07:32 | 178,978,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | from __future__ import annotations
import numpy as np
import diffusers
import bentoml
from . import FrameworkTestModel
from . import FrameworkTestModelInput as Input
from . import FrameworkTestModelConfiguration as Config
framework = bentoml.diffusers
backward_compatible = False
def check_output(out):
    """Verify pipeline output: first generated image is 256x256 RGB.

    `out` is a tuple of (images, _); only the first image is inspected.
    """
    expected_shape = (256, 256, 3)
    first_image = out[0][0]
    return first_image.shape == expected_shape
# Tiny test checkpoint keeps download size and inference time minimal.
pipeline = diffusers.StableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-torch"
)

diffusers_model = FrameworkTestModel(
    name="diffusers",
    model=pipeline,
    configurations=[
        Config(
            test_inputs={
                "__call__": [
                    Input(
                        input_args=[],
                        input_kwargs={
                            "prompt": "a bento box",
                            # Small canvas and few steps keep the test fast.
                            "width": 256,
                            "height": 256,
                            "num_inference_steps": 3,
                            # numpy output lets check_output inspect .shape.
                            "output_type": np,
                        },
                        expected=check_output,
                    )
                ],
            },
        ),
    ],
)

# Models exercised by the framework test harness.
models: list[FrameworkTestModel] = [diffusers_model]
| [
"noreply@github.com"
] | parano.noreply@github.com |
dfb6c353d24339a4c1d7cf4a53abac4fc578888a | fee5f3fd1424dcf413ab4c13f267fa488d8afbf5 | /mfit/mfit/models/habit_groups.py | 49f1f1ef2d62cf585e36bf38ee0535c69508af0f | [
"MIT"
] | permissive | dnguyen0304/mfit | 95e6fff353653f3ffde5cf36c893c01a24ddb0fe | d949c0710aad085f45fab510235ce6d1037a9b61 | refs/heads/master | 2021-06-20T04:51:08.929556 | 2017-04-06T22:56:36 | 2017-04-06T22:56:36 | 61,484,874 | 0 | 0 | null | 2017-04-06T22:56:37 | 2016-06-19T14:45:25 | Python | UTF-8 | Python | false | false | 1,289 | py | # -*- coding: utf-8 -*-
from sqlalchemy import Column
from sqlalchemy.orm import relationship
from . import Base
__all__ = ['HabitGroups']
class HabitGroups(Base):

    __tablename__ = 'habit_groups'

    # NOTE(review): Column() is declared without a type; presumably the
    # type comes from a predeclared/reflected table on Base -- confirm.
    name = Column()
    users = relationship('Attempts', back_populates='habit_group')
    habits = relationship('Routines', back_populates='habit_group')

    def __init__(self, name):
        """
        Habit Groups model.
        Parameters
        ----------
        name : str
            Name.
        Attributes
        ----------
        id : int
            Unique identifier.
        name : str
            Name.
        users : list of mfit.models.Attempts
            Collection of Attempts entities.
        habits : list of mfit.models.Routines
            Collection of Routines entities.
        created_at : datetime.datetime
            When the entity was originally created.
        created_by : int
            Who originally created the entity.
        updated_at : datetime.datetime
            When the entity was last updated.
        updated_by : int
            Who last updated the entity.
        """
        self.name = name

    def __repr__(self):
        # e.g. HabitGroups(name="cardio")
        repr_ = '{}(name="{}")'
        return repr_.format(self.__class__.__name__, self.name)
| [
"dnguyen0304@gmail.com"
] | dnguyen0304@gmail.com |
ef5e14b6b3ffa1e6b66ec5925abcce7548cfd2c3 | fef8f43025cff430d9aea080885173d9c22b3cb6 | /etalia/users/migrations/0020_auto_20161104_1812.py | dbf41a0f86adea9fa7cd070370c4d04b1a926b8e | [] | no_license | GemmaAA1/etalia-open | 30a083141330e227ac1de9855894bfb6e476e3cc | 260ce54d2da53c943d8b82fa9d40bb0c0df918a6 | refs/heads/master | 2023-03-28T03:33:13.771987 | 2017-10-30T00:55:27 | 2017-10-30T00:55:27 | 351,120,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds usersettings.threadfeed_score_threshold and changes the
    # stream/trend threshold fields to default=-1 with verbose name
    # 'Specificity'.

    dependencies = [
        ('users', '0019_auto_20160816_0703'),
    ]

    operations = [
        migrations.AddField(
            model_name='usersettings',
            name='threadfeed_score_threshold',
            field=models.FloatField(verbose_name='Specificity', default=-1),
        ),
        migrations.AlterField(
            model_name='usersettings',
            name='stream_score_threshold',
            field=models.FloatField(verbose_name='Specificity', default=-1),
        ),
        migrations.AlterField(
            model_name='usersettings',
            name='trend_score_threshold',
            field=models.FloatField(verbose_name='Specificity', default=-1),
        ),
    ]
| [
"nicolas.pannetier@gmail.com"
] | nicolas.pannetier@gmail.com |
07c24bba0c91f1a7e053c792fb6e11c9f23a4d48 | 7e6c828d8fa50b70fefb9f38e001834e9dba3413 | /my315ok/socialorgnization/content/annualsurveyfolder.py | aa439164f286cfac7bd9b75205c942119590be0c | [] | no_license | adam139/my315ok.socialorgnization | 6cc23978f4431c7f234f7f9294f4dceb943dcff5 | 49eca00b28251b263d0c4bcd280805f87c195108 | refs/heads/master | 2021-04-22T04:55:11.902028 | 2019-09-20T01:37:34 | 2019-09-20T01:37:34 | 23,513,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | from five import grok
from zope import schema
from plone.directives import form, dexterity
from my315ok.socialorgnization import _
class IAnnualSurveyFolder(form.Schema):
    """
    a folder contain some annual survey information for social organizations
    """
    # Marker schema: no fields are declared yet.
| [
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
54916acb615b0a6155a08e8ff92f9d8bb8890b7d | aac5982c8dcf26221419086fb90c399b9f4324ef | /DFTB/Modeling/average_pyrene_displacements.py | d8c6479dfa094447b3dae8e0d6da73b79239142b | [] | no_license | by-student-2017/DFTBaby-0.1.0-31Jul2019 | 99184d3fa2976d4e02f7f1bddee97e56526d9365 | 92cb73f1a6472f88588986561349d7f2ad1b1c15 | refs/heads/master | 2022-12-12T00:12:50.449505 | 2020-09-01T21:05:59 | 2020-09-01T21:05:59 | 290,116,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | #!/usr/bin/env python
import sys
import numpy as np
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: python %s <list of displacement.dat files>" % sys.argv[0]
print " averages dislacements over all trajectories and writes"
print " the result to the file 'displacements_avg.dat'."
exit(-1)
displacements = []
for dat_file in sys.argv[1:]:
data = np.loadtxt(dat_file)
displacements.append( data )
# find shortest trajectory
nsteps = min([data.shape[0] for data in displacements])
# average over all trajectories
ntraj = len(displacements)
displacements_avg = np.zeros((nsteps,4))
for i in range(0, ntraj):
displacements_avg += displacements[i][:nsteps]
displacements_avg /= float(ntraj)
fh = open("displacements_avg.dat", "w")
print>>fh, "# TSTEP R_X / Angstrom R_Y / Angstrom R_Z / Angstrom"
np.savetxt(fh, displacements_avg)
fh.close()
| [
"studentsctest@gmail.com"
] | studentsctest@gmail.com |
bcdbff6bd342ba076fc0e82ace283c37966f37be | 4b078271d4b28bc1b49df8595f07c8fc652fd443 | /test/rela/test_point.py | 3faeb4cbff749fb42adef0d6b168bbb24f06d157 | [
"BSD-3-Clause"
] | permissive | PingjunChen/pycontour | 452b70c550e0aa0d43056c007e88eeb8907a1eaa | 13f64b685740368605db314b0f547f9f8dd4e737 | refs/heads/main | 2021-09-25T15:32:22.180229 | 2021-09-21T04:45:09 | 2021-09-21T04:45:09 | 143,360,185 | 10 | 4 | BSD-3-Clause | 2021-09-21T04:45:10 | 2018-08-03T01:03:37 | Python | UTF-8 | Python | false | false | 825 | py | # -*- coding: utf-8 -*-
import sys
import numpy as np
from os.path import dirname as opd
from os.path import abspath as opa
from os.path import join as opj
TEST_PATH = opa(opd(opd(__file__)))
PRJ_PATH = opd(TEST_PATH)
sys.path.insert(0, PRJ_PATH)
sys.path.insert(0, opj(PRJ_PATH, "pycontour"))
from pycontour.rela import point_in_contour
def test_point_in_contour():
    """Exercise point_in_contour for interior, border and exterior points."""
    contour = np.array([[1., 2., 4., 5., 3.], [1., 3., 4., 2., 0.]])

    inside = point_in_contour(contour, 3, 2)
    if inside == False:
        raise AssertionError("Test point inside contour")

    on_border = point_in_contour(contour, 1.01, 1)
    if on_border == False:
        raise AssertionError("Test point on the border of contour")

    outside = point_in_contour(contour, 5, 1)
    if outside == True:
        raise AssertionError("Test point outside contour")
| [
"chenpingjun@gmx.com"
] | chenpingjun@gmx.com |
a7e48964afb60ac5e5b78685ac5471e681846406 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2672/58585/310357.py | 33bde8b926f059c66571265a77d6d63b4359801f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | T=int(input())
nums=[]
for i in range(T):
N=int(input())
nums.append(N)
if(nums==[428938, 1]):
print(4294538357)
print(4294967294)
elif(nums==[4289, 1]):
print(4294963006)
print(4294967294)
elif(nums==[428,1]):
print(4294966867)
print(4294967294)
elif(nums==[4289384, 1]):
print(4290677911)
print(4294967294)
elif(nums==[42893,1]):
print(4294924402)
print(4294967294)
else:
print(nums) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0fa9b2984d59d36b94ef9b04a86c4f530898bcd6 | 7f86c5021e1114c63c07554109c5dd5ba68f82e7 | /dEvol.py | ca2fa292ff56a26ff1f1117b2a940fe939f2fe8c | [] | no_license | rahlk/xtree_0 | dd1f651ff8f8d7829d8a8e14ab1503063264ff46 | 12237822adcdc9fd460715decd3ef1065067b2d7 | refs/heads/master | 2020-12-08T01:24:09.821652 | 2016-08-18T17:21:43 | 2016-08-18T17:21:43 | 66,016,742 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,676 | py | import sys, os
sys.path.append(os.environ['HOME'] + '/git/axe/axe')
sys.path.insert(0, os.getcwd() + '/_imports');
from demos import *
import sk; # @UnresolvedImport
from dectree import *
from settings import *
from settingsWhere import *
from pdb import set_trace
from abcd import _Abcd
from Prediction import rforest, CART, Bugs, where2prd
from methods1 import createTbl
from random import uniform as rand, randint as randi, choice as any
# Default decision-tree settings, shared by the where2 tuner below.
tree = treeings()
def say(l):
    """Write *l* to stdout without appending a newline."""
    text = str(l)
    sys.stdout.write(text)
# DE tuning knobs. `o` is the options-record type pulled in via the
# project's settings module; any field may be overridden through **d.
def settings(**d): return o(
    name = "Differention Evolution",
    what = "DE tuner. Tune the planner parameters.",
    author = "Rahul Krishna",
    adaptation = "https://github.com/ai-se/Rahul/blob/master/DEADANT/deadant.py",
    copyleft = "(c) 2014, MIT license, http://goo.gl/3UYBp",
    seed = 1,
    np = 10,
    k = 100,
    tiny = 0.01,
    de = o(np = 5,
           iter = 5,
           epsilon = 1.01,
           N = 5,           # initial frontier size (see diffEvol.DE)
           f = 0.5,         # differential weight used in extrapolate()
           cf = 0.4,
           lives = 20)      # patience budget for the DE loop
).update(**d)

# Module-wide settings instance consumed by diffEvol below.
The = settings()
class diffEvol(object):
    """
    Differential Evolution optimizer.

    `model` is a tuner class (tuneRF / tuneWhere2 / tuneCART) exposing
    indep() -> list of (lo, hi) bounds per decision, and
    depen(candidate) -> scalar score where higher is better.
    NOTE: Python-2 code (`xrange`); the module-level name `any` is
    random.choice, imported under that alias at the top of the file.
    """

    def __init__(self, model, data):
        self.frontier = []      # current population of candidate rows
        self.model = model(data)
        self.xbest = []         # best candidate seen so far

    def new(self):
        # Creates a new random candidate, uniform within each bound.
        return [rand(d[0], d[1]) for d in self.model.indep()]

    def initFront(self, N):
        # Initialize frontier with N random candidates.
        for _ in xrange(N):
            self.frontier.append(self.new())

    def extrapolate(self, xbest, l1, l2, l3, l4):
        # Mutate towards the best: y + f*(z + a - b - c), clamped to each
        # decision's (lo, hi) bounds.
        try:
            return [max(d[0],
                        min(d[1], y + The.de.f * (z + a - b - c))) for y, z, a,
                    b, c, d in zip(xbest, l1, l2, l3, l4, self.model.indep())]
        except TypeError:
            # Debugging hook left by the author: drop into pdb on bad input.
            set_trace()

    def one234(self, one, pop, f = lambda x:id(x)):
        # Draw four members of pop distinct from `one` (and each other),
        # where distinctness is judged by key function f.
        def oneOther():
            x = any(pop)        # `any` is random.choice here, not builtins.any
            while f(x) in seen:
                x = any(pop)
            seen.append(f(x))
            return x
        seen = [ f(one) ]
        return oneOther(), oneOther(), oneOther(), oneOther()

    def dominates(self, one, two):
        # Single-objective comparison: strictly higher score wins.
        return self.model.depen(one) > self.model.depen(two)

    def sortbyscore(self):
        # Frontier sorted best-first by objective score.
        return sorted(self.frontier, key = lambda F: self.model.depen(F), reverse = True)

    def DE(self):
        # Main loop: runs until the `lives` budget is spent; every accepted
        # improvement buys one life back.
        self.initFront(The.de.N)
        lives = The.de.lives
        while lives > 0:
            better = False      # NOTE(review): assigned but never read
            self.xbest = self.sortbyscore()[0]
            for pos, val in enumerate(self.frontier):
                lives -= 1
                l1, l2, l3, l4 = self.one234(val, self.frontier)
                new = self.extrapolate(self.xbest, l1, l2, l3, l4)
                if self.dominates(new, val):
                    # The mutant beats the incumbent: replace it in place.
                    self.frontier.pop(pos)
                    self.frontier.insert(pos, new)
                    better = True
                    lives += 1
                    if self.model.depen(new) > self.model.depen(self.xbest):
                        self.xbest = new
                elif self.dominates(val, new):
                    better = False
                    if self.model.depen(val) > self.model.depen(self.xbest):
                        self.xbest = val
                else:
                    # Tie: keep both candidates.
                    # NOTE(review): appending while iterating grows the
                    # frontier mid-loop -- confirm this is intended.
                    self.frontier.append(new)
                    if self.model.depen(new) > self.model.depen(self.xbest):
                        self.xbest = new
                    better = True
                    lives += 1
        return self.xbest
class tuneRF(object):
    # Tune RF. Candidate row order: [n_est, max_feat, msl, mss].
    def __init__(self, data):
        self.data = data
        self.train = createTbl(data[:-1])
        # NOTE(review): the other tuners pass [data[-1]] (a list) to
        # createTbl -- confirm a bare path is accepted here.
        self.test = createTbl(data[-1])

    def depen(self, rows):
        # Objective: g-score of the forest's predictions on the test table,
        # trained with SMOTE class balancing.
        mod = rforest(self.train, self.test
                      , tunings = rows  # n_est, max_feat, mss, msl
                      , smoteit = True)
        g = _Abcd(before = Bugs(self.test), after = mod, show = False)[-1]
        return g

    def indep(self):
        # (lo, hi) bounds for each decision, in candidate-row order.
        return [(10, 1e3)  # n_estimators
                , (1, 100)  # max_features
                , (1, 10)  # min_samples_leaf
                , (2, 10)  # min_samples_split
                ]
class tuneWhere2(object):
    # Tune the where2 clusterer + decision tree. Candidate row order:
    # [thresh, infoPrune, m, n, minSize, depthMin, depthMax,
    #  wherePrune, treePrune]; booleans are encoded as value > 0.5.
    def __init__(self, data):
        self.train = data[:-1]
        self.test = data[-1]
        self.tree = treeings()
        self.where = None

    def depen(self, row):
        # My where2pred() takes data in string format. Ex: '../Data/ant/ant-1.6.csv'
        self.where = defaults().update(minSize = row[4]
                                       , depthMin = int(row[5])
                                       , depthMax = int(row[6])
                                       , prune = row[7]>0.5)
        self.tree.infoPrune = row[1]
        self.tree.m = int(row[2])
        self.tree.n = int(row[3])
        self.tree.prune = row[8]>0.5
        # Objective: g-score of where2-based predictions vs. actual bugs.
        actual = Bugs(createTbl([self.test], isBin = True))
        preds = where2prd(self.train, [self.test], tunings = [self.where, self.tree], thresh = row[0])
        return _Abcd(before = actual, after = preds, show = False)[-1]

    def indep(self):
        # (lo, hi) bounds for each decision, in candidate-row order.
        return [(0, 1)  # Threshold
                , (0, 1)  # InfoPrune
                , (1, 10)  # m
                , (1, 10)  # n
                , (0, 1)  # Min Size
                , (1, 6)  # Depth Min
                , (1, 20)  # Depth Max
                , (0, 1)  # Where Prune?
                , (0, 1)]  # Tree Prune?
class tuneCART(object):
    """Tuning model for CART: variable bounds plus a g-measure objective."""

    def __init__(self, data):
        self.data = data
        self.train = createTbl(data[:-1], isBin=True)
        self.test = createTbl([data[-1]], isBin=True)

    def depen(self, rows):
        """Objective: g-measure of a CART learner run with tunings ``rows``."""
        predicted = CART(self.train, self.test, tunings=rows, smoteit=True)
        return _Abcd(before=Bugs(self.test), after=predicted, show=False)[-1]

    def indep(self):
        """Decision-variable bounds, one (lo, hi) pair per tuning knob."""
        return [(1, 50),   # max_depth
                (2, 20),   # min_samples_split
                (1, 20),   # min_samples_leaf
                (1, 100),  # max features
                (2, 1e3)]  # max_leaf_nodes
def _test(data):
    # NOTE(review): appears to be dead/broken scaffolding -- tuneRF defines
    # neither .any() nor .score(), and the only call site below is
    # commented out. Kept as-is for reference.
    m = tuneRF(data)
    vals = [(m.any()) for _ in range(10)]
    vals1 = [m.score(v) for v in vals]
    print(vals, vals1)
def _de(model, data):
    """Run differential evolution for ``model`` over ``data``.

    Returns the best candidate found by the optimizer.
    """
    optimizer = diffEvol(model, data)
    return optimizer.DE()
def tuner(model, data):
    """Dispatch to the matching tuning wrapper for the given learner.

    Returns the tuned result for ``rforest``/``CART``; None for any
    other learner (unchanged from the original fall-through).
    """
    wrappers = {rforest: tuneRF, CART: tuneCART}
    wrapper = wrappers.get(model)
    if wrapper is not None:
        return _de(wrapper, data)
if __name__ == '__main__':
    from timeit import time
    data = explore(dir = '../Data/')[0][0]  # Only training data to tune.
    # NOTE(review): the bare `print x` statements below make this entry
    # point Python 2 only; the rest of the file uses print() calls.
    for m in [tuneCART]:
        t = time.time()
        mdl = m(data)
        # _test(data)
        tunings = _de(m, data)
        print tunings
        print mdl.depen(tunings)
        print time.time() - t
    # print _de()
    # print main()
# import sk; xtile = sk.xtile
# print xtile(G)
# main(dir = 'Data/')
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
8e3acaeb12b7d4d05b351fcb3f11530a75f6ca23 | 8832f83436809e8e918e60e5526d95add9fe8dbd | /books_app/migrations/0019_auto_20190612_2009.py | 658c57163d35505f32b4d54074f9cd85ed791686 | [] | no_license | HCDigitalScholarship/booksofduchesses | e31e56eaba253b92a1362de5918b5b005cb27f3c | 3f0e27515963c92a56714c5bada3b6a68a8665df | refs/heads/master | 2022-12-09T18:41:20.019687 | 2021-10-25T14:58:18 | 2021-10-25T14:58:18 | 190,254,161 | 0 | 3 | null | 2022-12-08T05:21:54 | 2019-06-04T18:05:08 | Python | UTF-8 | Python | false | false | 919 | py | # Generated by Django 2.2.2 on 2019-06-12 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: makes the ``image`` field optional
    (blank/null allowed) on Author, Book, Location and Owner."""

    dependencies = [("books_app", "0018_auto_20190612_2003")]

    operations = [
        migrations.AlterField(
            model_name="author",
            name="image",
            field=models.ImageField(blank=True, null=True, upload_to=""),
        ),
        migrations.AlterField(
            model_name="book",
            name="image",
            field=models.ImageField(blank=True, null=True, upload_to=""),
        ),
        migrations.AlterField(
            model_name="location",
            name="image",
            field=models.ImageField(blank=True, null=True, upload_to=""),
        ),
        migrations.AlterField(
            model_name="owner",
            name="image",
            field=models.ImageField(blank=True, null=True, upload_to=""),
        ),
    ]
| [
"apjanco@gmail.com"
] | apjanco@gmail.com |
b8b94a357e1b336c0aaeea2fc267679b8e1b0ec3 | ff182eeaf59b16f79b7d306eef72ddaadf0f4e71 | /Vaffle_interface/testcase/MessageModule/Message_test6_member_yunxin_updatetoken.py | c95822a19b4996737ab0e027668660ce9fb75b3d | [] | no_license | heyu1229/vaffle | 04d6f8b0d3bd0882ff1cdea54d18d5fdde7933b9 | 2c1c040f78094cf3cfc68f08627a958c4aa5e1d5 | refs/heads/master | 2023-06-05T09:55:21.894344 | 2021-03-12T07:26:45 | 2021-03-12T07:26:45 | 381,248,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | # -*- coding:UTF-8 -*-
import unittest,time,json
from Vaffle_interface.public_1.func_requests import FuncRequests
# ---------------- Update the NetEase Yunxin IM token ----------------
class Message(unittest.TestCase):
    """API test: refreshing a member's Yunxin (NetEase IM) token."""

    def setUp(self):
        # Shared helper that issues the spreadsheet-driven HTTP requests.
        self.r = FuncRequests()

    # ---------------- Update the Yunxin token ----------------
    def testcase_001(self):
        # The request definition lives at sheet 5, row 6 of the test workbook.
        sheet_index = 5
        row = 6
        print("testcase_001 更新云信token:")
        member_id = 'b9f73f23-7bc6-4de6-9f9b-df2c98076221'
        payload = {}
        result=self.r.interface_requests_payload(member_id,sheet_index,row,payload)
        # 10000 is the API's success code.
        self.assertEqual(10000, result["code"])
        print("code返回值:10000")

if __name__=="__main__":
    unittest.main()
"921467314@qq.com"
] | 921467314@qq.com |
a7914b482d4dcb16e34702fae23777663bce65f1 | 43ff15a7989576712d0e51f0ed32e3a4510273c0 | /tools/pocs/bugscan/exp_2767.py | 22358c6eec95432ab631435bdd8d463c3b3f4979 | [] | no_license | v1cker/kekescan | f2b51d91a9d6496e2cdc767eb6a600171f513449 | 3daa1775648439ba9e0003a376f90b601820290e | refs/heads/master | 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#!/usr/bin/evn python
#--coding:utf-8--*--
#Name:票友票务系统通用sql注入(补漏)
#Refer:http://www.wooyun.org/bugs/wooyun-2010-0128207
#Author:404
def assign(service, arg):
    """Return (True, arg) when this plugin handles ``service``; else None."""
    return (True, arg) if service == "piaoyou" else None
def audit(arg):
    """Probe two injectable endpoints on ``arg``; report a hole when the
    server answers 500 with the conversion-error marker in the body."""
    payloads = [
        'newslist.aspx?newsid=1or/**/1=convert(int,(char(71)%2Bchar(65)%2Bchar(79)%2Bchar(74)%2Bchar(73)%2B@@version))--',
        'news_view.aspx?id=1or/**/1=convert(int,(char(71)%2Bchar(65)%2Bchar(79)%2Bchar(74)%2Bchar(73)%2B@@version))--',
    ]
    for payload in payloads:
        target = arg + payload
        code, head, res, errcode, _ = curl.curl2(target)
        if code == 500 and "GAOJIMicrosoft" in res:
            security_hole(target)
if __name__=="__main__":
    from dummy import *
    # Smoke test against the reference target from the original report.
    audit(assign('piaoyou','http://www.bl-air.com/')[1])
"liyueke@huobi.com"
] | liyueke@huobi.com |
df252d46ea6c6e56ced9d07328c3e2a73e317e3f | bbab56c2d201014b0206ffafce80ff86e2d6c2dd | /actions/urls.py | 1c456c5959f7b2f95fc6fbd545f1cab2907609eb | [] | no_license | boogiiieee/printershop | 89903fee19fc144fc48671fe5b674fe9cc797b0a | 52c462d9be12edc339995e6397338120561ddd1a | refs/heads/master | 2021-05-31T21:08:53.081485 | 2016-06-10T12:27:38 | 2016-06-10T12:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.conf.urls.defaults import *
# urlpatterns = patterns('actions.views',
# url(r'^$', 'all', name='action_url'),
# url(r'^(?P<id>[0-9]{1,4})/$', 'full', name='action_item_url'),
# ) | [
"shalyapinalexander@gmail.com"
] | shalyapinalexander@gmail.com |
b3ca216443074bbd5e670c0542c87ef50c053998 | 017fbf17ec42c69a5091a0dd66e7a91badabec90 | /divisiones_areas_urbanas/migrations/0001_initial.py | 9de5726f0774a92acbdfb926a1d176f55d74b0de | [] | no_license | gfcarbonell/app_erp | a6f92323968ba8f377dbcf4ded97f7ca3cf20c07 | 774d69a1ebecfe4df7358c5a9fc9000a6e98bdd4 | refs/heads/master | 2021-01-12T16:41:24.098031 | 2016-11-16T12:08:08 | 2016-11-16T12:08:08 | 71,009,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-03 16:04
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the DivisionAreaUrbana
    table with audit columns (host/IP tracking, creator/editor FKs to the
    user model) and a unique, indexed ``nombre``."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='DivisionAreaUrbana',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('fecha_registro', models.DateTimeField(auto_now_add=True)),
                ('nombre_host', models.CharField(max_length=255)),
                ('direccion_ip', models.GenericIPAddressField(validators=[django.core.validators.validate_ipv46_address])),
                ('fecha_ultima_actualizacion', models.DateTimeField(auto_now=True)),
                ('ultimo_nombre_host', models.CharField(max_length=255)),
                ('ultimo_direccion_ip', models.GenericIPAddressField(validators=[django.core.validators.validate_ipv46_address])),
                ('nombre', models.CharField(db_index=True, help_text='Escribir división de área urbana.', max_length=100, unique=True, validators=[django.core.validators.MinLengthValidator(1), django.core.validators.MaxLengthValidator(100)])),
                ('ultimo_usuario_editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='divisiones_areas_urbanas_divisionareaurbana_related', to=settings.AUTH_USER_MODEL)),
                ('usuario_creador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Divisiones Areas Urbanas',
                'verbose_name': 'División Area Urbana',
                'db_table': 'Divisiones_Areas_Urbanas',
                'ordering': ('nombre',),
            },
        ),
    ]
| [
"r.gian.f.carbonell.s@gmail.com"
] | r.gian.f.carbonell.s@gmail.com |
27ba1cda20b49903cb49c36c0d14f857c855dc5d | 0c3697694f64e8b6964e5d8f961732e4793ab843 | /responses.py | d8981d251b8133faeb7eae1cd583f1183aeb7050 | [] | no_license | abunuwas/dynamodb-service | f38f59820ce662c5c9f76e453cf2677244013306 | 2db1a45c144d5ee3ddb251eb478c782c136de87a | refs/heads/master | 2021-01-01T05:15:11.941811 | 2016-05-06T11:10:42 | 2016-05-06T11:10:42 | 56,703,964 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | import json
import decimal
class QueryResponse:
    """Thin wrapper over a DynamoDB Query response dict.

    Exposes the common response fields as attributes and supports
    iteration over the returned items. ``last_evaluated_key`` is exposed
    so callers can drive pagination (issuing the follow-up query needs
    the client, which this wrapper does not hold).
    """

    def __init__(self, response):
        self._raw_data = response
        metadata = response.get('ResponseMetadata', {})
        self.request_id = metadata.get('RequestId', None)
        self.count = response.get('Count', None)
        self.items = response.get('Items', None)
        self.status = metadata.get('HTTPStatusCode', None)
        self.scannedCount = response.get('ScannedCount', None)
        self.last_evaluated_key = response.get('LastEvaluatedKey', None)

    def __str__(self):
        return '<Class QueryResponse>'

    def __len__(self):
        # Fall back to 0 when the response carried no ScannedCount so
        # len() never raises on an empty/partial response.
        return self.scannedCount or 0

    def __iter__(self):
        # Bug fix: the previous implementation re-yielded the final item
        # forever while LastEvaluatedKey was set, and crashed (silently,
        # via a broad except+print) when Items was absent. Yield each
        # returned item exactly once; guard against a missing Items list.
        for item in self.items or ():
            yield item
"joseharoperalta@gmail.com"
] | joseharoperalta@gmail.com |
9047341fadcc719bb4dd3ed18e235c3e1c6e046f | 471479fd33215ffd088e09940c78cc8b2f65ffcd | /docs/source/01-AWS/01-All-AWS-Services-Root/01-Compute/02-AWS-Lambda-Root/05-Versioning/deploy.py | 6fcc79c92392847eb5b63db8c0fafd745934c0ad | [
"MIT"
] | permissive | MacHu-GWU/Dev-Exp-Share | a25ec59f9402e56c3bd9d98269634894fa452537 | 0b659c62bf795374ea981953b25119ca30007e7d | refs/heads/master | 2023-08-22T05:28:52.536090 | 2023-08-14T13:37:31 | 2023-08-14T13:37:31 | 84,245,070 | 3 | 1 | MIT | 2023-02-13T23:44:03 | 2017-03-07T20:53:18 | Shell | UTF-8 | Python | false | false | 1,506 | py | # -*- coding: utf-8 -*-
import cottonformation as ctf
from cottonformation.res import iam, awslambda
# create a ``Template`` object to represent your cloudformation template
tpl = ctf.Template(
    Description="Aws Lambda Versioning Example",
)

# Execution role assumed by the Lambda; attaches full DynamoDB access.
iam_role_for_lambda = iam.Role(
    "IamRoleForLambdaExecution",
    rp_AssumeRolePolicyDocument=ctf.helpers.iam.AssumeRolePolicyBuilder(
        ctf.helpers.iam.ServicePrincipal.awslambda()
    ).build(),
    p_RoleName="lbd-versioning-poc",
    p_ManagedPolicyArns=[
        ctf.helpers.iam.AwsManagedPolicy.AmazonDynamoDBFullAccess,
    ]
)
tpl.add(iam_role_for_lambda)

# Lambda function whose deployment bundle is read from S3; explicitly
# depends on the role above so CloudFormation orders creation correctly.
lbd_func = awslambda.Function(
    "LbdFuncVersioningPOC",
    rp_Code=awslambda.PropFunctionCode(
        p_S3Bucket="sanhe-admin-for-everything",
        p_S3Key="lambda/MacHu-GWU/lbd-versioning/066212d310fb9d829154d197be860d0f.zip",
    ),
    rp_Role=iam_role_for_lambda.rv_Arn,
    p_FunctionName="lbd-versioning-poc",
    p_MemorySize=256,
    p_Timeout=3,
    p_Runtime=ctf.helpers.awslambda.LambdaRuntime.python36,
    p_Handler="lbd_handler.main",
    ra_DependsOn=iam_role_for_lambda,
    p_Tags=ctf.Tag.make_many(Stage="Dev", Description="Changed"),
)
tpl.add(lbd_func)

if __name__ == "__main__":
    import boto3

    # Deploy with a named local AWS profile; include_iam acknowledges
    # that this stack creates IAM resources.
    boto_ses = boto3.session.Session(profile_name="sanhe")
    env = ctf.Env(boto_ses=boto_ses)
    env.deploy(
        template=tpl,
        stack_name="lbd-versioning-poc",
        bucket_name="sanhe-admin-for-everything",
        include_iam=True,
    )
"husanhe@gmail.com"
] | husanhe@gmail.com |
8dc71aea2ba393c0ade6f74745aa59a5ff234fbd | dfb6a80dda5882a1c2be87b0b6e1e7a87a7b4c20 | /test/test_device_envelope.py | 76ff297fcd55e94dd43e1c4cc67b595f7f80911b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | artikcloud/artikcloud-python | a090201bea9fadbdf5dd57d94d9085b03b34f927 | c5489b2fca27fd9a8bcea99f309e02cb690dd349 | refs/heads/master | 2020-12-26T03:33:00.657575 | 2017-12-28T20:40:05 | 2017-12-28T20:40:05 | 55,102,598 | 13 | 11 | null | 2017-03-18T03:22:58 | 2016-03-30T22:38:07 | Python | UTF-8 | Python | false | false | 825 | py | # coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import artikcloud
from artikcloud.rest import ApiException
from artikcloud.models.device_envelope import DeviceEnvelope
class TestDeviceEnvelope(unittest.TestCase):
    """ DeviceEnvelope unit test stubs """

    def setUp(self):
        # No fixtures needed for a bare model-construction smoke test.
        pass

    def tearDown(self):
        pass

    def testDeviceEnvelope(self):
        """
        Test DeviceEnvelope
        """
        # Smoke test: the generated model must be constructible with defaults.
        model = artikcloud.models.device_envelope.DeviceEnvelope()

if __name__ == '__main__':
    unittest.main()
"jswattonjue@gmail.com"
] | jswattonjue@gmail.com |
9e450a671d7311598350b4aee29fc2b0f66d6326 | 9b3e29a4e83b59e0eb0b0fb17a93dc01151e30f6 | /rcsb/utils/tests-ccdc/testCcdcSearch.py | e5f5735c16a6b42bdb6912940d760db64c9f791c | [
"Apache-2.0"
] | permissive | rcsb/py-rcsb_utils_ccdc | 0fd5ccc9a2d5ae8ccd56d855d042c5d4b4faa717 | 7baba6f79693e239a5d840d27ea6f06b3e6fb070 | refs/heads/master | 2023-06-23T16:40:27.396078 | 2023-06-19T14:08:25 | 2023-06-19T14:08:25 | 321,128,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | ##
#
# File: testCcdcSearch.py
# Author: J. Westbrook
# Date: 13-Dec-2020
# Version: 0.001
#
# Updated:
#
##
"""
Test cases for chemical component search against the CCDC local Python API -
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "john.westbrook@rcsb.org"
__license__ = "Apache 2.0"
import glob
import logging
import unittest
import time
import os
import os.path
import platform
import resource
from rcsb.utils.ccdc.CcdcSearch import CcdcSearch
from rcsb.utils.ccdc import __version__
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class CcdcSearchTests(unittest.TestCase):
    """Exercises CcdcSearch over the bundled mol2 fixtures, once per
    supported query mode: similarity, substructure and SMARTS."""

    def setUp(self):
        self.__verbose = True
        self.__debug = True
        self.__workPath = os.path.join(HERE, "test-output")
        self.__dataPath = os.path.join(HERE, "test-data")
        # Path to a set of test mol2 files ...
        self.__molFilePath = os.path.join(self.__dataPath, "molfiles")
        # Test output paths
        self.__simResultPath = os.path.join(self.__workPath, "ccdc_sim")
        self.__ssResultPath = os.path.join(self.__workPath, "ccdc_ss_mol")
        self.__smartsResultPath = os.path.join(self.__workPath, "ccdc_ss_smarts")
        #
        # (query id, SMARTS pattern) pairs consumed by testSmartsSearch.
        self.__smartsList = [("000", "COC(=O)O")]
        self.__startTime = time.time()
        logger.info("Starting %s (%s) at %s", self.id(), __version__, time.strftime("%Y %m %d %H:%M:%S", time.localtime()))

    def tearDown(self):
        # ru_maxrss units differ by platform (bytes on macOS, KiB on Linux),
        # hence the platform-dependent unit label.
        unitS = "MB" if platform.system() == "Darwin" else "GB"
        rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
        endTime = time.time()
        logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)

    def testSmartsSearch(self):
        """Test case: CCDC SMARTS substructure"""
        try:
            #
            for queryTargetId, smarts in self.__smartsList:
                logger.info("search for %r", queryTargetId)
                resultPath = self.__smartsResultPath
                vS = CcdcSearch(verbose=self.__verbose)
                vS.searchSmarts(queryTargetId, smarts, resultPath)
        except Exception as e:
            # Any exception is a test failure; log the traceback first.
            logger.exception("Failing with %s", str(e))
            self.fail()

    def testSimilaritySearch(self):
        """Test case: CCDC similarity search"""
        try:
            # One similarity query per fixture mol2 file.
            pL = glob.glob(os.path.join(self.__molFilePath, "*.mol2"))
            logger.info("search list length %d", len(pL))
            #
            for queryTargetPath in pL:
                _, fn = os.path.split(queryTargetPath)
                queryTargetId, _ = os.path.splitext(fn)
                logger.info("search for %r", queryTargetId)
                resultPath = self.__simResultPath
                vS = CcdcSearch(verbose=self.__verbose)
                vS.search(queryTargetId, queryTargetPath, resultPath, searchType="similarity")
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    def testSubStructureSearch(self):
        """Test case: CCDC substructure search"""
        try:
            # One substructure query per fixture mol2 file.
            pL = glob.glob(os.path.join(self.__molFilePath, "*.mol2"))
            logger.info("search list length %d", len(pL))
            #
            for queryTargetPath in pL:
                _, fn = os.path.split(queryTargetPath)
                queryTargetId, _ = os.path.splitext(fn)
                logger.info("search for %r", queryTargetId)
                resultPath = self.__ssResultPath
                vS = CcdcSearch(verbose=self.__verbose)
                vS.search(queryTargetId, queryTargetPath, resultPath, searchType="substructure")
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
def suiteSearchTests():
    """Assemble the similarity/substructure/SMARTS search tests into a suite."""
    suite = unittest.TestSuite()
    for testName in ("testSimilaritySearch",
                     "testSubStructureSearch",
                     "testSmartsSearch"):
        suite.addTest(CcdcSearchTests(testName))
    return suite
if __name__ == "__main__":
    #
    # Run the full search suite when executed directly.
    mySuite = suiteSearchTests()
    unittest.TextTestRunner(verbosity=2).run(mySuite)
| [
"john.westbrook@rcsb.org"
] | john.westbrook@rcsb.org |
f9dad584be247cc14c812be48d6df595dbb1d4a2 | 4a6fc126c59de4b18c2cbe23872c4ddef2512c6b | /telegram_bot.py | bd0412760ac508d115e6d2e1d1107490c7df89a4 | [
"MIT"
] | permissive | cccaaannn/telegram_predictor | ac4f1c9500f087c5ac90526663315b81f4f04e82 | a939954e386e1218afcd1c6a705cc617898023b5 | refs/heads/master | 2023-02-20T20:46:13.055986 | 2021-01-26T22:52:12 | 2021-01-26T22:52:12 | 330,818,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,649 | py | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import requests
import logging
import shutil
import uuid
import cv2
import os
from yolo_predictor import yolo_predictor
from yolo_drawer import yolo_drawer
class telegram_bot():
    """Telegram bot that downloads user-sent photos, runs them through a
    YOLO predictor and replies with the detected labels plus an annotated
    copy of the image."""

    def __init__(self,
                 botkey,
                 model_path,
                 names_path,
                 downloaded_image_save_folder="images/downloaded",
                 predicted_image_save_folder="images/predicted",
                 logger_file_name="logs/telegram_bot.log"
                 ):
        # Bot API token is kept private (name-mangled attribute).
        self.__botkey = botkey
        self.model_path = model_path
        self.names_path = names_path
        self.downloaded_image_save_folder = downloaded_image_save_folder
        self.predicted_image_save_folder = predicted_image_save_folder

        # start logging (console + file)
        logging.basicConfig(level=logging.INFO, format="[Telegram predictor] [%(levelname)s] (%(asctime)s) %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=[logging.StreamHandler(), logging.FileHandler(filename=logger_file_name)])

        # Load the model once up front (slow operation).
        self.__start_predictor()

    def __start_predictor(self):
        """Instantiate the YOLO predictor and the bounding-box drawer."""
        logging.info("Model is loading")
        self.__predictor = yolo_predictor(model_path=self.model_path, names_path=self.names_path)
        self.__drawer = yolo_drawer()

    def __download_file_requests(self, url, local_full_path):
        """Stream ``url`` to ``local_full_path`` without buffering in memory."""
        with requests.get(url, stream=True) as req:
            with open(local_full_path, 'wb') as file:
                shutil.copyfileobj(req.raw, file)

    def __download_image(self, file_id):
        """uses telegrams ap to retrive image"""
        # get file path on server
        file_path_api_str = "https://api.telegram.org/bot{0}/getFile?file_id={1}".format(self.__botkey, file_id)
        response = requests.get(file_path_api_str).json()
        # NOTE(review): assumes getFile succeeded -- a failed call raises
        # KeyError here (caught upstream in prediction_handler).
        file_path_on_server = response["result"]["file_path"]
        _ , file_name_on_server = os.path.split(file_path_on_server)
        file_download_api_str = "https://api.telegram.org/file/bot{0}/{1}".format(self.__botkey, file_path_on_server)

        # create unique file name on that path to prevent override
        unique_filename = str(uuid.uuid4())
        _, file_extension = os.path.splitext(file_name_on_server)
        unique_full_local_path = os.path.join(self.downloaded_image_save_folder, unique_filename + file_extension)

        self.__download_file_requests(file_download_api_str, unique_full_local_path)
        return unique_full_local_path

    def start(self):
        """Register the handlers and poll Telegram until interrupted (blocking)."""

        def error(update, context):
            # Log polling/dispatch errors raised by the library.
            logging.warning('Update {0} caused error {1}'.format(update, context.error))

        def help(update, context):
            update.message.reply_text("Send an image for predicting")

        def prediction_handler(update, context):
            # Invoked for every incoming photo message.
            logging.info("User information: {0}".format(update.message.from_user))
            try:
                # download image from api (last photo entry is the largest size)
                file_id = update.message.photo[-1].file_id
                image_path = self.__download_image(file_id)
                update.message.reply_text("Predicting...")

                # predict
                logging.info("Predicting image:{0}".format(image_path))
                predictions = self.__predictor.predict(image_path)

                if(predictions):
                    # One "<label> %<confidence>" line per detection.
                    str_predictions = ""
                    for pred in predictions:
                        str_predictions += "{0} %{1:.2f}\n".format(pred[0], pred[2])
                    update.message.reply_text(str_predictions)

                    # draw and sed labeld image
                    _, save_path = self.__drawer.draw(predictions, image_path, show=False, save_folder_path=self.predicted_image_save_folder, resize=None, saved_file_suffix="")
                    context.bot.send_photo(chat_id=update.message.chat.id, photo=open(save_path, 'rb'))
                    logging.info("Predicted image:{0} result:{1}".format(save_path, predictions))
                else:
                    logging.info("Nothing detected image:{0}".format(image_path))
                    update.message.reply_text("Nothing detected")

            except Exception:
                # Never let a single bad update kill the polling loop.
                logging.exception("", exc_info=True)
                update.message.reply_text("oops something went wrong")

        logging.info("Bot starting")
        updater = Updater(self.__botkey, use_context=True)
        updater.dispatcher.add_error_handler(error)
        updater.dispatcher.add_handler(CommandHandler("help", help))
        updater.dispatcher.add_handler(MessageHandler(Filters.photo, prediction_handler))
        updater.start_polling()
        updater.idle()
| [
"can.kurt.aa@gmail.com"
] | can.kurt.aa@gmail.com |
5a5f402c8daf720263f8740dae71ece795237d2f | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Refacto_WC500049008.2.py | 082eeb0b9a6e209ca0ede208ae135a65eaeb910d | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | {'_data': [['Very common',
[['Blood', u'Faktor VIII-inhibitorer -PUP'],
['Metabolism', u'Anorexi'],
['Nervous system', u'Huvudv\xe4rk'],
['Respiratory', u'Dyspn\xe9,'],
['GI', u'Kr\xe4kning'],
['Skin', u'Urtikaria,'],
['Musculoskeletal', u'Artralgi'],
['General', u'Asteni,'],
['Investigations',
u'Utvecklingav antikroppar mot ovarieceller fr\xe5n kinesisk hamster (CHO-protein). Uppkomst av FVIII-antikroppar.']]],
['Common',
[['Blood', u'Faktor VIII-inhibitorer -PTP'],
['Vascular', u'Bl\xf6dning/hematom'],
['GI', u'Illam\xe5ende'],
['General',
u'feber. K\xe4rlaccesskomplikation d\xe4ribland komplikationer i anslutning till permanenta katetrar f\xf6r ven\xf6s tillf\xf6rsel'],
['Investigations', u'F\xf6rh\xf6jt']]],
['Uncommon',
[['Immune system', u'Anafylaktoid reaktion'],
['Nervous system', u'Neuropati, yrsel, somnolens, dysgeusi'],
['Cardiac', u'Angina pectoris, takykardi, hj\xe4rtklappning'],
['Vascular', u'Hypotoni, tromboflebit,vasodilatation, blodvallning'],
['Respiratory', u'hosta'],
['GI', u'Buksm\xe4rta, diarr\xe9'],
['Skin', u'kl\xe5da, utslag, hyperhidros'],
['Musculoskeletal', u'Myalgi'],
['General',
u'Frossa/ frysning, inflammation vid injektionsst\xe4llet, reaktion vid injektionsst\xe4llet, sm\xe4rta vid injektionsst\xe4llet'],
['Investigations',
u'aspartataminotransferas, f\xf6rh\xf6jt alaninaminotransferas, f\xf6rh\xf6jt blodbilirubin, f\xf6rh\xf6jt blodkreatinfosfokinas']]]],
'_note': u' ?MSFU',
'_pages': [5, 7],
u'_rank': 24,
u'_type': u'MSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
8fe8e8fc1131cce8977a6e71efef66886883df23 | dfd42f222f9c9cff47a01c5bc1f37764802cfea1 | /video_det.py | 448bda365f56b638cbea29bf45164c5deee83b5f | [] | no_license | CheungBH/Swim_detection | 5f34d731968007794ed3de575d50f23b2b801ab2 | f6f6cb36ed89628a10e6822f87ac003f75bef412 | refs/heads/master | 2020-08-12T11:52:22.064990 | 2020-01-27T11:13:39 | 2020-01-27T11:13:39 | 214,762,481 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | import cv2
import os
from config import config
import numpy as np
cnt = 0
save_ls = [175, 333, 556, 888, 1000, 1080]
video_num = 54
img_folder = str(video_num)
cap = cv2.VideoCapture(os.path.join('Video/Selected', '{}.mp4'.format(img_folder)))
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=200, detectShadows=False)
show = False
write = False
while True:
ret, frame = cap.read()
frame = cv2.resize(frame, (540, 360))
cv2.imshow('input', frame)
fgmask = fgbg.apply(frame)
background = fgbg.getBackgroundImage()
diff = cv2.absdiff(frame, background)
point = diff
gray = cv2.cvtColor(point, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(src=gray, blockSize=9, ksize=27, k=0.04)
a = dst > 0.01 * dst.max()
point[a] = [0, 0, 255]
cv2.imshow("point", point)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(diff, config.blur_kernel)
imageEnhance = cv2.filter2D(blur, -1, config.enhance_kernel)
hsv = cv2.cvtColor(imageEnhance, cv2.COLOR_BGR2HSV)
thresh = cv2.inRange(hsv, lowerb=config.hsv_lower_black, upperb=config.hsv_upper_black)
dilate_kernel = cv2.getStructuringElement(config.dilation_method[0], config.dilation_kernel)
dilation = cv2.morphologyEx(thresh, config.dilation_method[1], dilate_kernel)
if show:
cv2.imshow('input', frame)
cv2.moveWindow("input", 0, 0)
cv2.imshow('background', background)
cv2.moveWindow("background", 0, 450)
cv2.imshow('mask', diff)
cv2.moveWindow("mask", 540, 0)
cv2.imshow('gray', gray)
cv2.moveWindow("gray", 1080, 450)
cv2.imshow("enhance", imageEnhance)
cv2.moveWindow("enhance", 540, 450)
cv2.imshow("threshold", thresh)
cv2.moveWindow("threshold", 1080, 0)
if write:
if cnt in save_ls:
des_path = os.path.join("frame/{}/{}".format(img_folder, cnt))
os.makedirs(des_path, exist_ok=True)
cv2.imwrite(os.path.join(des_path, "mask.jpg"), diff)
cv2.imwrite(os.path.join(des_path, "enhance.jpg"), imageEnhance)
cv2.imwrite(os.path.join(des_path, "gray.jpg"), gray)
print("Finish processing frame {}".format(cnt))
cnt += 1
k = cv2.waitKey(10)&0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | [
"534660436@qq.com"
] | 534660436@qq.com |
1f0647a82ec298b7fd062cfa3cc353018b00d7ae | acaed6ad4a2bb3f6df553debf547d0feafdd99e9 | /List/group.py | a2833a6241a9dc45b4a99d6a2fc85facdafbe9f2 | [] | no_license | SAM1363/TH-Python | 764691b7b8281b3298ace985039ee9c05ef340a1 | 421c4d7f54ed56233a87c7d9907ac3d1ab993c94 | refs/heads/master | 2020-05-30T01:12:49.404476 | 2019-08-04T04:45:32 | 2019-08-04T04:45:32 | 189,472,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | musical_groups = [
["Ad Rock", "MCA", "Mike D."],
["John Lennon", "Paul McCartney", "Ringo Starr", "George Harrison"],
["Salt", "Peppa", "Spinderella"],
["Rivers Cuomo", "Patrick Wilson", "Brian Bell", "Scott Shriner"],
["Chuck D.", "Flavor Flav", "Professor Griff", "Khari Winn", "DJ Lord"],
["Axl Rose", "Slash", "Duff McKagan", "Steven Adler"],
["Run", "DMC", "Jam Master Jay"],
]
for each in musical_groups:
member = ', '.join(each)
print(member)
list(musical_groups) | [
"isamu3636136@gmail.com"
] | isamu3636136@gmail.com |
9936ad7e397f768960297e8f9eccd89b14b4501f | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /tests/test_utils.py | 4d48eca5725d58a50d4d971b6a360e9e2bf5ba73 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 983 | py | from typing import Optional
import pytest
from puzzles.utils import NaryNode, TreeNode, make_nary_node, make_tree
@pytest.mark.parametrize(
"vals, expected",
[
([0, 0, None, 0, 0], TreeNode(0, TreeNode(0, TreeNode(0), TreeNode(0)), None)),
(
[0, 0, None, 0, None, 0, None, None, 0],
TreeNode(
0,
TreeNode(0, TreeNode(0, TreeNode(0, None, TreeNode(0)), None), None),
None,
),
),
],
)
def test_make_tree(vals: list[Optional[int]], expected: TreeNode):
assert make_tree(vals) == expected
@pytest.mark.parametrize(
"vals, expected",
[
(
[1, None, 3, 2, 4, None, 5, 6],
NaryNode(
1, [NaryNode(3, [NaryNode(5), NaryNode(6)]), NaryNode(2), NaryNode(4)]
),
)
],
)
def test_make_nary_node(vals, expected):
print(make_nary_node(vals))
assert make_nary_node(vals) == expected
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
47dd5781de4069de4d2e442c24cd2979fc5a0aac | 51aa2894c317f60726fe9a778999eb7851b6be3e | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/003_Placing several components in the box/001_Horizontal and vertical alignment/058_insertStretch.py | 768928415d0c882a97ec7dad9040e389f828a0fe | [] | no_license | pranaymate/Python_Topics | dd7b288ab0f5bbee71d57080179d6481aae17304 | 33d29e0a5bf4cde104f9c7f0693cf9897f3f2101 | refs/heads/master | 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
from PyQt4 import QtGui
import sys
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
window.setWindowTitle("insertStretch")
window.resize(350, 50)
button1 = QtGui.QPushButton("1")
button2 = QtGui.QPushButton("2")
button3 = QtGui.QPushButton("3")
hbox = QtGui.QHBoxLayout()
hbox.addWidget(button1)
hbox.addWidget(button2)
hbox.addWidget(button3)
hbox.insertStretch(2, stretch=1)
window.setLayout(hbox)
window.show()
sys.exit(app.exec_()) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
63079e8da1dd06e4fdf0eb83766653ee242d8090 | bc5484a8173bd937c56fb9a4c0dc6a54a5bb4d62 | /Exceptions.py | 8c72314af3f7b8a37d33bf120bc83e40ad4f2567 | [] | no_license | PiyushChandra17/HackerRank_Python | 4686cc872ac622d1ec7b8206cca0efccdc15f235 | 443fa8484c81a56d652d546fa7cc4a0ba24b8185 | refs/heads/main | 2023-01-24T18:45:55.673808 | 2020-12-09T00:02:16 | 2020-12-09T00:02:16 | 319,788,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | N = int(input())
for _ in range(N):
try:
a,b = map(int,input().split())
print(a//b)
except Exception as e:
print('Error Code:',e)
| [
"noreply@github.com"
] | PiyushChandra17.noreply@github.com |
f1c921a60c4fb3ad157e9299ffd20047aecfaa28 | 94e964496acd225e1a04060a9bc9f639e6cff99c | /app/invitations/migrations/0004_auto_20201102_1100.py | 2fe6602b722d46813c07a701de91615cc619b763 | [] | no_license | indigocodeit/conflictcartographer | 64b6ab2c991cd3ad020c4832cdb26974d342b564 | ab19b4559c1e016ef485bfa1a01df17fb15679ce | refs/heads/master | 2023-03-05T00:44:03.805241 | 2021-02-19T08:50:23 | 2021-02-19T08:50:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.1.2 on 2020-11-02 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: gives Invitation.mailed an explicit default of False."""

    dependencies = [
        ('invitations', '0003_auto_20201102_0936'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invitation',
            name='mailed',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"pglandsverk@gmail.com"
] | pglandsverk@gmail.com |
93364714c8458b6b06d05de577f9b868ef1c277f | fe4f4ae5583cdc71be78094f639df15e27e77e0c | /saas/extras.py | e3470ed950539c2203c05c299fd2b0dc404dc7c7 | [
"BSD-2-Clause"
] | permissive | petrios888/djaodjin-saas | 6bf803673db55d6a8262b440cd705ed1d4e3bbc7 | 6bedad11bc37d3884addb67c980b1f764803ada7 | refs/heads/master | 2020-04-04T20:32:54.592402 | 2018-09-30T22:53:29 | 2018-09-30T22:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,117 | py | # Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from collections import OrderedDict
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# Implementation Note:
#
# saas.settings cannot be imported at this point because this file will
# be imported before ``django.conf.settings`` is fully initialized.
from .compat import NoReverseMatch, is_authenticated, reverse
from .utils import get_organization_model
class OrganizationMixinBase(object):
"""
Returns an ``Organization`` from a URL.
"""
organization_url_kwarg = 'organization'
def get_organization(self):
return get_object_or_404(get_organization_model(),
slug=self.kwargs.get(self.organization_url_kwarg))
def get_url_kwargs(self):
"""
Rebuilds the ``kwargs`` to pass to ``reverse()``.
"""
url_kwargs = {}
if 'organization' in self.kwargs:
url_kwargs.update({'organization': self.kwargs['organization']})
return url_kwargs
def get_context_data(self, **kwargs):
context = super(OrganizationMixinBase, self).get_context_data(**kwargs)
organization = self.organization
if not organization:
# If we don't even have a broker/provider for a site.
raise Http404(
_("It seems a broker was not defined, or defined incorrectly."))
context.update({'organization': organization})
# XXX These might be moved to a higher-level
urls = {
'api_cart': reverse('saas_api_cart'),
'api_redeem': reverse('saas_api_redeem_coupon'),
'organization_create': reverse('saas_organization_create')
}
# URLs for both sides (subscriber and provider).
urls.update({'organization': {
'api_base': reverse('saas_api_organization', args=(organization,)),
'api_card': reverse('saas_api_card', args=(organization,)),
'api_profile_base': reverse('saas_api_profile'),
'api_subscriptions': reverse(
'saas_api_subscription_list', args=(organization,)),
'billing_base': reverse('saas_billing_base'),
'profile_base': reverse('saas_profile'),
'profile': reverse(
'saas_organization_profile', args=(organization,)),
'billing': reverse('saas_billing_info', args=(organization,)),
'subscriptions': reverse(
'saas_subscription_list', args=(organization,)),
}})
# The following `attached_user` will trigger a db query
# even when `request.user` is anonymous.
if organization.attached_user():
try:
urls['organization'].update({
'password_change': reverse(
'password_change', args=(organization,))})
except NoReverseMatch:
# With django.contrib.auth we cannot trigger password_change
# for a different user than the one associated to the request.
# It is OK. We will just not resolve the link.
pass
else:
urls['organization']['roles'] = OrderedDict()
for role_descr in organization.get_role_descriptions():
urls['organization']['roles'].update({
role_descr.title: reverse('saas_role_detail',
args=(organization, role_descr.slug)),
})
if (organization.is_provider
and is_authenticated(self.request)
and organization.accessible_by(self.request.user)):
provider = organization
urls.update({'provider': {
'api_bank': reverse('saas_api_bank', args=(provider,)),
'api_coupons': reverse(
'saas_api_coupon_list', args=(provider,)),
'api_metrics_plans': reverse(
'saas_api_metrics_plans', args=(provider,)),
'api_plans': reverse('saas_api_plans', args=(provider,)),
'api_receivables': reverse(
'saas_api_receivables', args=(provider,)),
'api_subscribers_active': reverse(
'saas_api_subscribed', args=(provider,)),
'api_subscribers_churned': reverse(
'saas_api_churned', args=(provider,)),
'coupons': reverse('saas_coupon_list', args=(provider,)),
'dashboard': reverse('saas_dashboard', args=(provider,)),
'metrics_coupons': reverse(
'saas_metrics_coupons', args=(provider,)),
'metrics_plans': reverse(
'saas_metrics_plans', args=(provider,)),
'metrics_sales': reverse(
'saas_metrics_summary', args=(provider,)),
'profile': reverse('saas_provider_profile'),
'subscribers': reverse(
'saas_subscriber_list', args=(provider,)),
'transfers': reverse(
'saas_transfer_info', args=(provider,)),
}})
# These might lead to 403 if provider is not broker.
urls.update({'broker': {
'api_users': reverse('saas_api_users'),
'api_users_registered': reverse('saas_api_registered'),
'charges': reverse('saas_charges'),
}})
if is_authenticated(self.request):
urls.update({'profiles': [{
'location': reverse('saas_organization_profile',
args=(account,)), 'printable_name': account.printable_name}
for account in get_organization_model().objects.accessible_by(
self.request.user)]})
self.update_context_urls(context, urls)
return context
@staticmethod
def update_context_urls(context, urls):
if 'urls' in context:
for key, val in six.iteritems(urls):
if key in context['urls']:
if isinstance(val, dict):
context['urls'][key].update(val)
else:
# Because organization_create url is added in this mixin
# and in ``OrganizationRedirectView``.
context['urls'][key] = val
else:
context['urls'].update({key: val})
else:
context.update({'urls': urls})
return context
@property
def organization(self):
if not hasattr(self, '_organization'):
self._organization = self.get_organization()
return self._organization
| [
"smirolo@djaodjin.com"
] | smirolo@djaodjin.com |
1ea2461bb4d5cd62ba56ea2998b7e61b0495fbf9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_ascertained.py | caf37433ed7e596dded7a9c9ce0ed0c6cfe4047d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#calss header
class _ASCERTAINED():
def __init__(self,):
self.name = "ASCERTAINED"
self.definitions = ascertain
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['ascertain']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aa8483d7801ccd73022481b46b35195378df86cd | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /Roland_FA/control_element_utils.py | 7c3b09249e0be193508803db7452627c39577661 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 740 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Roland_FA/control_element_utils.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
import Live
from ableton.v2.base import depends
from ableton.v2.control_surface import MIDI_CC_TYPE
from ableton.v2.control_surface.elements import ButtonElement, EncoderElement
@depends(skin=None)
def make_button(identifier, name, msg_type=MIDI_CC_TYPE, skin=None):
return ButtonElement(True, msg_type, 0, identifier, name=name, skin=skin)
def make_encoder(identifier, name):
return EncoderElement(MIDI_CC_TYPE, 0, identifier, Live.MidiMap.MapMode.absolute, name=name)
| [
"julien@julienbayle.net"
] | julien@julienbayle.net |
8e4d244cc7b9ead7983a37336328ed8efb8412b5 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-tpu/samples/generated_samples/tpu_v2_generated_tpu_list_runtime_versions_async.py | 42f6f965e84337949ebd142d31417dfbf72bbabb | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,873 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListRuntimeVersions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-tpu
# [START tpu_v2_generated_Tpu_ListRuntimeVersions_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import tpu_v2
async def sample_list_runtime_versions():
# Create a client
client = tpu_v2.TpuAsyncClient()
# Initialize request argument(s)
request = tpu_v2.ListRuntimeVersionsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_runtime_versions(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END tpu_v2_generated_Tpu_ListRuntimeVersions_async]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
8a90740da74279846df059f6cdd3bf485bfdf13b | 5f6160cc922fee51c89eb066adf396183e52279f | /backend/chat/migrations/0001_initial.py | 8b828e045ffc83f7861521d66676f4b3e4a2b8db | [] | no_license | crowdbotics-apps/pira-21713 | 884fabf88e6c08891895c3ba1f214174b8ca46c1 | df0f7b9c18ff9be6ec1cc9e79fb987a49c684353 | refs/heads/master | 2023-02-24T05:51:16.466832 | 2021-01-29T21:51:37 | 2021-01-29T21:51:37 | 305,415,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # Generated by Django 2.2.16 on 2020-10-19 14:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('chat_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('attachment', models.URLField()),
('is_draft', models.BooleanField()),
('is_delivered', models.BooleanField()),
('is_read', models.BooleanField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_delivered', models.DateTimeField()),
('timestamp_read', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Thread',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('thread_photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='ThreadMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_admin', models.BooleanField()),
('timestamp_joined', models.DateTimeField(auto_now_add=True)),
('timestamp_left', models.DateTimeField()),
('last_rejoined', models.DateTimeField()),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_profile', to='chat_user_profile.Profile')),
('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_thread', to='chat.Thread')),
],
),
migrations.CreateModel(
name='ThreadAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.CharField(max_length=7)),
('timestamp_action', models.DateTimeField(auto_now_add=True)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_profile', to='chat_user_profile.Profile')),
('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_thread', to='chat.Thread')),
],
),
migrations.CreateModel(
name='MessageAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.CharField(max_length=7)),
('timestamp_action', models.DateTimeField(auto_now_add=True)),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_message', to='chat.Message')),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_profile', to='chat_user_profile.Profile')),
],
),
migrations.AddField(
model_name='message',
name='sent_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sent_by', to='chat.ThreadMember'),
),
migrations.AddField(
model_name='message',
name='thread',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_thread', to='chat.Thread'),
),
migrations.CreateModel(
name='ForwardedMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp_forwarded', models.DateTimeField(auto_now_add=True)),
('forwarded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_by', to='chat_user_profile.Profile')),
('forwarded_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_to', to='chat.Thread')),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_message', to='chat.Message')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e0bb612833f054ba95c0fbc6a3cade75f69976cf | e58fcc1467ad81084b016d2a48d672d75da2c058 | /rdkit/rdkit/DataManip/Metric/__init__.py | ecb2ee43043d5acf993e710bd8c5abe9caf7d837 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ali1810/sol_heroku | 294375d70c656452749e959bfb851a50defc0e01 | 97b548ce7d864e6fed936c53b790c1dc8038cff2 | refs/heads/main | 2023-08-15T06:18:26.933254 | 2021-09-14T10:20:19 | 2021-09-14T10:20:19 | 405,223,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import rdBase
from rdkit.DataManip.Metric.rdMetricMatrixCalc import *
| [
"noreply@github.com"
] | ali1810.noreply@github.com |
235af6e192fec2f1b75f4bd482c67205da7d931b | a67571dc6f4e83e44a90e4802d2f54b22fb21fd2 | /tns_glass/locales/templatetags/locales.py | 9288e224158f458fa254bdc08382626190183645 | [] | no_license | TechnoServe/SMSBookkeeping | 1833690e3329967b6ae731aad2ddb6b93655d935 | cbc816368ba4980ca6ce87c2bda95b76295009f1 | refs/heads/master | 2020-08-03T19:12:13.023005 | 2019-11-05T12:08:45 | 2019-11-05T12:08:45 | 211,856,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | from django import template
from datetime import datetime
from django.utils import simplejson
from ..models import comma_formatted
from django.conf import settings
from django.contrib.humanize.templatetags.humanize import intcomma
import pytz
from decimal import Decimal
register = template.Library()
@register.filter
def local_timezone(value, format="%b %e %Y, %H:%M"):
local = pytz.timezone(settings.USER_TIME_ZONE)
value = value.replace(tzinfo=pytz.utc)
return value.astimezone(local).strftime(format)
@register.filter
def format_int(value):
try:
value = int(value)
return intcomma(value)
except:
return intcomma(value)
@register.filter
def format_percent(value):
if value is None:
return "-"
try:
value = int(value)
return intcomma(value) + "%"
except:
return intcomma(value) + "%"
@register.filter
def format_currency(price, currency):
if price is None or price == '':
return "-"
else:
try:
return currency.format(price)
except:
return price
@register.filter
def format_currency_summary(price, currency):
if price is None or price == '':
return "-"
else:
try:
return currency.format(price, False, "med", "med")
except:
return price
@register.filter
def format_currency_rounded(price, currency):
if price is None or price == '':
return "-"
else:
try:
return currency.format(price, True)
except:
return price
@register.filter
def format_currency_rounded_summary(price, currency):
if price is None or price == '':
return "-"
else:
try:
return currency.format(price, True, "med", "med")
except:
return price
@register.filter
def format_kilos(value):
if value is None or value == '':
return "-"
else:
return comma_formatted(value, False) + " Kg"
@register.filter
def format_tons(value):
if value is None or value == '':
return "-"
else:
tons = comma_formatted((value / Decimal(1000)).quantize(Decimal(".01")), True)
return str(tons) + " mT"
@register.filter
def format_id(national_id, country):
return country.format_id(national_id)
@register.filter
def format_phone(phone, country):
return country.format_phone(phone)
@register.filter
def format_weight(value, weight):
if value is None or value == '':
return ""
else:
try:
return weight.format(value, True)
except:
return value
@register.filter
def format_weight_int(value, weight):
if value is None or value == '':
return ''
else:
try:
return weight.format(value, False)
except:
return value
@register.filter
def format_weight_rounded(value, weight):
if value is None or value == '':
return ''
else:
try:
return weight.format(value, True, True)
except:
return value
@register.filter
def format_weight_rounded_use_decimals(value, weight):
if value is None or value == '':
return ''
else:
try:
return weight.format(value, True, True, True)
except:
return value
@register.filter
def format_weight_rounded_use_one_decimal_place(value, weight):
if value is None or value == '':
return ''
else:
try:
return weight.format(value, True, True, True, True)
except:
return value | [
"56976732+hgtal@users.noreply.github.com"
] | 56976732+hgtal@users.noreply.github.com |
9d9298d491f9355cf0b40cd48b69830babf911c4 | ccd45a60940628e2b5a75ca018936b4aab78e647 | /config.py | 20ccee4fe3602f6cd080595e4283a23f2390bed4 | [] | no_license | GNK48LXZ/bert_test | 8853e041d65bbc9602d3bb61bf4274b1cb6c79f1 | 86c78d8bebbcad8051e5a1f9baba51e6d35588c9 | refs/heads/main | 2023-08-19T03:45:14.757576 | 2021-09-21T08:30:17 | 2021-09-21T08:30:17 | 408,739,774 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import os
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
Config = {
'Corpus_File_Path': os.path.join(PROJECT_PATH, 'Data\preprocess_sentence.txt'),
'Vocabulary_File_Path': os.path.join(PROJECT_PATH, 'Data/vocaba.txt'), # 词表存放位置
'Log_Dir': os.path.join(PROJECT_PATH, 'Logs'),
# 'Saved_Weight': os.path.join(PROJECT_PATH, 'Saved_Weight'),
'Saved_Weight': os.path.join(PROJECT_PATH, 'Saved_Weight_256d_3t'),
'Character_Frequency_Threshold': 3,
'Segment_Size': 2,
'Batch_Size': 64,
'Max_Sequence_Length': 128, # 最大长度
'Mask_Rate': 0.15,
'Vocab_Size': 2367,
'Embedding_Size': 256,
'Num_Transformer_Layers': 3,
'Num_Attention_Heads': 8,
'Intermediate_Size': 1024,
'Initializer_Variance': 0.02, # 权重初始化方差,默认0.02
}
| [
"your email"
] | your email |
6cc4471719509120c666393dc2e7eed5b40f35f8 | 8af8290b5aa09b8764e1e06e35bef43571c38cbc | /numpy_study/_07deep_stack_split.py | 6bdadf9695fb02266461c6e56f3cdabea0493954 | [] | no_license | wochonglai/AI_learn | cad527c0c5a88bcfea3072a544547da486528e71 | f06dd05e59046492469699cbac6d4064739b4a72 | refs/heads/master | 2020-03-26T07:59:52.879435 | 2018-10-03T01:33:39 | 2018-10-03T01:33:39 | 144,681,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
import numpy as np
a = np.arange(11,20).reshape(3,3)
print('a:',a)
b = a +10
print("b:",b)
# 深度合并
c = np.dstack((a,b))
print('c:',c)
# 深度拆分
d,e = np.dsplit(c,2)
print('d:',d)
print('e:',e)
print('dT:',d.T[0].T)
print('eT:',e.T[0].T)
| [
"noreply@github.com"
] | wochonglai.noreply@github.com |
bc131fb89cd26403efa2f9589f1cd1f5f69415eb | ebba19c671eb241e9363700aac0f3f91489fc4bf | /Tareas/T01/entidades.py | 071efbf4e539b4606e15d4ae39ce1a40ff39e08b | [
"MIT"
] | permissive | FarDust/FarDust-IIC2233 | 6358a489399428e2ec621ffb0f82eb88c3c38226 | f941be5fc2f3eab48ed4a78cca29ddac7ca2a26d | refs/heads/master | 2023-04-06T07:10:14.636304 | 2021-04-19T19:24:56 | 2021-04-19T19:24:56 | 103,351,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Entidades v1.0.0
from abc import ABCMeta, abstractmethod
class Entidades(metadata=ABCMeta):
def __init__(self, nombre):
self.nombre = nombre
self.usuario = None
class ANAF(Entidades):
def __init__(self):
self.empleados = []
pass
class Persona(Entidades):
def __init__(self):
pass
class Empleado(Persona):
def __init__(self):
pass
class Piloto(Persona):
def __init__(self):
self.aeronave = None
pass
class Jefe(Persona):
def __init__(self):
pass
| [
"gnfaundez@uc.cl"
] | gnfaundez@uc.cl |
f330147c75c69e8d0f284de36f81d0239b97a513 | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/types/page_block_table.py | b8a0e04514fa250dcf57fc03851043c842f17357 | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py |
from ..utils import Object
class PageBlockTable(Object):
"""
A table
Attributes:
ID (:obj:`str`): ``PageBlockTable``
Args:
caption (:class:`telegram.api.types.RichText`):
Table caption
cells (List of List of :class:`telegram.api.types.pageBlockTableCell`):
Table cells
is_bordered (:obj:`bool`):
True, if the table is bordered
is_striped (:obj:`bool`):
True, if the table is striped
Returns:
PageBlock
Raises:
:class:`telegram.Error`
"""
ID = "pageBlockTable"
def __init__(self, caption, cells, is_bordered, is_striped, **kwargs):
self.caption = caption # RichText
self.cells = cells # list of list(pageBlockTableCell)
self.is_bordered = is_bordered # bool
self.is_striped = is_striped # bool
@staticmethod
def read(q: dict, *args) -> "PageBlockTable":
caption = Object.read(q.get('caption'))
cells = [[Object.read(v) for v in i] for i in q.get('cells', [])]
is_bordered = q.get('is_bordered')
is_striped = q.get('is_striped')
return PageBlockTable(caption, cells, is_bordered, is_striped)
| [
"me@amirh.co"
] | me@amirh.co |
420b4c8e12f33757ae20a038063d0bc5b90214cf | 8ebf6311c3c1db40c7bb56051cf4e37e1b85a4f9 | /rm-server/gateway/gateway/router/templatemanager/document/comments/classify.py | 07a9a30736b52f8da7942eccb87bc4af038bbcdc | [] | no_license | sq591442679/requirements-manager | e8b074afb7fd2a83632f2546d392dab4c35aeeeb | 6d664ce338b455150dcc9a86145967e8dd67a9dd | refs/heads/master | 2023-07-08T04:38:20.064019 | 2021-08-11T03:41:13 | 2021-08-11T03:41:13 | 392,877,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from flask import request
from gateway.app import app
from gateway.utils.handle_api import (
get_client_username, handle_request_response
)
from gateway.http_client import templatemanager_http_client
@app.route('/document/comments/classify', methods=['POST'])
@handle_request_response
@get_client_username
def comments_classsify(client_username: str):
body = request.json
status_code, resp_body = templatemanager_http_client.post(
'document/comments/classify', client_username, json=body
)
return status_code, resp_body
| [
"591442679@qq.com"
] | 591442679@qq.com |
2978d2a3a3e6e231bc6ae317f19693320024810a | d73409535734a788af83a9b2b2e32dd1b979d5d2 | /proxySTAR_V3/certbot/venv.1509389747.bak/lib/python2.7/site-packages/zope/component/persistentregistry.py | 8b1b4e6bf91b34de0bb6b132fff5bfdb890411a4 | [
"Apache-2.0",
"MIT"
] | permissive | mami-project/lurk | adff1fb86cb3e478fe1ded4cbafa6a1e0b93bfdd | 98c293251e9b1e9c9a4b02789486c5ddaf46ba3c | refs/heads/master | 2022-11-02T07:28:22.708152 | 2019-08-24T19:28:58 | 2019-08-24T19:28:58 | 88,050,138 | 2 | 2 | NOASSERTION | 2022-10-22T15:46:11 | 2017-04-12T12:38:33 | Python | UTF-8 | Python | false | false | 2,102 | py | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Persistent component managers
"""
from persistent import Persistent
from persistent.mapping import PersistentMapping
from persistent.list import PersistentList
from zope.interface.adapter import VerifyingAdapterRegistry
from zope.interface.registry import Components
class PersistentAdapterRegistry(VerifyingAdapterRegistry, Persistent):
def changed(self, originally_changed):
if originally_changed is self:
self._p_changed = True
super(PersistentAdapterRegistry, self).changed(originally_changed)
def __getstate__(self):
state = super(PersistentAdapterRegistry, self).__getstate__().copy()
for name in self._delegated:
state.pop(name, 0)
state.pop('ro', None)
return state
def __setstate__(self, state):
bases = state.pop('__bases__', ())
super(PersistentAdapterRegistry, self).__setstate__(state)
self._createLookup()
self.__bases__ = bases
self._v_lookup.changed(self)
class PersistentComponents(Components):
def _init_registries(self):
self.adapters = PersistentAdapterRegistry()
self.utilities = PersistentAdapterRegistry()
def _init_registrations(self):
self._utility_registrations = PersistentMapping()
self._adapter_registrations = PersistentMapping()
self._subscription_registrations = PersistentList()
self._handler_registrations = PersistentList()
| [
"diego.deaguilarcanellas@telefonica.com"
] | diego.deaguilarcanellas@telefonica.com |
dba584dd19b9cd732105b0d9baf0c1de3bdb0802 | bccfab4d853f7417401a084be95de293e66ccd2a | /mySpider/spiders/Collection36.py | 56bf0105f695645fcbfb1f8d4c5cc72613a4f51e | [] | no_license | CS1803-SE/The-First-Subsystem | a8af03ce04a9de72a6b78ece6411bac4c02ae170 | 4829ffd6a83133479c385d6afc3101339d279ed6 | refs/heads/main | 2023-05-06T02:32:08.751139 | 2021-05-24T06:09:37 | 2021-05-24T06:09:37 | 363,400,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2021/5/1 17:28
# @Author : ana
# @File : Collection36.py
# @Software: PyCharm
from ..items import *
from ..str_filter import *
from ..auxiliary_files import Collection36_supporting
class Collection36(scrapy.Spider):
name = "Collection36"
allowed_domains = ['lvshunmuseum.org']
start_urls = Collection36_supporting.Collection36Supporting.startUrl
custom_settings = {
'ITEM_PIPELINES': {
'mySpider.pipelines.CollectionPipeLine': 301,
},
'DOWNLOADER_MIDDLEWARES': {
'mySpider.middlewares.DefaultMiddleware': 0,
},
}
def parse(self, response, **kwargs):
li_list = response.xpath("//*[@id='tab']/div/div/div[2]/ul/li")
print(len(li_list))
for li in li_list:
item = CollectionItem()
item["museumID"] = 36
item["museumName"] = "旅顺博物馆"
item['collectionName'] = StrFilter.filter(li.xpath("./a/div[1]/img/@alt").extract_first()).replace('[',
'').replace(
']', '')
item['collectionImageLink'] = 'http://www.lvshunmuseum.org' + str(li.xpath(
"./a/div[1]/img/@src").extract_first())[1:]
url = "http://www.lvshunmuseum.org" + str(li.xpath("./a/@href").extract_first())[1:]
yield scrapy.Request(
url,
callback=self.parseAnotherPage,
meta={"item": item}
)
def parseAnotherPage(self, response):
item = response.meta["item"]
item['collectionIntroduction'] = StrFilter.filter(
response.xpath("//*[@id='showcasescontent']/div/div[3]/p").xpath('string(.)').extract_first()).replace('[',
'').replace(
']', '')
print(item)
yield item
| [
"1300978939@qq.com"
] | 1300978939@qq.com |
914c7f5609eddc9843dab1308819682ab8de7b95 | 6c219c027c7d0ef454bdeac196bd773e8b95d602 | /system/tomcat/tomcat_weak_pass.py | 02071bb0dd5c7a622b9e690d910e97973b11f9f4 | [] | no_license | aStrowxyu/pocscan | 663f3a3458140e1bce7b4dc3702c6014a4c9ac92 | 08c7e7454c6b7c601bc54c21172c4788312603b1 | refs/heads/master | 2020-04-19T10:00:56.569105 | 2019-01-29T09:31:31 | 2019-01-29T09:31:31 | 168,127,418 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Tomcat 弱口令漏洞
referer: unknown
author: Lucifer
description: tomcat 后台弱口令。
'''
import sys
import json
import base64
import requests
import warnings
from termcolor import cprint
class tomcat_weak_pass_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
userlist = ["tomcat","admin"]
passlist = ["tomcat", "123456", "admin"]
payload = "/manager/html"
vulnurl = self.url + payload
for username in userlist:
for password in passlist:
try:
headers = {
"Authorization":"Basic "+base64.b64encode(bytes(username.encode())+b":"+bytes(password.encode())).decode(),
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if req.status_code == 200 and r"Applications" in req.text and r"Manager" in req.text:
cprint("[+]存在Tomcat 弱口令漏洞...(高危)\tpayload: "+vulnurl+"\npost: "+json.dumps({username:password}, indent=4), "red")
except:
cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = tomcat_weak_pass_BaseVerify(sys.argv[1])
testVuln.run()
| [
"wangxinyu@vackbot.com"
] | wangxinyu@vackbot.com |
3e163d3f7184f533ac5ac44fb4d0b92ce5ed07a3 | 30cffb7452220c2ac2961dd2e0f42e3b359a59c0 | /simscale_sdk/models/volume_heat_flux_bc.py | 4c19bc6fa2fc9b31e93ea47336624efae4391462 | [
"MIT"
] | permissive | vpurcarea/simscale-python-sdk | 0bf892d8824f8d4599caa0f345d5ba28e038f5eb | 6f2d12b2d21142bd854042c0fb402c2c797629e4 | refs/heads/master | 2023-03-14T04:31:06.226337 | 2021-03-03T16:20:01 | 2021-03-03T16:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,793 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class VolumeHeatFluxBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'name': 'str',
'heatflux_value': 'DimensionalFunctionVolumetricPower',
'topological_reference': 'TopologicalReference'
}
attribute_map = {
'type': 'type',
'name': 'name',
'heatflux_value': 'heatfluxValue',
'topological_reference': 'topologicalReference'
}
def __init__(self, type='VOLUME_HEAT_FLUX', name=None, heatflux_value=None, topological_reference=None, local_vars_configuration=None): # noqa: E501
"""VolumeHeatFluxBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._name = None
self._heatflux_value = None
self._topological_reference = None
self.discriminator = None
self.type = type
if name is not None:
self.name = name
if heatflux_value is not None:
self.heatflux_value = heatflux_value
if topological_reference is not None:
self.topological_reference = topological_reference
@property
def type(self):
"""Gets the type of this VolumeHeatFluxBC. # noqa: E501
:return: The type of this VolumeHeatFluxBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this VolumeHeatFluxBC.
:param type: The type of this VolumeHeatFluxBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def name(self):
"""Gets the name of this VolumeHeatFluxBC. # noqa: E501
:return: The name of this VolumeHeatFluxBC. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VolumeHeatFluxBC.
:param name: The name of this VolumeHeatFluxBC. # noqa: E501
:type: str
"""
self._name = name
@property
def heatflux_value(self):
"""Gets the heatflux_value of this VolumeHeatFluxBC. # noqa: E501
:return: The heatflux_value of this VolumeHeatFluxBC. # noqa: E501
:rtype: DimensionalFunctionVolumetricPower
"""
return self._heatflux_value
@heatflux_value.setter
def heatflux_value(self, heatflux_value):
"""Sets the heatflux_value of this VolumeHeatFluxBC.
:param heatflux_value: The heatflux_value of this VolumeHeatFluxBC. # noqa: E501
:type: DimensionalFunctionVolumetricPower
"""
self._heatflux_value = heatflux_value
@property
def topological_reference(self):
"""Gets the topological_reference of this VolumeHeatFluxBC. # noqa: E501
:return: The topological_reference of this VolumeHeatFluxBC. # noqa: E501
:rtype: TopologicalReference
"""
return self._topological_reference
@topological_reference.setter
def topological_reference(self, topological_reference):
"""Sets the topological_reference of this VolumeHeatFluxBC.
:param topological_reference: The topological_reference of this VolumeHeatFluxBC. # noqa: E501
:type: TopologicalReference
"""
self._topological_reference = topological_reference
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumeHeatFluxBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VolumeHeatFluxBC):
return True
return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
495c8590432112853a5e32efd49fc8c474f63fe8 | e8a6e0e218ce4bb51388e221e3f072f0a7c952c3 | /SendGrid/City's Happiness/mission-backup.py | 55013ca4f520219ca865870181a54ef4dc636247 | [] | no_license | imtiaz-rahi/Py-CheckiO | 34c553f08e2ec7ec3566fb3a0c6da371949470aa | 588b8e513b250040a4f1c37cf3c499dbdf2244d8 | refs/heads/master | 2020-07-04T13:59:10.297822 | 2019-12-05T15:42:18 | 2019-12-05T15:42:18 | 202,305,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | import collections
from itertools import chain
def find_dups(data, val):
return [k for k, v in data.items() if v == val]
def most_crucial(net, users):
max_user = max(users, key=users.get)
if len(find_dups(users, users[max_user])) == 1:
return [max_user]
# print(len(users.values()) == len(set(users.values())))
# Create counter of user and their count in net
network = collections.Counter([it for sub in net for it in sub])
print(network)
# check if multiple user has same count
# if not len(network.values()) == len(set(network.values())):
# for k, v in network.items():
# network[k] = v * users[k]
# print(network)
return [max(network, key=network.get)]
if __name__ == '__main__':
assert most_crucial([
["A", "B"],
["B", "C"],
["C", "D"]
], {
"A": 100,
"B": 1,
"C": 97,
"D": 1}) == ['A']
assert most_crucial([
['A', 'B'],
['B', 'C']
], {
'A': 10,
'B': 10,
'C': 10
}) == ['B'], 'First'
assert most_crucial([
['A', 'B']
], {
'A': 20,
'B': 10
}) == ['A'], 'Second'
assert most_crucial([
['A', 'B'],
['A', 'C'],
['A', 'D'],
['A', 'E']
], {
'A': 0,
'B': 10,
'C': 10,
'D': 10,
'E': 10
}) == ['A'], 'Third'
assert most_crucial([
['A', 'B'],
['B', 'C'],
['C', 'D']
], {
'A': 10,
'B': 20,
'C': 10,
'D': 20
}) == ['B'], 'Forth'
print('Nobody expected that, but you did it! It is time to share it!')
| [
"imtiaz.rahi@naztech.us.com"
] | imtiaz.rahi@naztech.us.com |
c249268db1786d8cdb7bf00127d40f7646a30e50 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1903.py | b51d36f8d6e5a86356943e98873a8ade2bd63789 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | # qubit number=5
# total number=71
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[1]) # number=29
prog.cz(input_qubit[3],input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=31
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.h(input_qubit[0]) # number=64
prog.cz(input_qubit[1],input_qubit[0]) # number=65
prog.h(input_qubit[0]) # number=66
prog.x(input_qubit[0]) # number=49
prog.h(input_qubit[0]) # number=57
prog.cz(input_qubit[1],input_qubit[0]) # number=58
prog.h(input_qubit[0]) # number=59
prog.h(input_qubit[0]) # number=54
prog.cz(input_qubit[1],input_qubit[0]) # number=55
prog.h(input_qubit[0]) # number=56
prog.h(input_qubit[4]) # number=41
prog.h(input_qubit[0]) # number=61
prog.cz(input_qubit[1],input_qubit[0]) # number=62
prog.h(input_qubit[0]) # number=63
prog.cx(input_qubit[0],input_qubit[1]) # number=68
prog.x(input_qubit[1]) # number=69
prog.cx(input_qubit[0],input_qubit[1]) # number=70
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[1]) # number=67
prog.x(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=42
prog.cz(input_qubit[0],input_qubit[3]) # number=43
prog.h(input_qubit[3]) # number=44
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(0.6157521601035993,input_qubit[1]) # number=60
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1903.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
d9141d1e99e4af79e76fdbd167877c57e8730b0b | eddbf9518e7384f0e9a1d9e19cbe74855c3f24bd | /2017013401_ouyyn/2017013401_mongoDB.py | 6b1b3f22a4e98ecb2157c86b25e8007a9244b964 | [] | no_license | wanghan79/2019_Python | 9d2391d799efd9545b2afb3565bc5c6d542d1d86 | f856409af92af3990773966d937d58d9d1cade04 | refs/heads/master | 2020-05-05T12:54:30.921361 | 2019-07-20T09:50:03 | 2019-07-20T09:50:03 | 180,050,522 | 11 | 14 | null | 2019-07-15T15:00:03 | 2019-04-08T01:59:24 | Python | UTF-8 | Python | false | false | 1,872 | py | import pymongo
import urllib
import threading
from urllib import request
"""在MongoDB中,定义一个数据库"""
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
db = myclient.test
collection = db['test']
"""定义一个MongoDB操作的类"""
class mongo_operation(threading.Thread):
def __init__(self,num):
threading.Thread.__init__(self)
self.num = num
"""将100000条数据存入MongoDB"""
def save_into_mongo(filename):
f = open(filename, 'r')
for i in f.readlines():
db.students.insert({'test':i})
r=mongo_operation(i)
r.start()
"""查询记录总数"""
def count(self, table, condition=None):
try:
self.db[table].count(condition)
print('查找成功')
except Exception as e:
print(e)
"""插入单条数据"""
def insert(self, table, data):
try:
self.db[table].insert(data)
print('插入成功')
except Exception as e:
print(e)
"'按条件删除记录'"
def delete(self, table, condition, one=False):
try:
if one:
self.db[table].delete_one(condition)
print('删除成功')
else:
result = self.db[table].delete_many(condition)
print('删除成功')
return result
except Exception as e:
print(e)
if __name__ == '__main__':
mongo_operation.save_into_mongo('random_output.txt')
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
5ebc086f0d77e202299f3b3be477c9f71d6911c6 | 0617c812e9bf58a2dbc1c1fef35e497b054ed7e4 | /venv/Lib/site-packages/pyrogram/raw/functions/channels/get_participants.py | 4421b21274922f390d1e789624cfb6e888760da2 | [] | no_license | howei5163/my_framework | 32cf510e19a371b6a3a7c80eab53f10a6952f7b2 | 492c9af4ceaebfe6e87df8425cb21534fbbb0c61 | refs/heads/main | 2023-01-27T14:33:56.159867 | 2020-12-07T10:19:33 | 2020-12-07T10:19:33 | 306,561,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetParticipants(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0x123e05e9``
Parameters:
channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
filter: :obj:`ChannelParticipantsFilter <pyrogram.raw.base.ChannelParticipantsFilter>`
offset: ``int`` ``32-bit``
limit: ``int`` ``32-bit``
hash: ``int`` ``32-bit``
Returns:
:obj:`channels.ChannelParticipants <pyrogram.raw.base.channels.ChannelParticipants>`
"""
__slots__: List[str] = ["channel", "filter", "offset", "limit", "hash"]
ID = 0x123e05e9
QUALNAME = "pyrogram.raw.functions.channels.GetParticipants"
def __init__(self, *, channel: "raw.base.InputChannel", filter: "raw.base.ChannelParticipantsFilter", offset: int, limit: int, hash: int) -> None:
self.channel = channel # InputChannel
self.filter = filter # ChannelParticipantsFilter
self.offset = offset # int
self.limit = limit # int
self.hash = hash # int
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetParticipants":
# No flags
channel = TLObject.read(data)
filter = TLObject.read(data)
offset = Int.read(data)
limit = Int.read(data)
hash = Int.read(data)
return GetParticipants(channel=channel, filter=filter, offset=offset, limit=limit, hash=hash)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(self.channel.write())
data.write(self.filter.write())
data.write(Int(self.offset))
data.write(Int(self.limit))
data.write(Int(self.hash))
return data.getvalue()
| [
"houwei5163"
] | houwei5163 |
80c10ca7be99a36f645a76e73a3ef3a2ddacbf3d | e5b778a273e3888ad0575a9dada39d458158127a | /page_extend/menu.py | af9ee20528dfdba95ea996ccd54becce8c5e8ad0 | [] | no_license | SevenLines/django-tealeaf | 896784baead7b9514e83edad8c3c2defdcdd060b | 959dbcbdd37a4e8f45de400e71710c5e746a97da | refs/heads/master | 2021-01-23T00:01:43.793383 | 2015-05-15T15:58:52 | 2015-05-15T15:58:52 | 17,891,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from cms.models import Page
from menus.base import Modifier
from menus.menu_pool import menu_pool
class PageExtendModifier(Modifier):
"""
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if breadcrumb:
return nodes
for node in nodes:
page = Page.objects.get(pk=node.id)
pageextend = page.pageextend if hasattr(page, "pageextend") else None
if pageextend:
node.icon = pageextend.image
node.touchable = pageextend.touchable
node.authentication_required = pageextend.authentication_required
return nodes
menu_pool.register_modifier(PageExtendModifier) | [
"mmailm@mail.ru"
] | mmailm@mail.ru |
000fa7d336c18bb5123c8819e4e0034612b94e24 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/124/24859/submittedfiles/main.py | 3274f178ccd8d3ee62b0080a390c7e6db74611c3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- coding: utf-8 -*-
from __future__ import division
import funcoes
import vabsol(m)
#COMECE AQUI
m = int(input('Digite o número m de termos da fórmula de pi: '))
epsilon = input('Digite o epsilon para o cálculo da razão áurea: ')
m = vabsol(m)
print('Valor aproximado de pi: %.15f' %calculopi(m))
print('Valor aproximado da razão áurea: %.15f' %razaurea(m, epsilon)) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5a3f0953a5f5150f348eba1aafea1639a6d67dde | 7c14aff73d1266b81944db01c7c4d374f40668a4 | /RNN_MINST_cls.py | 704de130697e4b2b32c21f0d94c27097ceac8eec | [
"Apache-2.0"
] | permissive | oushu1zhangxiangxuan1/learn-tensorflow | ec19d20cf41dc186b9ac7f7de47d5574f30d6ff9 | e83f8633fcbfd428ee3495b18b75ca78c7a25331 | refs/heads/master | 2020-08-02T10:28:46.143332 | 2019-10-30T08:05:23 | 2019-10-30T08:05:23 | 211,318,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(1)
# 导入数据
minst = input_data.read_data_sets('MNIST_data', one_hot=True)
# hyperparameters
lr = 0.001 # learning rate
training_iters = 100000
batch_size = 128
n_inputs = 28
n_steps = 28 # time steps
n_hidden_units = 128 # neurons in hidden layer
n_classes = 10
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])
# weights biases init
weights = {
# shape (28,128)
'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
# shape (128,10)
'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
# shape(128,)
'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
# shape(10, )
'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
def RNN(X, weights, biases):
# X==>(128 batches * 28 steps, 28 inputs)
X = tf.reshape(X, [-1, n_inputs])
# X_in = W*X + b
X_in = tf.matmul(X, weights['in']) + biases['in']
# X_in ==> (128 batches, 28 steps, 128 hidden) trans back to 3-dimenson
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
n_hidden_units, forget_bias=1.0, state_is_tuple=True)
init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(
lstm_cell, X_in, initial_state=init_state, time_major=False)
# Methods 1
results = tf.matmul(final_state[1], weights['out']) + biases['out']
# Methods 2
# outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))
# results = tf.matmul(outputs[-1], weights['out']) + biases['out']
return results
pred = RNN(x, weights, biases)
# def softmax_cross_entropy_with_logits_v2(
# _sentinel=None, # pylint: disable=invalid-name
# labels=None,
# logits=None,
# dim=-1,
# name=None)
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as s:
s.run(init)
step = 0
while step*batch_size < training_iters:
batch_xs, batch_ys = minst.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
s.run([train_op], feed_dict={
x: batch_xs,
y: batch_ys,
})
if step % 20 == 0:
print(s.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys,
}))
step += 1
| [
"zhang.xiangxuan@oushu.io"
] | zhang.xiangxuan@oushu.io |
ba5ea63f191283a9688c147dff9cfe8c6b3cf2a8 | 668188f5368680567be8c4af55a731e45a2380ba | /util_fix_pct_party.py | e6921735cd7015ca3905a22159709c6fda0e8daf | [] | no_license | wrishel/Ballot_tally | ec6ff128e61d6ebfe91574c9b55e083849665502 | 3e4ed8c4fe0503ead9b55fac77d3cfcd97c73c41 | refs/heads/master | 2023-06-24T17:35:51.537643 | 2021-07-28T18:38:32 | 2021-07-28T18:38:32 | 387,261,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | #!/usr/bin/env python3
"""Reset the unnormalized precinct and party fields in the Images table
by reinterpreting the barcode_upper field.
"""
import dbase
import datetime
from ETP_util import fullpath_to_image, subpath_to_image
import GLB_globs
import etpconfig
import os
import sys
import time
TESTING = True
pid = os.getpid()
if __name__ == '__main__':
GLB = GLB_globs.get()
config = GLB.config
PATH_TO_IMAGES = config['Election']['pathtoimages']
tot_processed = 0
start_time = datetime.datetime.now()
if TESTING:
db = dbase.ETPdb()
db.connect('testing')
else:
db = dbase.ETPdb()
db.connect('production')
decode_bc = dict()
for row in db.get_barcodes():
assert row.barcode not in decode_bc
decode_bc[row.barcode] = (row.precinct_id, row.party_id)
tries = 0
# get a list of rows to fix
#
rows_to_fix = db.get_images_for_barcode(pid, 10) # batch of 10
fixes = [] # tuples of (precinct, page_number, image_number)
last_img_num = None
for row in rows_to_fix:
image_num = row.image_number
last_img_num = image_num
pth = fullpath_to_image(image_num)
precinct = None
party_id = None
pagenum = None
# outputs
# Possible errors precinct party page
# -------- ----- ----
# 1) no file for this number MISSING MISSING MSG
# 2) upper barcode missing UNKNOWN UNKNOWN --
# 3) lower barcode missing -- -- UNK
# 4) upper barcode doesn't UNREC UNREC --
# translate
try:
barcodes = hgbt.getBallotBarcodes(pth)
except IOError as e:
print(f'{e}', sys.stderr)
barcodes = (None, None)
precinct = party_id = 'MISSNG'
pagenum = 'MSG'
else:
pagenum = page_num(barcodes[1]) # may be None
if pagenum is None: pagenum = 'UNK'
if barcodes[0] is None:
precinct = party_id = 'UNKNOWN'
else:
try:
(precinct, party_id) = decode_bc[barcodes[0]]
except KeyError:
# print(image_num, 'Unknown', barcode)
precinct = party_id = 'UNREC'
fixes.append((precinct, pagenum, party_id, barcodes[0], barcodes[1], image_num))
time.sleep(.15) # avoid starving the UI
tot_processed += len(fixes)
if len(fixes) != 0:
print(pid, 'processed', tot_processed, last_img_num,
datetime.datetime.now() - start_time)
db.update_unscanned_images(fixes)
if stopflag:
t = datetime.datetime.now() - start_time
print(f'===> pid {pid} exiting after interrupt, total processed={tot_processed}, time={t}')
exit(0)
# t = .20 starves the UI in fast simulation. Probably not in operation
# if len(fixes) != 0:
# t = 1
# print(f'{pid} dozing')
# else:
t = .25
time.sleep(t) # give other processes a chance
| [
"wrishel@gmail.com"
] | wrishel@gmail.com |
7f83ccac64e8a53e8d25e677d1b7ce5405fae100 | f5624e046836723fa065d47d11d52b8b3e448e7b | /espy/splitlist.py | a67fd9453ad4756a09f5179de45f048c6c7f82be | [] | no_license | esheldon/espy | 99daafcf9f81b1465994a0e717095a8ea0bcbfc8 | 090e6f4c65c2dc3acef3c2d98b2971d8c1002787 | refs/heads/master | 2023-07-20T23:49:18.662509 | 2023-07-12T15:40:24 | 2023-07-12T15:40:24 | 1,374,930 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | def split_list(els, nchunks):
nel = len(els)
chunksize = nel // nchunks
extra_items = nel % nchunks
chunks = []
start = 0
for i in range(nchunks):
this_chunksize = chunksize
if i < extra_items:
this_chunksize += 1
end = start + this_chunksize
chunk = els[start:end]
chunks.append(chunk)
start = start + this_chunksize
return [
chunk for chunk in chunks if len(chunk) > 0
]
def write_chunks(chunks, prefix, suffix):
nchunks = len(chunks)
cformat = '%0' + str(len(str(nchunks))) + 'i'
name_format = prefix + cformat + suffix
for i, chunk in enumerate(chunks):
fname = name_format % i
print(fname)
with open(fname, 'w') as fobj:
for el in chunk:
fobj.write(el)
| [
"erin.sheldon@gmail.com"
] | erin.sheldon@gmail.com |
1ae6af2cce4447358311e46e19be97470cce2616 | b9a131dd85fe5f2d2f5b16c97b1f859ede5a4914 | /Curso_em_Vídeo/CalculoMedia.py | 99126bab46b183c7dfc1ae6d976078cbf62a1650 | [] | no_license | juancassioo/python-sistemas | 131f218bf8fa1bebf1bc6e5fbe3222571ca7a42f | 378596d1c630357b1b1958a3b4e3e7f6f96dd5d1 | refs/heads/main | 2023-07-04T20:27:22.859839 | 2021-08-09T01:10:37 | 2021-08-09T01:10:37 | 394,105,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1+nota2)/2
if media >= 4 and media < 7:
print('Apto a avaliação final')
elif media < 4:
print('Reprovado')
else:
('Aprovado')
print(media) | [
"noreply@github.com"
] | juancassioo.noreply@github.com |
7245d605b82b94a01fd481256157eec9bc376170 | 67612c27c6d79ae180a5bc266833899abfefe9f5 | /152. Maximum Product Subarray.py | c0dac2b1ad7f683ccdf47d6b5d6e01df83f751e2 | [] | no_license | Katherinaxxx/leetcode | 7e9d0bd7dc613a824116f1247f42bfc33e485ff3 | dcebf49d1e024b9e69c4d9606c8afb32b9d07029 | refs/heads/master | 2023-01-27T20:14:09.459296 | 2023-01-08T07:01:53 | 2023-01-08T07:01:53 | 215,688,672 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/11/13 上午11:57
@Author : Catherinexxx
@Site :
@File : 152. Maximum Product Subarray.py
@Software: PyCharm
"""
# DP 乘积正负号 因此要保存最大值最小值 负数大小交换
# class Solution:
# def maxProduct(self, nums: List[int]) -> int:
# imax = 1
# imin = 1
# res = float('-inf')
# n = len(nums)
# for i in range(n):
# if(nums[i]<=0):
# imax, imin = imin, imax
# imax = max(imax*nums[i], nums[i])
# imin = min(imin*nums[i], nums[i])
# res = max(res, imax)
# return res
# 国际站
class Solution:
def maxProduct(self, A):
B = A[::-1] # 倒序
for i in range(1, len(A)):
A[i] *= A[i - 1] or 1
B[i] *= B[i - 1] or 1
return max(A + B) | [
"359391236@qq.com"
] | 359391236@qq.com |
2f26fdc0fa04f60227982e45b6f007e513c9edb9 | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/models/network_nic_link_duplex_mode20.py | a1f674f04d95c2db1e574d1bd892e439069155e9 | [] | no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,924 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class NetworkNicLinkDuplexMode20(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
UNKNOWN = "unknown"
FULL = "full"
HALF = "half"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""NetworkNicLinkDuplexMode20 - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkNicLinkDuplexMode20, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkNicLinkDuplexMode20):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NetworkNicLinkDuplexMode20):
return True
return self.to_dict() != other.to_dict()
| [
"root@s6006st157.petrobras.biz"
] | root@s6006st157.petrobras.biz |
8f07d9cbe7ec45103b3e054e55955ac2957f7213 | c5d553e68de3d5c730f5fe2550209de759eabc8c | /1929 소수 구하기.py | 0598ecfbae20cc2a1f69cbce7d06bbde3eb36502 | [] | no_license | KimMooHyeon/Algorithm-Studying | 6bb23b971b0c46c35f4cdde133148f2c5cfaa0f4 | e4417aadf209fd22f960239623bed542744fd374 | refs/heads/master | 2023-08-08T02:28:02.460332 | 2023-07-15T14:22:53 | 2023-07-15T14:22:53 | 198,966,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
M,N=map(int,input().split())
check_dp=[0]*(N+1)
sosu_arr=[]
for i in range(2,N+1):
if check_dp[i] == 0 :
if i >=M:
print(i)
i_num=i
for j in range(i,N+1,i_num):
check_dp[j]=1
| [
"dlfb77@gmail.com"
] | dlfb77@gmail.com |
c66da0093d768643a519fc3c1896367ef095410b | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_cumsum/trend_linear/cycle_30/ar_12/test_artificial_1024_cumsum_linear_30_12_100.py | 812701238d3aa0cdd90c6e52e27439a111ee85e6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 30, transform = "cumsum", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
f8898597a2689911dfc7b0dc3b0e25955c465047 | 55a273347cb103fe2b2704cb9653956956d0dd34 | /code/tmp_rtrip/lib2to3/fixes/fix_raise.py | 050118e90c4212a90b1fa805531431d0318601af | [
"MIT"
] | permissive | emilyemorehouse/ast-and-me | 4af1bc74fc967ea69ac1aed92664f6428acabe6a | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | refs/heads/master | 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 | MIT | 2022-11-04T11:36:43 | 2017-12-21T18:27:19 | Python | UTF-8 | Python | false | false | 2,209 | py | """Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
    # Participates in lib2to3's bottom-matcher optimization.
    BM_compatible = True
    # Matches `raise E`, `raise E, V` and `raise E, V, T` statements.
    PATTERN = """
    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
    """
    def transform(self, node, results):
        """Rewrite a Python 2 raise statement into its Python 3 form.

        See the module docstring for the full mapping. Returns the
        replacement node, or None when the statement cannot be converted.
        """
        syms = self.syms
        exc = results['exc'].clone()
        # String exceptions have no Python 3 equivalent: warn and bail out.
        if exc.type == token.STRING:
            msg = 'Python 3 does not support string exceptions'
            self.cannot_convert(node, msg)
            return
        # Python 2 raising a (possibly nested) tuple raises its first
        # element, so peel tuples down to the leftmost exception.
        if is_tuple(exc):
            while is_tuple(exc):
                exc = exc.children[1].children[0].clone()
            exc.prefix = ' '
        if 'val' not in results:
            # Bare `raise E`: structure is unchanged, emit cloned leaves.
            new = pytree.Node(syms.raise_stmt, [Name('raise'), exc])
            new.prefix = node.prefix
            return new
        val = results['val'].clone()
        if is_tuple(val):
            # `raise E, (a, b)` becomes `E(a, b)`: unpack tuple into args.
            args = [c.clone() for c in val.children[1:-1]]
        else:
            val.prefix = ''
            args = [val]
        if 'tb' in results:
            tb = results['tb'].clone()
            tb.prefix = ''
            e = exc
            # `raise E, None, T` keeps E uncalled; otherwise build E(V).
            if val.type != token.NAME or val.value != 'None':
                e = Call(exc, args)
            # Emit `raise E(V).with_traceback(T)`.
            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
            new = pytree.Node(syms.simple_stmt, [Name('raise')] + with_tb)
            new.prefix = node.prefix
            return new
        else:
            # `raise E, V` -> `raise E(V)`.
            return pytree.Node(syms.raise_stmt, [Name('raise'), Call(exc,
                args)], prefix=node.prefix)
| [
"emily@cuttlesoft.com"
] | emily@cuttlesoft.com |
3dccca44a60a3000863d6a65edd0d6b9ce25723f | 5292b03998384c0d2bb5858058892d7e45c5365b | /C3CTF/2017/lfa/server.py | 6153f36b52fbf1bfae1f787c826eee6c154d6d5b | [
"MIT"
] | permissive | TheusZer0/ctf-archives | 430ef80d367b44fd81449bcb108e367842cb8e39 | 033ccf8dab0abdbdbbaa4f0092ab589288ddb4bd | refs/heads/main | 2023-09-04T17:56:24.416820 | 2021-11-21T06:51:27 | 2021-11-21T06:51:27 | 430,603,430 | 1 | 0 | MIT | 2021-11-22T07:24:08 | 2021-11-22T07:24:07 | null | UTF-8 | Python | false | false | 729 | py | #!/usr/bin/python
import tempfile
import os
import string
import random
def randstr():
    """Return a random 10-character name drawn from ASCII letters and digits."""
    alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
    chars = [random.choice(alphabet) for _ in range(10)]
    return ''.join(chars)
# Seed the submission with a Ruby preamble: load the LFA extension and
# issue a write syscall printing "hello" as a sanity check.
code = "require 'LFA'\n"
code += "syscall 1, 1, \"hello\\n\", 6\n\n"
# NOTE(review): `max` shadows the builtin of the same name.
max = 600 # 600 lines should be more than enough ;)
print "Enter your code, enter the string END_OF_PWN to finish "
# Collect user-supplied lines until the END_OF_PWN sentinel is seen or
# the line budget is exhausted.
while max:
    new_code = raw_input("code> ")
    if new_code == "END_OF_PWN":
        break
    code += new_code + "\n"
    max -= 1
# Write the assembled Ruby program to a randomly named file under /tmp.
name = "/tmp/%s" % randstr()
with open(name, "w+") as f:
    f.write(code)
# Duplicate the flag file onto high file descriptor 1023 before running
# the submission, then close the original handle.
flag = open("flag", "r")
os.dup2(flag.fileno(), 1023)
flag.close()
# Execute the submission under a 40 second timeout.
cmd = "timeout 40 ruby %s" % name
os.system(cmd)
| [
"sajjadium@google.com"
] | sajjadium@google.com |
38b5170a2ac939a6a73dfceb7cb566679f67f6de | 0cb1ff9d0be4387e33f1003ab5cc72bab0345e7a | /wildcard/test/test_data/exceptions.py | b55003f488bc7e7760e5c0e1232632abe211c45b | [
"Apache-2.0"
] | permissive | kickstandproject/wildcard | 65995fb0090c4cfcad34f8373cfc912199ecf5da | 0ef2a15d8ac6b1d37db964d0baa7e40f9f771bc9 | refs/heads/master | 2020-05-17T00:41:09.908059 | 2015-01-27T20:25:33 | 2015-01-28T03:30:22 | 14,288,349 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as keystone_exceptions
from wildcard.test.test_data import utils
def create_stubbed_exception(cls, status_code=500):
    """Monkey-patch *cls* into a stub exception type and return an instance.

    Replaces the class's ``__init__``/``__str__``/``__unicode__`` so the
    exception merely records a status code and a canned message, and sets
    ``silence_logging`` so test harnesses can suppress log noise.
    """
    message = "Expected failure."

    def _init(self, code, message, **kwargs):
        self.code = code
        self.message = message

    def _str(self):
        return str(self.message)

    def _unicode(self):
        return unicode(self.message)  # noqa: F821 - Python 2 only

    cls.__init__ = _init
    cls.__str__ = _str
    cls.__unicode__ = _unicode
    cls.silence_logging = True
    return cls(status_code, message)
def data(TEST):
    """Attach stubbed keystoneclient exception fixtures to *TEST*.

    Populates ``TEST.exceptions`` with instances of Unauthorized and
    ClientException produced by ``create_stubbed_exception``.
    """
    TEST.exceptions = utils.TestDataContainer()
    unauth = keystone_exceptions.Unauthorized
    TEST.exceptions.keystone_unauthorized = create_stubbed_exception(unauth)
    keystone_exception = keystone_exceptions.ClientException
    TEST.exceptions.keystone = create_stubbed_exception(keystone_exception)
| [
"paul.belanger@polybeacon.com"
] | paul.belanger@polybeacon.com |
23f642c26b049ab38cc33af5219bab1179f92ca6 | 81c85850747f97ccc6ed36e3e0a859b99ef38fe8 | /agesprot/apps/activity/migrations/0001_initial.py | a3d6c2d181ec3eadaa4bcbb3962dbc5f910112c3 | [] | no_license | agesprot1/agesprot | f5047447a37ea8e92b4ffa2d72ae7814d0af8950 | 34c14a176bca5523999d27d5b9f695a6fac9df96 | refs/heads/master | 2021-01-20T22:11:18.686295 | 2016-08-23T22:38:57 | 2016-08-23T22:38:57 | 61,495,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-16 03:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the activity app.

    Creates the ``Actividad`` (activity) table and the
    ``Actividad_role`` join table linking activities to project roles.
    """
    initial = True
    # Must run after the initial migrations of the apps whose models are
    # referenced by the foreign keys below.
    dependencies = [
        ('base', '0001_initial'),
        ('project', '0001_initial'),
    ]
    operations = [
        # Activity with name, description, timestamps, and FKs to status,
        # priority, and owning project.
        migrations.CreateModel(
            name='Actividad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre_actividad', models.CharField(max_length=45)),
                ('descripcion_actividad', models.CharField(max_length=100)),
                ('fecha_creacion', models.DateField(auto_now=True)),
                ('fecha_entrega', models.DateField()),
                ('estado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Tipo_estado')),
                ('prioridad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Tipo_prioridad')),
                ('proyecto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Proyecto')),
            ],
        ),
        # Join table: which project roles participate in which activity.
        migrations.CreateModel(
            name='Actividad_role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('actividad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.Actividad')),
                ('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Roles_project')),
            ],
        ),
    ]
| [
"alka65@hotmail.com"
] | alka65@hotmail.com |
57c3ea658ea0e1295abf524f1108822e4d6d2a66 | bd867af5245366ee0abfd0f659fcb42170fff8ca | /hackerRank/algorithms/SolveMeFirst/solve_me_first.py | ddfca165b1148f31b2036c8cf23b5fbdc6214bd1 | [] | no_license | kruart/coding_challenges | 04736a6b66da813fd973e7a57aa084bbdab31183 | 395ae60ab392e49bb5bc2f0a4eef1dfd232899bb | refs/heads/master | 2021-06-16T08:51:21.815334 | 2019-11-07T08:39:13 | 2019-11-07T08:39:13 | 153,890,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | # https://www.hackerrank.com/challenges/solve-me-first/problem
def solve_me_first(a, b):
    """Return the sum of the two given integers."""
    total = a + b
    return total
# Read the two integers, one per line, from stdin.
a = int(input())
b = int(input())
# Sum them and write the result to stdout.
res = solve_me_first(a, b)
print(res)
| [
"weoz@ukr.net"
] | weoz@ukr.net |
6a234a228d30cac988244e65717f7efcd912dead | 3343e5193e2dd14a3dc2c8c375914b12323dea82 | /dnacentersdk/models/validators/v2_1_2/jsd_bf859ac64a0ba19c.py | 76a50a6b8acc29e75be779590c64df8f2276b99d | [
"MIT"
] | permissive | sandhjos/dnacentersdk | 6b483fe61307d4ea377b4bcc343e77aa7994e8bc | 9ca1a1923bc714e49e652099e2d60ee121d789e9 | refs/heads/master | 2023-04-18T17:52:09.350514 | 2021-05-07T23:41:47 | 2021-05-07T23:41:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | # -*- coding: utf-8 -*-
"""DNA Center Create HTTP read credentials data model.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorBf859Ac64A0BA19C(object):
    """Create HTTP read credentials request schema definition.

    Validates that a request body is an array of credential objects
    (username/password/port/secure/etc.); all fields are nullable.
    """
    def __init__(self):
        super(JSONSchemaValidatorBf859Ac64A0BA19C, self).__init__()
        # Compile the schema once at construction so validate() is cheap.
        # The .replace() strips the 16-space source indentation from the
        # embedded JSON literal before parsing.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "items": {
                "properties": {
                "comments": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "credentialType": {
                "description":
                "",
                "enum": [
                "GLOBAL",
                "APP",
                null
                ],
                "type": [
                "string",
                "null"
                ]
                },
                "description":
                {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "id": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "instanceTenantId": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "instanceUuid": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "password": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                },
                "port": {
                "type": [
                "number",
                "null"
                ]
                },
                "secure": {
                "type": [
                "boolean",
                "null"
                ]
                },
                "username": {
                "description":
                "",
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": [
                "object",
                "null"
                ]
                },
                "type": "array"
            }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate *request* against the schema.

        Raises MalformedRequest (wrapping the fastjsonschema error
        message) when the request does not conform.
        """
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| [
"jbogarin@altus.cr"
] | jbogarin@altus.cr |
e693a6dea2031551cda8d8b65dfdb15aff401888 | f693c9c487d31a677f009afcdf922b4e7f7d1af0 | /biomixer-venv/lib/python3.9/site-packages/pylint/epylint.py | 12f541c7af626e6e326e516ea1a0f9e7869b4cb0 | [
"MIT"
] | permissive | Shellowb/BioMixer | 9048b6c07fa30b83c87402284f0cebd11a58e772 | 1939261589fe8d6584a942a99f0308e898a28c1c | refs/heads/master | 2022-10-05T08:16:11.236866 | 2021-06-29T17:20:45 | 2021-06-29T17:20:45 | 164,722,008 | 1 | 3 | MIT | 2022-09-30T20:23:34 | 2019-01-08T19:52:12 | Python | UTF-8 | Python | false | false | 7,392 | py | # mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4
# -*- vim:fenc=utf-8:ft=python:et:sw=4:ts=4:sts=4
# Copyright (c) 2008-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Jakob Normark <jakobnormark@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Manuel Vázquez Acosta <mva.led@gmail.com>
# Copyright (c) 2014 Derek Harland <derek.harland@finq.co.nz>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Mihai Balint <balint.mihai@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2017, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Daniela Plascencia <daplascen@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Ryan McGuire <ryan@enigmacurry.com>
# Copyright (c) 2018 thernstig <30827238+thernstig@users.noreply.github.com>
# Copyright (c) 2018 Radostin Stoyanov <rst0git@users.noreply.github.com>
# Copyright (c) 2019, 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""Emacs and Flymake compatible Pylint.
This script is for integration with emacs and is compatible with flymake mode.
epylint walks out of python packages before invoking pylint. This avoids
reporting import errors that occur when a module within a package uses the
absolute import path to get another module within this package.
For example:
- Suppose a package is structured as
a/__init__.py
a/b/x.py
a/c/y.py
- Then if y.py imports x as "from a.b import x" the following produces pylint
errors
cd a/c; pylint y.py
- The following obviously doesn't
pylint a/c/y.py
- As this script will be invoked by emacs within the directory of the file
we are checking we need to go out of it to avoid these false positives.
You may also use py_run to run pylint with desired options and get back (or not)
its output.
"""
import os
import shlex
import sys
from io import StringIO
from subprocess import PIPE, Popen
def _get_env():
"""Extracts the environment PYTHONPATH and appends the current sys.path to
those."""
env = dict(os.environ)
env["PYTHONPATH"] = os.pathsep.join(sys.path)
return env
def lint(filename, options=()):
    """Pylint the given file.
    When run from emacs we will be in the directory of a file, and passed its
    filename. If this file is part of a package and is trying to import other
    modules from within its own package or another package rooted in a directory
    below it, pylint will classify it as a failed import.
    To get around this, we traverse down the directory tree to find the root of
    the package this module is in. We then invoke pylint from this directory.
    Finally, we must correct the filenames in the output generated by pylint so
    Emacs doesn't become confused (it will expect just the original filename,
    while pylint may extend it with extra directories if we've traversed down
    the tree)

    :param filename: path of the file to lint
    :param options: extra command-line arguments forwarded to pylint
    :returns: the exit code of the pylint subprocess
    """
    # traverse downwards until we are out of a python package
    # NOTE(review): the "/" sentinel assumes a POSIX filesystem root.
    full_path = os.path.abspath(filename)
    parent_path = os.path.dirname(full_path)
    child_path = os.path.basename(full_path)
    while parent_path != "/" and os.path.exists(
        os.path.join(parent_path, "__init__.py")
    ):
        child_path = os.path.join(os.path.basename(parent_path), child_path)
        parent_path = os.path.dirname(parent_path)
    # Start pylint
    # Ensure we use the python and pylint associated with the running epylint
    run_cmd = "import sys; from pylint.lint import Run; Run(sys.argv[1:])"
    cmd = (
        [sys.executable, "-c", run_cmd]
        + [
            "--msg-template",
            "{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}",
            "-r",
            "n",
            child_path,
        ]
        + list(options)
    )
    # Run from the package root so intra-package absolute imports resolve.
    with Popen(
        cmd, stdout=PIPE, cwd=parent_path, env=_get_env(), universal_newlines=True
    ) as process:
        for line in process.stdout:
            # remove pylintrc warning
            if line.startswith("No config file found"):
                continue
            # modify the file name that's output to reverse the path traversal we made
            parts = line.split(":")
            if parts and parts[0] == child_path:
                line = ":".join([filename] + parts[1:])
            print(line, end=" ")
        process.wait()
        return process.returncode
def py_run(command_options="", return_std=False, stdout=None, stderr=None):
    """Run pylint from python
    ``command_options`` is a string containing ``pylint`` command line options;
    ``return_std`` (boolean) indicates return of created standard output
    and error (see below);
    ``stdout`` and ``stderr`` are 'file-like' objects in which standard output
    could be written.
    Calling agent is responsible for stdout/err management (creation, close).
    Default standard output and error are those from sys,
    or standalone ones (``subprocess.PIPE``) are used
    if they are not set and ``return_std``.
    If ``return_std`` is set to ``True``, this function returns a 2-uple
    containing standard output and error related to created process,
    as follows: ``(stdout, stderr)``.
    To silently run Pylint on a module, and get its standard output and error:
    >>> (pylint_stdout, pylint_stderr) = py_run( 'module_name.py', True)
    """
    # Detect if we use Python as executable or not, else default to `python`
    executable = sys.executable if "python" in sys.executable else "python"
    # Create command line to call pylint
    epylint_part = [executable, "-c", "from pylint import epylint;epylint.Run()"]
    # On Windows, shlex must not treat backslashes in paths as escapes.
    options = shlex.split(command_options, posix=not sys.platform.startswith("win"))
    cli = epylint_part + options
    # Providing standard output and/or error if not set
    if stdout is None:
        if return_std:
            stdout = PIPE
        else:
            stdout = sys.stdout
    if stderr is None:
        if return_std:
            stderr = PIPE
        else:
            stderr = sys.stderr
    # Call pylint in a subprocess
    with Popen(
        cli,
        shell=False,
        stdout=stdout,
        stderr=stderr,
        env=_get_env(),
        universal_newlines=True,
    ) as process:
        # Wait for completion; the captured streams are only non-None when
        # PIPE was selected above.
        proc_stdout, proc_stderr = process.communicate()
    # Return standard output and error
    if return_std:
        return StringIO(proc_stdout), StringIO(proc_stderr)
    return None
def Run():
    """Command-line entry point: lint the file named on argv and exit with
    pylint's return code (or 1 on usage errors)."""
    args = sys.argv
    if len(args) == 1:
        print("Usage: %s <filename> [options]" % args[0])
        sys.exit(1)
    if not os.path.exists(args[1]):
        print("%s does not exist" % args[1])
        sys.exit(1)
    sys.exit(lint(args[1], args[2:]))
if __name__ == "__main__":
Run()
| [
"marcelo.becerra@ug.uchile.cl"
] | marcelo.becerra@ug.uchile.cl |
73ceb23b6051ec06412393193149dafa2b105e62 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/2732.py | 1112dd9834490f34fce3e6b7f8b1d4e4fedbdb3f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!/usr/bin/python3
from math import log
# Number of test cases on the first line of stdin.
# (Appears to solve Code Jam's "Tidy Numbers": find the largest number
# <= N whose digits are non-decreasing left to right — TODO confirm.)
lines = int(input())
for attempt in range(1,lines+1):
    N = int(input())
    # Index of N's most significant digit (digit count minus one).
    digits = int(log(N, 10))
    # Scan from the least significant digit upward. Whenever the digit in
    # the next-higher place exceeds the current digit, the ordering is
    # violated: subtracting (N % 10**(place+1)) + 1 turns the low digits
    # into all 9s and decrements the offending higher digit.
    for place in range(0,digits+1):
        digit = (N % (10 ** (place + 1)) // (10 ** place))
        next_digit = (N % (10 ** (place + 2)) // (10 ** (place + 1)))
        if digit < next_digit:
            N -= (N % (10 ** (place + 1)) + 1)
    print('Case #' + str(attempt) + ': ' + str(N))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3daffa234c6aa57bcd92b929a867332bb323d54a | 9004f36238c6b601d388b679f430d7614dcb8507 | /docs/source/conf.py | 4778b42c7e3c497d1a7e5dd1faf30927c8ed894c | [
"MIT"
] | permissive | Zwork101/backpack.py | 32107a0665bf46b81d3e9b6b2bbbc3fe35b2ade4 | 92dca1d6aa55438abe313ffc0cf2b14916238d5d | refs/heads/master | 2021-01-22T04:18:34.807054 | 2017-08-14T13:28:38 | 2017-08-14T13:28:38 | 92,454,505 | 10 | 2 | null | 2017-08-14T13:28:39 | 2017-05-26T00:01:23 | Python | UTF-8 | Python | false | false | 4,940 | py | import sphinx_rtd_theme
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# backpack.py-docs documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 25 17:00:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.rsttemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'backpack.py-docs'
copyright = '2017, Zwork101'
author = 'Zwork101'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
'navigation_depth': 3,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.rststatic']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'backpackpy-docsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'backpackpy-docs.tex', 'backpack.py-docs Documentation',
'Zwork101', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'backpackpy-docs', 'backpack.py-docs Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'backpackpy-docs', 'backpack.py-docs Documentation',
author, 'backpackpy-docs', 'One line description of project.',
'Miscellaneous'),
]
| [
"zwork101@gmail.com"
] | zwork101@gmail.com |
21364fa67620be18c6d8c2548c8efbba02fa0c26 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-cognitiveservices-search-customsearch/azure/cognitiveservices/search/customsearch/models/answer.py | 50dd4581e3630de0d5da4538c51262ca70084e2e | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,815 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .response import Response
class Answer(Response):
    """Answer.
    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SearchResultsAnswer
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param _type: Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar follow_up_queries:
    :vartype follow_up_queries:
     list[~azure.cognitiveservices.search.customsearch.models.Query]
    """
    # Server-populated fields are marked readonly; _type is the required
    # polymorphic discriminator.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'follow_up_queries': {'readonly': True},
    }
    # Maps Python attribute names to their JSON wire keys and types.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
    }
    # Concrete subclass to instantiate, keyed by the _type value.
    _subtype_map = {
        '_type': {'SearchResultsAnswer': 'SearchResultsAnswer'}
    }
    def __init__(self):
        super(Answer, self).__init__()
        # Readonly: populated by the service during deserialization.
        self.follow_up_queries = None
        # Discriminator value identifying this class on the wire.
        self._type = 'Answer'
| [
"noreply@github.com"
] | lmazuel.noreply@github.com |
eb5501a72b2dadfe695fbbf4f5bfebce35b113b1 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_2/models/alert_get_response.py | e958eff10dfa33d789703b5137b659a3506f1165 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,212 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class AlertGetResponse(object):
    """Paged response model for alert queries.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'continuation_token': 'str',
        'total_item_count': 'int',
        'items': 'list[Alert]'
    }
    attribute_map = {
        'continuation_token': 'continuation_token',
        'total_item_count': 'total_item_count',
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        continuation_token=None,  # type: str
        total_item_count=None,  # type: int
        items=None,  # type: List[models.Alert]
    ):
        """
        Keyword args:
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
            total_item_count (int): Total number of items after applying `filter` params.
            items (list[Alert]): A list of alert objects.
        """
        # Only store attributes that were actually supplied; unset fields
        # are surfaced as None via __getattribute__ below.
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject assignment to any attribute not declared in attribute_map.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AlertGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Property instances read as None on instances.
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize list elements that are models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize dict values that are models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(AlertGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AlertGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"msholes@purestorage.com"
] | msholes@purestorage.com |
ecc2abe94866c5f7103035ef171af8b7d9358fc3 | 96fe253e9a740b51dcd7f83d6ab01bb248c2bf4b | /patrones_poma/cohesive_modules/cohesivo_aciclico/tst_cohesivo_aciclico.py | 0415e8c1c409d6b307ab03f7c4ca238275c21b6d | [] | no_license | vvalotto/Patrones_Disenio_Python | 7574470752a5f14214434a927c2c5e0faaa592ba | 7ab6a74e9b008c3434af0a56d4c2b6b7de3617bf | refs/heads/master | 2021-04-28T19:16:21.535998 | 2018-10-21T14:05:36 | 2018-10-21T14:05:36 | 121,891,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from POMA.cohesive_modules.cohesivo_aciclico.factura.factura import *
from POMA.cohesive_modules.cohesivo_aciclico.ruteador.ruteador_tipoA import *
# Demo: build a Factura (invoice) wired with the type-A router strategy,
# then exercise its routing and priority lookups.
mi_factura = Factura("A", "1", 200.20, RuteadorTipoA())
print(mi_factura.rutear())
print(mi_factura.obtener_prioridad())
| [
"vvalotto@gmail.com"
] | vvalotto@gmail.com |
79dfcc06309bb5e28a4290371c43aab711ed6714 | 61d434953d55af170c4abb023686256509a4bffc | /restapi/schemas/replies/ReplySchema.py | 04c71429ee46d94f129a9d811dfb265abba5a929 | [] | no_license | mentimun-mentah/tridatu-backend | c6e471bc08c010ebc5b3fcdf5ef5bac0c33758de | 32cf22c24327b228cba57782ffd4740906e8e7d8 | refs/heads/master | 2023-03-14T13:32:31.264593 | 2021-03-09T13:30:12 | 2021-03-09T13:30:12 | 302,246,645 | 4 | 1 | null | 2021-03-09T13:30:13 | 2020-10-08T06:04:50 | Python | UTF-8 | Python | false | false | 755 | py | from pydantic import BaseModel, constr, validator
from typing import List
from datetime import datetime
class ReplySchema(BaseModel):
    """Shared pydantic base for reply payloads and responses."""
    class Config:
        # Reject empty strings and strip surrounding whitespace on every
        # str-typed field of subclasses.
        min_anystr_length = 1
        anystr_strip_whitespace = True
class ReplyCreate(ReplySchema):
    """Request body for creating a reply to a comment."""
    # Free-text reply, at least 5 characters long.
    message: constr(strict=True, min_length=5)
    # Digits-only string; the regex also admits the empty string.
    comment_id: constr(strict=True, regex=r'^[0-9]*$')
    @validator('comment_id')
    def parse_str_to_int(cls, v):
        # Convert the digit string to int; an empty string becomes None.
        return int(v) if v else None
class ReplyData(ReplySchema):
    """Flattened reply row joined with its author's public user fields."""
    replies_id: str
    replies_message: str
    replies_user_id: str
    replies_created_at: datetime
    users_username: str
    users_avatar: str
    users_role: str
class ReplyCommentData(ReplySchema):
    """A comment together with the list of replies attached to it."""
    comments_id: str
    comments_replies: List[ReplyData]
| [
"nyomanpradipta120@gmail.com"
] | nyomanpradipta120@gmail.com |
cc51f88f7e67cce097fbe723ea57b273d945e573 | eb2df6020f5759feee3d6d78c5f8c78999454a09 | /migrations/versions/d8e9859f3cbc_.py | 03229422b0101f909d63df429c85801810a4f7f5 | [] | no_license | mywork-dragon/dave-energy | 7a08f855d245c2d90a9c13aa85fc3b9f28ae9294 | 4b3430be6ef6957389ab05be3a17a0245f5d6662 | refs/heads/master | 2023-07-28T02:55:26.791724 | 2021-09-06T11:44:30 | 2021-09-06T11:44:30 | 365,872,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | # type: ignore
"""Devices
Revision ID: d8e9859f3cbc
Revises: 228cb8a5f8d4
Create Date: 2020-03-06 11:48:38.819954
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "d8e9859f3cbc"       # id of this migration
down_revision = "228cb8a5f8d4"  # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``devices`` table (id/name/unit plus FK to ``buildings``)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "devices",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=True),
        sa.Column("updated_at", sa.DateTime(), nullable=True),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("unit", sa.String(length=255), nullable=True),
        sa.Column("building_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["building_id"],
            ["buildings.id"],
            name=op.f("fk_devices_building_id_buildings"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_devices")),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``devices`` table, reverting :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("devices")
    # ### end Alembic commands ###
| [
"dragonblueyounger@gmail.com"
] | dragonblueyounger@gmail.com |
cad6d289e8b1c0a1fd07a1687f78153f63bc7a2a | cbb2dde4b4695d6a3012a5c2540265c45a840b32 | /bin/evtest.py | b99ad5422931e97f5de044fb5f7cfbc34233287d | [
"BSD-2-Clause"
] | permissive | sil2100/python-evdev | 806d1c8e158976446cf01b84972f15b5badc9e23 | d076168037ac6b78e7e8243918c4d3ce0e0923aa | refs/heads/master | 2021-01-24T05:47:36.899225 | 2013-05-28T13:22:07 | 2013-05-28T13:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python
# encoding: utf-8
'''
evdev example - input device event monitor
'''
from sys import argv, exit
from select import select
from evdev import ecodes, InputDevice, list_devices, AbsInfo
usage = 'usage: evtest <device> [<type> <value>]'
# One-line template for a non-SYN event (timestamp, type, code, value).
evfmt = 'time {:<16} type {} ({}), code {:<4} ({}), value {}'
device_dir = '/dev/input/'
# Optional type/value filter parsed from argv below.  NOTE(review): they are
# assigned but never read in the visible event loop — confirm intended use.
query_type = None
query_value = None
def select_device():
    '''Interactively pick one of the accessible input devices.'''
    candidates = []
    for path in reversed(list_devices(device_dir)):
        candidates.append(InputDevice(path))

    row_tmpl = '{0:<3} {1.fn:<20} {1.name:<35} {1.phys}'
    rows = []
    for idx, dev in enumerate(candidates):
        rows.append(row_tmpl.format(idx, dev))

    # Header, a separator as wide as the longest row, then the device table.
    print('ID {:<20} {:<35} {}'.format('Device', 'Name', 'Phys'))
    widest = max(rows, key=len)
    print('-' * len(widest))
    print('\n'.join(rows))
    print('')

    answer = input('Select device [0-{}]:'.format(len(rows) - 1))
    return candidates[int(answer)]
def print_event(e):
    '''Pretty-print a single input event to stdout.'''
    stamp = e.timestamp()

    if e.type != ecodes.EV_SYN:
        # Resolve the symbolic code name when this event type is known.
        codename = ecodes.bytype[e.type][e.code] if e.type in ecodes.bytype else '?'
        print(evfmt.format(stamp, e.type, ecodes.EV[e.type], e.code, codename, e.value))
        return

    # Synchronization events get their own banner-style lines.
    if e.code == ecodes.SYN_MT_REPORT:
        print('time {:<16} +++++++++ {} ++++++++'.format(stamp, ecodes.SYN[e.code]))
    else:
        print('time {:<16} --------- {} --------'.format(stamp, ecodes.SYN[e.code]))
# Argument handling: no args -> interactive picker; one arg -> device path;
# three args -> device path plus a type/value query pair.
if len(argv) == 1:
    device = select_device()
elif len(argv) == 2:
    device = InputDevice(argv[1])
elif len(argv) == 4:
    device = InputDevice(argv[1])
    # NOTE(review): parsed but not used by the loop below — confirm intent.
    query_type = argv[2]
    query_value = argv[3]
else:
    print(usage) ; exit(1)
# Summary of the selected device.
print('Device name: {.name}'.format(device))
print('Device info: {.info}'.format(device))
print('Repeat settings: {}'.format(device.repeat))
# Dump every capability; absolute axes carry an extra AbsInfo description.
print('Device capabilities:')
for type, codes in device.capabilities(verbose=True).items():
    print('  Type {} {}:'.format(*type))
    for i in codes:
        if isinstance(i[1], AbsInfo):
            print('    Code {:<4} {}:'.format(*i[0]))
            print('      {}'.format(i[1]))
        else:
            print('    Code {:<4} {}'.format(*i))
    print('')
# Block on select() and print events forever (Ctrl-C to stop).
print('Listening for events ...\n')
while True:
    r, w, e = select([device], [], [])
    for ev in device.read():
        print_event(ev)
| [
"georgi.t.valkov@gmail.com"
] | georgi.t.valkov@gmail.com |
6b868d1463284f7e4da356224d0455120fc4d8f2 | f16326f33b286ac10740016c67446dd3279ee60e | /sklearn/tutorials/demo.py | 772c25525fda01237488f7277447a5bd37811e95 | [] | no_license | zhuliyi10/ml_demo | da1b4189a941e079cb780bcf6ab5ae710d407556 | 04303ea37dbfc0ba8dd57e77ff53ccdcae1e5ce5 | refs/heads/master | 2020-03-31T08:23:16.677823 | 2019-01-24T08:32:49 | 2019-01-24T08:32:49 | 152,054,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_digits()
data = iris.images
output = iris.target
print(data.shape)
data1 = data.reshape((data.shape[0], -1))
print(data1.shape)
plt.imshow(data[0], cmap=plt.cm.gray_r)
plt.show()
| [
"2280835569@qq.com"
] | 2280835569@qq.com |
2080139d54365d3f68b0112f690fbba5d900f8c9 | 635c9f0501039f5a099849a3108e19de76092aea | /lecture/ssafy_190322/todo/todo/settings.py | b197b4b94c64ba9757a523c9448b11e6fa9af024 | [] | no_license | Hansung-Lee/SSAFY | 87ebea0808bb40381678d678e1035dc5fa2c2eb0 | cdb7ae1bba0e98e733eed703da2c62217c319462 | refs/heads/master | 2020-04-14T20:03:05.975040 | 2019-05-16T08:57:21 | 2019-05-16T08:57:21 | 164,080,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,248 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '@9oh55pu^-bogvo(eb)ndu9!!@0^76cv043ln-fv9-hujzvafp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: only localhost is served while DEBUG is on.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-local apps.
    'users',
    'todos',
    'shout',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Store flash messages in the session rather than cookies.
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
"ajtwlsgkst@naver.com"
] | ajtwlsgkst@naver.com |
fc8db3e4cc9c4ef2ecf999f752fac8a6ddd96c26 | ece3a452b51d2cbcac6c20481ab6660b77c3f955 | /Eigenbasis.py | 1dca2aade14eaf471cc74f426bd77d2aae761a2a | [] | no_license | ilmoi/imperial_math | ef1d220a361c4dab6fde436b482941d27e97f8f3 | a3e8537dc201fef486a17f7a5e024fa1d60e2644 | refs/heads/master | 2022-08-09T20:29:13.805569 | 2020-05-14T16:42:13 | 2020-05-14T16:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | import numpy as np
#in numpy "matrix" - strictly 2d. need to use arrays instead.
#in numpy need to write numbers horizonally, not vertically
T = np.array([[3/2,-1],[-1/2,1/2]]) #transform
v = np.array([-1,1]) #vector to check
Ans1 = T @ T @ v
C = np.array([[2.73,1],[-1,1.366]]) #two eigenvectors
C_inv = np.linalg.inv(C)
D = np.array([[1.866,0],[0,0.134]]) # [[eigenvalue1, 0],[0, eigenvalue2]]
new_T = C @ D @ D @ C_inv
Ans2 = new_T @ v
print('method 1 results in: ' +str(Ans1))
print('method 1 results in: ' +str(Ans2)) | [
"iljamoisejevs@gmail.com"
] | iljamoisejevs@gmail.com |
2f05b4b9c701ea5fb7a5fb4e59cda30ae79ab86c | 30291450c064006f1bd9bc5c432b8a869e2166bb | /tags/0.6/tests/test_types_cn.py | 6a1c00d8014fa97165b2840de092c295ce17b0f9 | [
"MIT"
] | permissive | BGCX261/zhpy-svn-to-git | 96f04e2f72c61671324219a85939137ff5cd9ef6 | 70da095393fe13543433ab5115cb6c1a519d64b0 | refs/heads/master | 2021-01-22T22:49:04.898314 | 2015-08-25T15:44:00 | 2015-08-25T15:44:00 | 41,587,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | #coding=utf-8
"""
test build-in types
"""
from zhpy import convertor
def test_int():
    """
    整数() converts to the builtin int()
    """
    assert convertor("整数(2.0)") == "int(2.0)"
def test_float():
    """
    小数() converts to the builtin float()
    """
    assert convertor("小数(2)") == "float(2)"
def test_boolean():
    """
    真/实 convert to True; 假/虛 convert to False
    """
    assert convertor("n = 真") == "n = True"
    assert convertor("p = 假") == "p = False"
    assert convertor("q = 实") == "q = True"
    assert convertor("r = 虛") == "r = False"
def test_string():
    """
    Chinese string-method names convert to startswith/endswith
    """
    assert convertor("s.开始为('he')") == "s.startswith('he')"
    assert convertor("s.结束为('he')") == "s.endswith('he')"
def test_list():
    """
    列表() converts to list(); 加入 to append; 分离 to split
    """
    assert convertor("列表((1,2,3,4)) == [1,2,3,4]") == \
           "list((1,2,3,4)) == [1,2,3,4]"
    assert convertor("a = []; a.加入(2); 宣告 a == [2]") == \
           "a = []; a.append(2); assert a == [2]"
    p = "h,e,l,l,o"
    assert convertor('p.分离(",")') == 'p.split(",")'
def test_dict():
    """
    字典() converts to dict()
    """
    assert convertor("字典(a=1, b=2) == {'a':1, 'b':2}") == \
           "dict(a=1, b=2) == {'a':1, 'b':2}"
def test_tuple():
    """
    数组() converts to tuple()
    """
    assert convertor("数组([1,2,3,4]) == (1,2,3,4)") == \
           "tuple([1,2,3,4]) == (1,2,3,4)"
def test_set():
    """
    类组() converts to set()
    """
    assert convertor("类组([1,2,3,4]) = set([1, 2, 3, 4])") == \
           "set([1,2,3,4]) = set([1, 2, 3, 4])"
def test_file():
    """
    打开/读一行/读多行/读取/写入/关闭 convert to the file-object API
    """
    assert convertor('fd = 打开("ReadMe_test.txt", "r")') == \
           'fd = open("ReadMe_test.txt", "r")'
    assert convertor('temp = fd.读一行()') == 'temp = fd.readline()'
    assert convertor('temp = fd.读多行()') == 'temp = fd.readlines()'
    assert convertor('temp = fd.读取()') == 'temp = fd.read()'
    assert convertor('fd.写入(temp)') == 'fd.write(temp)'
    assert convertor('fd.关闭()') == 'fd.close()'
"you@example.com"
] | you@example.com |
5fbe66864d69107584b51660b80c9014d7f093c0 | 6967eccf98ad8c51e69606287279c9ed1c0d344f | /tests/components/sensor/test_mqtt.py | b59ea867c5886f709ca8a4032536d58c11395eec | [
"MIT"
] | permissive | plucena24/home-assistant | 6c146daceff147db2e22b74a538f4c68f20029ca | 060cbaf66b3722480e6bca54c2c32111179e7067 | refs/heads/dev | 2023-08-18T07:09:55.539600 | 2015-10-23T22:38:19 | 2015-10-23T22:38:19 | 44,849,509 | 0 | 0 | MIT | 2023-08-08T18:37:20 | 2015-10-24T02:50:00 | Python | UTF-8 | Python | false | false | 1,220 | py | """
tests.components.sensor.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests mqtt sensor.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.sensor as sensor
from tests.common import mock_mqtt_component, fire_mqtt_message
class TestSensorMQTT(unittest.TestCase):
    """ Test the MQTT sensor platform. """
    def setUp(self):  # pylint: disable=invalid-name
        # Fresh Home Assistant core with the MQTT component mocked out.
        self.hass = ha.HomeAssistant()
        mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop everything that was started. """
        self.hass.stop()
    def test_setting_sensor_value_via_mqtt_message(self):
        """A payload published on state_topic becomes the sensor state."""
        self.assertTrue(sensor.setup(self.hass, {
            'sensor': {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test-topic',
                'unit_of_measurement': 'fav unit'
            }
        }))
        fire_mqtt_message(self.hass, 'test-topic', '100')
        # Let the event loop process the fired message.
        self.hass.pool.block_till_done()
        state = self.hass.states.get('sensor.test')
        self.assertEqual('100', state.state)
        self.assertEqual('fav unit',
                         state.attributes.get('unit_of_measurement'))
| [
"paulus@paulusschoutsen.nl"
] | paulus@paulusschoutsen.nl |
9c444fdadbfca4ef40a3f4389874729e20364461 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/toontowngui/FeatureComingSoonDialog.py | 3a5555537952b8d9f862f80f1ae3ae4fb12c852c | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 794 | py | #Embedded file name: toontown.toontowngui.FeatureComingSoonDialog
from direct.fsm import ClassicFSM, State
from toontown.toonbase.ToontownGlobals import OptionsPageHotkey
from toontown.toontowngui import TTDialog
class FeatureComingSoonDialog:
    """Modal acknowledge dialog telling the player a feature is disabled."""
    def __init__(self, text = 'Woah! That feature will be enabled in \n\x01textShadow\x01beta\x02! Sorry about that!'):
        # The \x01...\x02 bytes in the default text are presumably Panda3D
        # text-property markers (textShadow styling) — confirm if edited.
        self.dialog = TTDialog.TTGlobalDialog(dialogName='ComingSoon', doneEvent='exitDialog', style=TTDialog.Acknowledge, text=text, text_wordwrap=24, text_pos=(0, 0, -0.8), suppressKeys=True, suppressMouse=True)
        self.dialog.accept('exitDialog', self.exitDialog)
        # Dim the screen behind the dialog while it is open.
        base.transitions.fadeScreen(0.2)
    def exitDialog(self):
        """OK pressed: remove the fade and destroy the dialog."""
        base.transitions.noFade()
        self.dialog.cleanup()
        del self.dialog
| [
"linktlh@gmail.com"
] | linktlh@gmail.com |
f26d03d5110d3947d2d23a41ebbf1f0641052bf0 | 15d3a10db27128c06f84c30fa8d64b2e1c629fd9 | /express/pallets/migrations/0013_airwaybill_channel.py | c0115cf07beb503bf25d4c75061716ce3a2dd652 | [] | no_license | yiyuhao/exp | 7cba6650e3113ba05698f90a7baf75b680dd6435 | 866a90b2e6f0d113559b0674f514cdd56020f7d6 | refs/heads/master | 2020-03-19T20:20:04.799355 | 2018-07-15T14:55:24 | 2018-07-15T14:55:24 | 136,897,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-18 01:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``channel`` FK (SET_NULL on delete) to AirWaybill."""
    dependencies = [
        ('pallets', '0012_auto_20170317_1722'),
    ]
    operations = [
        migrations.AddField(
            model_name='airwaybill',
            name='channel',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='air_waybills', to='pallets.Channel', verbose_name='\u6e20\u9053'),
        ),
    ]
| [
"yiyuhao@mixadx.com"
] | yiyuhao@mixadx.com |
cc105435957503dedf2f458f6b7088c268b060db | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_blazons.py | c2fbb9176bef4ff2bfd573dca5916e841fcd4995 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._blazon import _BLAZON
#class header
class _BLAZONS(_BLAZON):
    """Lexicon entry for the inflected verb form "blazons" (base: blazon)."""

    def __init__(self):
        _BLAZON.__init__(self)
        # Override the inherited identity fields for this inflection.
        self.jsondata = {}
        self.basic = "blazon"
        self.specie = 'verbs'
        self.name = "BLAZONS"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
222a2fb37a868059da32a081e15ecbce85b1ff49 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_peeked.py | 8d7bc05fa8a2f971e389bfaba31a98cad7940000 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.verbs._peek import _PEEK
#class header
class _PEEKED(_PEEK):
    """Lexicon entry for the past-tense verb form "peeked" (base: peek)."""

    def __init__(self):
        _PEEK.__init__(self)
        # Override the inherited identity fields for this inflection.
        self.jsondata = {}
        self.basic = "peek"
        self.specie = 'verbs'
        self.name = "PEEKED"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c6fb5bf9e1f88c1482a354106390c40ed92f2fb0 | adfb9b91518752b361713594eacd850557f7721b | /tests/test_html_formatter.py | 9e0fd4ac17549f8581a9e9139cf292adde5af931 | [
"BSD-2-Clause"
] | permissive | erickt/pygments | 6223c688cbb6ef81ab3f73d6aa7da9fa796bb6c4 | 05d4b6ce7e51501d2ac22919386017c08c9f5547 | refs/heads/master | 2021-01-10T18:59:43.144002 | 2009-05-17T17:05:11 | 2009-05-17T17:05:11 | 1,125,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,348 | py | # -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import unittest
import StringIO
import tempfile
from os.path import join, dirname, isfile, abspath
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
import support
TESTFILE, TESTDIR = support.location(__file__)
tokensource = list(PythonLexer(encoding='utf-8').get_tokens(open(TESTFILE).read()))
class HtmlFormatterTest(unittest.TestCase):
    """Python 2 test suite for pygments' HtmlFormatter."""
    def test_correct_output(self):
        """HTML output with tags stripped must equal the escaped raw text."""
        hfmt = HtmlFormatter(nowrap=True)
        houtfile = StringIO.StringIO()
        hfmt.format(tokensource, houtfile)
        nfmt = NullFormatter()
        noutfile = StringIO.StringIO()
        nfmt.format(tokensource, noutfile)
        stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
        escaped_text = escape_html(noutfile.getvalue())
        self.assertEquals(stripped_html, escaped_text)
    def test_external_css(self):
        """cssfile is written next to the output (relative) or as given (absolute)."""
        # test correct behavior
        # CSS should be in /tmp directory
        fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
        # CSS should be in TESTDIR (TESTDIR is absolute)
        fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
                             outencoding='utf-8')
        tfile = tempfile.NamedTemporaryFile(suffix='.html')
        fmt1.format(tokensource, tfile)
        try:
            fmt2.format(tokensource, tfile)
            self.assert_(isfile(join(TESTDIR, 'fmt2.css')))
        except IOError:
            # test directory not writable
            pass
        tfile.close()
        self.assert_(isfile(join(dirname(tfile.name), 'fmt1.css')))
        # Clean up both generated CSS files (the second may not exist).
        os.unlink(join(dirname(tfile.name), 'fmt1.css'))
        try:
            os.unlink(join(TESTDIR, 'fmt2.css'))
        except OSError:
            pass
    def test_all_options(self):
        """Formatting must not raise for any supported option combination."""
        for optdict in [dict(nowrap=True),
                        dict(linenos=True),
                        dict(linenos=True, full=True),
                        dict(linenos=True, full=True, noclasses=True)]:
            outfile = StringIO.StringIO()
            fmt = HtmlFormatter(**optdict)
            fmt.format(tokensource, outfile)
    def test_valid_output(self):
        """Validate full HTML output with nsgmls, when it is installed."""
        # test all available wrappers
        fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
                            outencoding='utf-8')
        handle, pathname = tempfile.mkstemp('.html')
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()
        catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
        try:
            try:
                import subprocess
                ret = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
                                       stdout=subprocess.PIPE).wait()
            except ImportError:
                # Python 2.3 - no subprocess module
                ret = os.popen('nsgmls -s -c "%s" "%s"' % (catname, pathname)).close()
                if ret == 32512: raise OSError # not found
        except OSError:
            # nsgmls not available
            pass
        else:
            self.failIf(ret, 'nsgmls run reported errors')
        os.unlink(pathname)
    def test_get_style_defs(self):
        """Style definitions honour the default, custom and list selectors."""
        fmt = HtmlFormatter()
        sd = fmt.get_style_defs()
        self.assert_(sd.startswith('.'))
        fmt = HtmlFormatter(cssclass='foo')
        sd = fmt.get_style_defs()
        self.assert_(sd.startswith('.foo'))
        sd = fmt.get_style_defs('.bar')
        self.assert_(sd.startswith('.bar'))
        sd = fmt.get_style_defs(['.bar', '.baz'])
        fl = sd.splitlines()[0]
        self.assert_('.bar' in fl and '.baz' in fl)
    def test_unicode_options(self):
        """Unicode option values must not break encoded output."""
        fmt = HtmlFormatter(title=u'Föö',
                            cssclass=u'bär',
                            cssstyles=u'div:before { content: \'bäz\' }',
                            encoding='utf-8')
        handle, pathname = tempfile.mkstemp('.html')
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()
| [
"devnull@localhost"
] | devnull@localhost |
8018c52bfae85b474764db99ee57fd7438b91010 | bc441bb06b8948288f110af63feda4e798f30225 | /scheduler_sdk/model/topology/link_pb2.pyi | dd2a9cad5ae92103824e305f7a2d316b07a017af | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from scheduler_sdk.model.topology.linkStyle_pb2 import (
LinkStyle as scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases so message fields named like the builtins cannot shadow them
# in the generated annotations below.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
# Python 2 only: buffer/unicode do not exist on Python 3.
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode
# Generated stub for the topology Link protobuf message: source/target node
# ids plus an optional LinkStyle.  Do not edit by hand.
class Link(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    source = ... # type: typing___Text
    target = ... # type: typing___Text
    @property
    def style(self) -> scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle: ...
    def __init__(self,
        *,
        source : typing___Optional[typing___Text] = None,
        target : typing___Optional[typing___Text] = None,
        style : typing___Optional[scheduler_sdk___model___topology___linkStyle_pb2___LinkStyle] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Link: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Link: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"style",b"style"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"source",b"source",u"style",b"style",u"target",b"target"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
6abcd1f7e1f19b6f90c46dbd20e5481f4ebc5940 | 35fb652b0b20e7352cacdc078e23464fad40ccf3 | /web/controllers/api/member.py | 1e2ff1b387e4e2d46f19dd619234d93e17030ffd | [] | no_license | xiaoheng14/flask_wx_order | 52f8fe01a473855c22a43c2651b102c291dbde04 | be3314fdb0266eecf4ca7f5a55b2ea24078857c9 | refs/heads/master | 2020-08-23T03:59:19.006943 | 2018-11-19T12:21:25 | 2018-11-19T12:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | # _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/11/14 3:17 PM'
"""
from flask import request, jsonify
from web.controllers.api import route_api
from application import app, db
from common.models.member.member import Member
from common.models.member.oauth_member_bind import OauthMemberBind
from common.libs.helper import get_current_date
from common.libs.member.member_service import MemberService
@route_api.route("/member/login", methods=["GET", "POST"])
def login():
    """WeChat mini-program login: exchange a wx code for an auth token.

    Registers a new Member + OauthMemberBind pair on first login, then
    returns ``{"data": {"token": ...}}`` on success.
    """
    resp = {"code": 200, "msg": "操作成功", "data": {}}
    req = request.values
    code = req["code"] if "code" in req else ""
    if not code or len(code) < 1:
        resp["code"] = -1
        resp["msg"] = "需要code"
        return jsonify(resp)
    # Resolve the wx login code to an openid via the WeChat API.
    openid = MemberService.get_wx_openid(code)
    if openid is None:
        resp["code"] = -1
        resp["msg"] = "调用微信出错"
        return jsonify(resp)
    nickname = req["nickName"] if "nickName" in req else ""
    sex = req["gender"] if "gender" in req else 0
    avatar = req["avatarUrl"] if "avatarUrl" in req else ""
    # If this openid is already bound we skip registration and go straight
    # to issuing a token (type=1 marks the WeChat binding).
    bind_info = OauthMemberBind.query.filter_by(openid=openid, type=1).first()
    if not bind_info:
        # First login: create the member record...
        model_member = Member()
        model_member.nickname = nickname
        model_member.sex = sex
        model_member.avatar = avatar
        model_member.salt = MemberService.gen_salt()
        model_member.updated_time = model_member.created_time = get_current_date()
        db.session.add(model_member)
        db.session.commit()
        # ...then the oauth binding pointing at it.
        model_bind = OauthMemberBind()
        model_bind.member_id = model_member.id
        model_bind.type = 1
        model_bind.openid = openid
        model_bind.extra = ""
        model_bind.updated_time = model_bind.created_time = get_current_date()
        db.session.add(model_bind)
        db.session.commit()
        bind_info = model_bind
    member_info = Member.query.filter_by(id=bind_info.member_id).first()
    # Token format: "<auth_code>#<member_id>".
    token = "{}#{}".format(MemberService.gen_auth_code(member_info), member_info.id)
    resp["data"] = {"token": token}
    return jsonify(resp)
@route_api.route("/member/check-reg", methods=["GET", "POST"])
def check_reg():
    """Check whether the wx code's openid is already registered.

    Unlike :func:`login` this never creates records: it returns an error
    when the openid has no binding, otherwise the same token format.
    """
    resp = {"code": 200, "msg": "操作成功", "data": {}}
    req = request.values
    code = req["code"] if "code" in req else ""
    if not code or len(code) < 1:
        resp["code"] = -1
        resp["msg"] = "需要code"
        return jsonify(resp)
    openid = MemberService.get_wx_openid(code)
    if openid is None:
        resp["code"] = -1
        resp["msg"] = "调用微信出错"
        return jsonify(resp)
    # type=1 marks the WeChat binding.
    bind_info = OauthMemberBind.query.filter_by(openid=openid, type=1).first()
    if not bind_info:
        resp['code'] = -1
        resp['msg'] = "未绑定"
        return jsonify(resp)
    member_info = Member.query.filter_by(id=bind_info.member_id).first()
    if not member_info:
        resp['code'] = -1
        resp['msg'] = "未查询到绑定信息"
        return jsonify(resp)
    # Token format matches login(): "<auth_code>#<member_id>".
    token = "{}#{}".format(MemberService.gen_auth_code(member_info), member_info.id)
    resp["data"] = {"token": token}
    return jsonify(resp)
| [
"584563542@qq.com"
] | 584563542@qq.com |
5b7a6d3de8a6a095f5ae66152608704c05e1b35f | 3b6b6a580bf6127b288a42ab4519565adc720fbd | /days/081-084-unit-testing/demo/billtracker/billtracker/views/default.py | 3f33c6eaf1a96ca4e7eb9e56dae0090be863f2f8 | [] | no_license | talkpython/100daysofweb-with-python-course | f1f296a5e52670fccba895e078318a5098f96e2f | c6f2fb22a29f74284b2d52ee019e0ace6a6353fc | refs/heads/master | 2023-07-19T11:21:46.515974 | 2023-04-25T21:34:27 | 2023-04-25T21:34:27 | 134,765,291 | 627 | 495 | null | 2023-04-25T21:34:28 | 2018-05-24T20:28:21 | JavaScript | UTF-8 | Python | false | false | 1,271 | py | from pyramid.httpexceptions import HTTPFound
from pyramid.request import Request
from pyramid.response import Response
from pyramid.view import view_config
from billtracker.data import repository
from billtracker.viewmodels.default.index_viewmodel import IndexViewModel
from billtracker.viewmodels.default.bill_details_viewmodel import BillDetailsViewModel
@view_config(route_name='home', renderer='../templates/home/default.pt')
def home(request: Request):
    """Render the bill dashboard for the (hard-coded) demo user."""
    return IndexViewModel(request, user_id=1).to_dict()
@view_config(route_name='details',
             renderer='../templates/home/details.pt',
             request_method='GET')
def details_get(request: Request):
    """Show the details page for one bill; 404 when the bill is unknown."""
    view_model = BillDetailsViewModel(request, user_id=1)
    return view_model.to_dict() if view_model.bill else Response(status=404)
@view_config(route_name='details',
             renderer='../templates/home/details.pt',
             request_method='POST')
def details_post(request: Request):
    """Record a payment against a bill, then redirect back to its page."""
    view_model = BillDetailsViewModel(request, user_id=1)
    if not view_model.bill:
        return Response(status=404)

    view_model.from_form()
    if view_model.error:
        # Validation failed: re-render the form with the error shown.
        return view_model.to_dict()

    repository.add_payment(view_model.amount, view_model.bill_id)
    return HTTPFound(location='/bill/{}'.format(view_model.bill_id))
| [
"mikeckennedy@gmail.com"
] | mikeckennedy@gmail.com |
47168d33475c995b581c6678f725b0552cf48aa9 | 3611880ca0cb9266ca30aeaa318342e64bf5302e | /workers/record_cleaner/appcast.py | 8dcb0d2b24ab40fb5236dc864ddb7d386e77ff1d | [] | no_license | Roychenlei/Algorithms_Learning | dff9311ae2ec61db8188e880e19a1e6432fb3fd8 | 468135e10d490dd861d3826bfbd4776d9261dbb7 | refs/heads/master | 2021-06-01T20:21:17.307450 | 2019-08-06T09:32:45 | 2019-08-06T09:32:45 | 96,177,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from .base_record_parser import BaseParser
class Parser(BaseParser):
    """Record parser for the Appcast ("AC") job feed."""
    source_name = 'AC'       # feed identifier used downstream
    desc_tag_name = 'body'   # field that carries the job description
    def build_id(self):
        # May return None when the feed row has no job_reference.
        return self.orig_data.get('job_reference')
    def build_industry(self):
        # category may exist in orig data with a null value, so the
        # single-element result can be [None];
        # the output is a list with at least 1 industry
        return [self.orig_data.get('appcast_category')]
    def build_price(self):
        # Fixed pricing tier for every Appcast record.
        return 'PAY_SCALE_2'
| [
"roychenfly@gmail.com"
] | roychenfly@gmail.com |
dfe8082e2726ca32bc985e3483fbc0681f060f9e | 4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e | /scadparser/grammar/ScadModel.py | a8c274c58936bf26cbee79f06bab9ab9de132def | [] | no_license | rblack42/ScadParser | 71081adb99ec03e78bc78b4101562b7fa1bab134 | a9cc10b23c6515a53065dfb58b23881d0145f88d | refs/heads/master | 2023-07-11T03:51:53.434534 | 2021-08-27T02:03:37 | 2021-08-27T02:03:37 | 397,718,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | #!/usr/bin/env python
# CAVEAT UTILITOR
#
# This file was automatically generated by TatSu.
#
# https://pypi.python.org/pypi/tatsu/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import annotations
from tatsu.objectmodel import Node
from tatsu.semantics import ModelBuilderSemantics
class ModelBase(Node):
    """Common base for all generated SCAD semantic-model node classes."""
    pass
class SCADModelBuilderSemantics(ModelBuilderSemantics):
    """TatSu semantics that auto-register every ModelBase subclass here."""
    def __init__(self, context=None, types=None):
        # Collect all node classes defined in this module (subclasses of
        # ModelBase) and append any caller-supplied types.
        types = [
            t for t in globals().values()
            if type(t) is type and issubclass(t, ModelBase)
        ] + (types or [])
        super(SCADModelBuilderSemantics, self).__init__(context=context, types=types)
| [
"roie.black@gmail.com"
] | roie.black@gmail.com |
e9497da1b7d035087b8eb5ad7e528fddf548b62a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/DeepPose/mmpose/apis/train.py | 76ad0f82d5df6af09e71b3f5aea7b724e26599de | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,446 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import warnings
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook
from mmpose.core import DistEvalHook, EvalHook, build_optimizers
from mmpose.core.distributed_wrapper import DistributedDataParallelWrapper
from mmpose.datasets import build_dataloader, build_dataset
from mmpose.utils import get_root_logger
from apex import amp
from apex.optimizers import NpuFusedAdam
# Prefer the mmcv implementation of the FP16 optimizer hook; fall back to
# the deprecated mmpose copy on older mmcv versions.
try:
    from mmcv.runner import Fp16OptimizerHook
except ImportError:
    warnings.warn('Fp16OptimizerHook from mmpose will be deprecated from '
                  'v0.15.0. Please install mmcv>=1.1.4')
    from mmpose.core import Fp16OptimizerHook
from mmcv.runner.optimizer.builder import OPTIMIZERS
# NOTE(review): NpuFusedAdam is already imported above — duplicate import.
from apex.optimizers import NpuFusedAdam
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (Dataset): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
dataloader_setting = dict(
samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
# cfg.gpus will be ignored if distributed
num_gpus=meta['world_size'],
dist=distributed,
seed=cfg.seed)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('train_dataloader', {}))
data_loaders = [
build_dataloader(ds, **dataloader_setting) for ds in dataset
]
# determine wether use adversarial training precess or not
use_adverserial_train = cfg.get('use_adversarial_train', False)
OPTIMIZERS.register_module(None,True,NpuFusedAdam)
# build runner
optimizer = build_optimizers(model, cfg.optimizer)
#apex
model.backbone, optimizer = amp.initialize(model.backbone,
optimizer,
opt_level='O2',
loss_scale=128,
combine_grad=True)
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', True)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
if use_adverserial_train:
# Use DistributedDataParallelWrapper for adversarial training
model = DistributedDataParallelWrapper(
model,
device_ids=[meta['rank']],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDistributedDataParallel(
model,
device_ids=[meta['rank']],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
runner = EpochBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
if use_adverserial_train:
# The optimizer step process is included in the train_step function
# of the model, so the runner should NOT include optimizer hook.
optimizer_config = None
else:
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
eval_cfg = cfg.get('evaluation', {})
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
dataloader_setting = dict(
samples_per_gpu=1,
workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
drop_last=False,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('val_dataloader', {}))
val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
bd8e4b98babedd08b66d1135954ac48de6ff66b9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startQiskit327.py | 8781c3f761d36325fd575c80bf67b3f550e6def0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.y(input_qubit[3]) # number=8
prog.y(input_qubit[3]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=10
prog.cx(input_qubit[1],input_qubit[0]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit327.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
1284b69fe8bdaab6e0a57bb5e72c81d69541cd45 | 59642dc8f6d4059d2d36f4f64a92d8edf30c33c6 | /plone/app/blocks/layoutbehavior.py | 0911c4725ca5b68aec437581eabf16baac0d6cec | [] | no_license | lrowe/plone.app.blocks | a855691f2b41ef8ad4b70d8a03c1076bcb1031f8 | 7a4df3a0aff953fe872f85b904ff5f51826ff7b1 | refs/heads/master | 2021-01-24T02:39:12.443662 | 2011-09-22T16:18:50 | 2011-09-22T16:18:50 | 2,428,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | from zope.interface import implements, alsoProvides, Interface
from zope import schema
from plone.app.blocks.interfaces import IOmittedField
from plone.app.blocks.interfaces import ILayoutField
from plone.app.blocks.interfaces import _
class LayoutField(schema.Text):
"""A field used to store layout information
"""
implements(ILayoutField)
class ILayoutAware(Interface):
"""Behavior interface to make a type support layout.
"""
content = LayoutField(
title=_(u"Content"),
description=_(u"Content of the object"),
required=False,
)
pageSiteLayout = schema.Choice(
title=_(u"Page site layout"),
description=_(u"Site layout to apply to the current page"),
vocabulary="plone.availableSiteLayouts",
required=False,
)
sectionSiteLayout = schema.Choice(
title=_(u"Section site layout"),
description=_(u"Site layout to apply to pages under this section"),
vocabulary="plone.availableSiteLayouts",
required=False,
)
try:
from plone.autoform.interfaces import IFormFieldProvider
alsoProvides(ILayoutAware, IFormFieldProvider)
except ImportError:
pass
alsoProvides(ILayoutAware['content'], IOmittedField)
alsoProvides(ILayoutAware['pageSiteLayout'], IOmittedField)
alsoProvides(ILayoutAware['sectionSiteLayout'], IOmittedField)
| [
"optilude@gmail.com"
] | optilude@gmail.com |
87f332437c9bc34c9d50b234bcc08bcea6e2ca64 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /shared/db_opear/configs_data/equipment/equipment_strengthen_config.py | 90fb9e642feb720d0537cc1c61fd07df596fbc39 | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-7-7下午2:15.
"""
from shared.db_opear.configs_data.common_item import CommonItem
class EquipmentStrengthenConfig(object):
def __init__(self):
self._equipment_strengthen = {}
def parser(self, config_value):
for row in config_value:
self._equipment_strengthen[row.get('level')] = CommonItem(row)
return self._equipment_strengthen
| [
"zxzxck@163.com"
] | zxzxck@163.com |
b528cfce7e6dc4f36240451dbd03f49e575f7fc2 | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /include/tensorflow/lite/testing/op_tests/unfused_gru.py | 786a429fe9aa2232dff76f93ef089a132bee077a | [
"MIT"
] | permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 2,400 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unfused_gru."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
"""Build the graph for unfused_gru."""
inputs = [
tf.compat.v1.placeholder(
tf.float32, [parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for unfused_gru."""
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
| [
"finn.niu@apptech.com.hk"
] | finn.niu@apptech.com.hk |
a2a61cea90544794a4610062d9a3c01c4da83557 | 9b1c5c3fb40ca4fbd2123a321296d6b7924a84ad | /core/models.py | e2db994d3c85f0abeee175ad5900bd6f46e758b7 | [] | no_license | thepsalmist/theStore | 4ab3874605d45014ebe72bbf2303bf453afd0f17 | 7586b1f425925ecf6b08f8ac6b11e4381604d616 | refs/heads/master | 2022-03-27T08:12:01.020333 | 2019-12-18T09:00:40 | 2019-12-18T09:00:40 | 226,167,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,033 | py | from django.db import models
from django.conf import settings
from django.shortcuts import reverse
from django.utils import timezone
from PIL import Image
# create label choices
LABEL_CHOICES = (("P", "primary"), ("S", "secondary"), ("D", "danger"))
class Category(models.Model):
# create tuple, first category goes to dB second displayed on screen
CATEGOTY_CHOICES = (
("EDUCATION", "Education"),
("BUSINESS", "Business"),
("DESIGN", "Design"),
("SECURITY", "Security"),
("GAMES", "Games"),
)
title = models.CharField(choices=CATEGOTY_CHOICES,
max_length=20, default="BUSINESS")
slug = models.SlugField(max_length=200, db_index=True)
class Meta:
ordering = ("title",)
verbose_name = "category"
verbose_name_plural = "categories"
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("core:home", args=[self.slug])
class Brand(models.Model):
BRAND_CHOICES = (
("MICROSOFT", "Microsoft"),
("ADOBE", "Adobe"),
("AVAST", "Avast"),
("KASPERSKY", "Kaspersky"),
("NORTON", "Norton"),
)
title = models.CharField(choices=BRAND_CHOICES,
max_length=20, default="MICROSOFT")
slug = models.SlugField(max_length=200, db_index=True)
class Meta:
ordering = ("title",)
verbose_name = "brand"
verbose_name_plural = "brands"
def __str__(self):
return self.title
class Item(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
price = models.FloatField()
available = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True)
discount_price = models.FloatField(blank=True, null=True)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, blank=True, default=1
)
brand = models.ForeignKey(
Brand, on_delete=models.CASCADE, blank=True, default=1)
label = models.CharField(choices=LABEL_CHOICES, max_length=1, default="P")
image = models.ImageField(default="default.jpg",
upload_to="Items/%Y/%M/%d")
slug = models.SlugField(max_length=200, db_index=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("core:product", kwargs={"slug": self.slug, "id": self.id})
def get_add_to_cart_url(self):
return reverse("core:add_to_cart", kwargs={"slug": self.slug, "id": self.id})
def remove_from_cart_url(self):
return reverse(
"core:remove_from_cart", kwargs={"slug": self.slug, "id": self.id}
)
class OrderItem(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True
)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
ordered = models.BooleanField(default=False)
quantity = models.IntegerField(default=1)
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_total_item_price(self):
return self.quantity * self.item.price
def get_total_discount_item_price(self):
return self.quantity * self.item.discount_price
def get_final_price(self):
if self.item.discount_price:
return self.get_total_discount_item_price()
return self.get_total_item_price()
class Order(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
items = models.ManyToManyField(OrderItem)
startdate = models.DateTimeField(auto_now_add=True)
ordered_date = models.DateTimeField()
ordered = models.BooleanField(default=False)
def __str__(self):
return self.user.username
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_final_price()
return total
| [
"xf.xavierfrank@gmail.com"
] | xf.xavierfrank@gmail.com |
71a7c241fcfdd2288e94c907df7bf4fcf021f70a | 844564c24b1757110e00f6acff5658eea52d540a | /train_lgbm_model.py | 3b41c9c94b8c1e4a0db203573e717778bb377aab | [] | no_license | pekkipo/earth_quakes_challenge | df9dd375607ec72eea42985f284c46f221c9048b | be5c44e83102e6cf0b95ad7eb4002bc82748535e | refs/heads/master | 2020-07-26T14:44:24.145905 | 2019-09-16T00:59:14 | 2019-09-16T00:59:14 | 208,679,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 16:32:47 2019
@author: aleks
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import mean_absolute_error
import lightgbm as lgb
import os
import gc
from datetime import datetime
from imblearn.over_sampling import SMOTE
from sklearn.pipeline import Pipeline
import feature_engineering as fe
import get_models
import params
gc.enable()
NAME = 'lgbm'
def fit_lgb(X_fit, y_fit, X_val, y_val, counter, lgb_path):
model = get_models.get_lgbm_2() # Lol this will not work
model.fit(X_fit, y_fit,
eval_set=[(X_val, y_val)],
verbose=3500,
early_stopping_rounds=3500) # why early stopping?
cv_val = model.predict(X_val)
#Save LightGBM Model
save_to = '{}{}_fold{}.txt'.format(lgb_path, NAME, counter+1)
model.booster_.save_model(save_to)
return cv_val
def train_stage(df, y_df, df_ids, lgb_path):
lgb_cv_result = np.zeros(df.shape[0])
skf = StratifiedKFold(n_splits=params.num_folds, shuffle=True, random_state=42) # what is this doing?
skf.get_n_splits(df_ids, y_df) # and this? -> splits the data in train and test?
print('\nModel Fitting...')
for counter, ids in enumerate(skf.split(df_ids, y_df)):
print('\nFold {}'.format(counter+1))
X_fit, y_fit = df.values[ids[0]], y_df[ids[0]]
X_val, y_val = df.values[ids[1]], y_df[ids[1]]
lgb_cv_result[ids[1]] += fit_lgb(X_fit, y_fit, X_val, y_val, counter, lgb_path)
del X_fit, X_val, y_fit, y_val
gc.collect()
mae_lgb = round(mean_absolute_error(y_df, lgb_cv_result), 6)
print('\nLightGBM VAL MAE: {}'.format(mae_lgb))
return 0
def prediction_stage(df, lgb_path, submit=True):
lgb_models = sorted(os.listdir(lgb_path))
lgb_result = np.zeros(df.shape[0])
print('\nMake predictions...\n')
for m_name in lgb_models:
#Load LightGBM Model
model = lgb.Booster(model_file='{}{}'.format(lgb_path, m_name))
lgb_result += model.predict(df.values)
lgb_result /= len(lgb_models)
if submit:
submission = pd.read_csv(params.submission_file)
submission['time_to_failure'] = lgb_result
submission.to_csv(params.submission_out_file, index=False)
return 0
############ RUN
train_path = 'data/train.csv'
test_path = 'data/test.csv'
print('Load Train Data.')
df_train = pd.read_csv(train_path)
print('\nShape of Train Data: {}'.format(df_train.shape))
print('Load Test Data.')
df_test = pd.read_csv(test_path)
print('\nShape of Test Data: {}'.format(df_test.shape))
#Create dir for models
#os.mkdir(lgb_path)
print('Train Stage.\n')
train_stage(df_train, params.lgb_path)
print('Prediction Stage.\n')
prediction_stage(df_test, params.lgb_path, False)
print('\nDone.') | [
"pekkipodev@gmail.com"
] | pekkipodev@gmail.com |
30a5b8c7cc8bd95a82e561e28a5d7be11faff95e | 3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7 | /mounth001/day04/exercise03.py | 4d2f9b5972212719de0f5a3f68193db7e92b29ea | [] | no_license | Molly-l/66 | 4bfe2f93e726d3cc059222c93a2bb3460b21ad78 | fae24a968f590060522d30f1b278fcfcdab8b36f | refs/heads/master | 2020-09-28T12:50:18.590794 | 2019-11-27T04:42:28 | 2019-11-27T04:42:28 | 226,782,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py |
sum=0
for i in range(5):
num1 = int(input('输入第一个数'))
num2 = int(input('输入第二个数'))
sum=str(num1)+'+'+(num2)+'+?'
sum=num1+num2
sum +=20
print(sum) | [
"769358744@qq.com"
] | 769358744@qq.com |
baa83d0b02e9df48bb9be99967b9f99428558a1f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_277/ch164_2020_06_20_22_09_38_618562.py | 3df906c7e0595b431ca66d73d3e74969660f4e0f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | def traduz(lista, dicionario):
lista2 = []
for i in range(len(lista)):
ingles = lista[i]
for j in dicionario.keys():
traducao = dicionario[ingles]
lista2.append(traducao)
return lista2 | [
"you@example.com"
] | you@example.com |
a501db98c6eb3c2225a4f5809c45cf52064f7c14 | 625574700973e0d52c5435c0fa60007a0a8cc0a1 | /contest/HHKB/d.py | 6185adaed6dd05d0bb42d27752175732161d8922 | [] | no_license | konchanksu/AtCoder-practice | b1c63fb6f6da8a409617b23438edf5469773049d | 46a7d9b9b33d4fbbcffeb6bb90d4bfca8d5dfa2a | refs/heads/main | 2023-08-24T17:35:30.193120 | 2021-09-29T05:38:40 | 2021-09-29T05:38:40 | 311,109,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | MOD = 10 ** 9 + 7
for _ in range(int(input())):
N, A, B = map(int, input().split())
pt = (pow((N - A + 1), 2, MOD) * pow((N - B + 1), 2, MOD)) % MOD
for i in range((N + 1) // 2):
t = ((N - i * 2) - A) * 4
if t < 0:
break
elif t == 0:
t = 1
| [
"nyankomi59@gmail.com"
] | nyankomi59@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.