blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf830a313be8f3b1b280ae07f12875ce12232785 | d2e46edc507addd113c583ca160b829c20303f74 | /algorithm/__init__.py | b8a5f3e5dda777f5d6cd21a5b0024610a48795e6 | [] | no_license | heibaishicha/ytc_algorithm | e698f6f17af9aeb4fe66c2dfc6c1a16223c4bb96 | 9b1823f587acd1929d97e465c624e726f37be470 | refs/heads/master | 2020-05-07T09:35:59.489357 | 2019-04-09T14:24:11 | 2019-04-09T14:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/30 17:48
# @Author : LuoJie
# @Email : 2715053558@qq.com
# @File : __init__.py.py
# @Software: PyCharm
| [
"admin@example.com"
] | admin@example.com |
b227982a6f4b1dcdf20a68d5983baf11b4135ae1 | 53c7428ddf3a09b7580e39a752f4eb3cc26cab92 | /beta/admin.py | 50fb29dc22b861bdfd5d48c0a1843fa35a8c7399 | [] | no_license | ozkilim/gauRENTeed | 8e8280b699ff6a5f759e4f8293ef1cfb282ca4d9 | a3d84dca79d3081ef8cbebc862db889d8d9c8d8d | refs/heads/main | 2023-06-05T21:36:47.734496 | 2021-05-23T14:38:05 | 2021-05-23T14:38:05 | 347,305,433 | 0 | 0 | null | 2021-06-30T17:46:16 | 2021-03-13T07:38:39 | CSS | UTF-8 | Python | false | false | 143 | py | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(Property)
admin.site.register(Review)
| [
"ozkilim@hotmail.co.uk"
] | ozkilim@hotmail.co.uk |
7b3eef18163953e4a26da2ce3866588b6473418c | 60696fa455101fbd2bef5efc19910d2fc856c324 | /examples/tts-auth.py | 65389f7a4bfdda8e74b3c575472f7e7ef8ff89b4 | [] | no_license | yumilceh/libqi-python | 263baca432f118f15057c8a2b43e0321bb5609e1 | 900643316c272fcdb9f5de76111125fb05927108 | refs/heads/master | 2020-04-10T22:52:11.210697 | 2018-12-11T13:50:30 | 2018-12-11T13:50:30 | 161,334,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import argparse
import sys
import qi
import inspect
class Authenticator:
def __init__(self, user, pswd):
self.user = user
self.pswd = pswd
def initialAuthData(self):
cm = {'user': self.user, 'token': self.pswd}
return cm
class ClientFactory:
def __init__(self, user, pswd):
self.user = user
self.pswd = pswd
def newAuthenticator(self):
return Authenticator(self.user, self.pswd)
def parse_options(argv=sys.argv[1:]):
parser = argparse.ArgumentParser()
parser.add_argument("url", help="robot url (protocol + ip + port), eg tcps://10.0.0.0:9503")
parser.add_argument("user", help="username")
parser.add_argument("pswd", nargs='?', help="password")
args = parser.parse_args(argv)
return args.url, args.user, args.pswd
def run():
url, user, pswd = parse_options()
factory = ClientFactory(user, pswd)
session = qi.Session()
session.setClientAuthenticatorFactory(factory)
session.connect(url)
tts = session.service("ALTextToSpeech")
tts.call("say", "Hello python")
if __name__ == u"__main__":
run()
| [
"you@example.com"
] | you@example.com |
2d450c7830464d10c84d114edebc2209bf0aa441 | ea47c80745a2dca6491b65d1f4664acc87d38388 | /test.py | 863a0f7cb2eaabb0358a4f2ae3372585c22d4e2c | [
"MIT"
] | permissive | teja0508/Finding-Abbrevations-Using-Regx | cc7532bb5e084a5539e0821ec7a62bf7f5f9fb25 | ebfddd595a9c8d446510397e40b3138289dae903 | refs/heads/master | 2022-11-29T22:45:00.884441 | 2020-08-09T03:06:43 | 2020-08-09T03:06:43 | 286,157,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from abbr import findall
text = 'The published OASIS standard does not give any expansion of MQTT.[(Banks and Gupta 2014)]\r\n\r\nMQTT used to mean __MQ Telemetry Transport__, where MQ is sometimes mistaken for Message Queue. In fact, MQ referred to IBM'
d = findall(text) | [
"lchandratejareddy@gmail.com"
] | lchandratejareddy@gmail.com |
de47453e3fa1ead020dfe9f529636ee7c789629d | e3d739317399940aac5e2fcd468da3fa94c87a1c | /Projects/DatesandDatetime/venv/bin/pip3 | baa1772b22757d923e9b88a276002681643ddcca | [] | no_license | mohsen-mehrabani/Python_Udemy_Course | 9a03222b3ec5963108a48a044b15e2869b6d3338 | fdaef0fd970a956bccb0ee28de42b8ecf44d59b6 | refs/heads/master | 2021-01-30T13:11:10.762363 | 2020-05-01T14:02:16 | 2020-05-01T14:02:16 | 243,500,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | #!/home/mohsen/IdeaProjects/PythonPrograms/DatesandDatetime/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"mohsen.mehrabani@hotmail.com"
] | mohsen.mehrabani@hotmail.com | |
fd0776ab898fb794460db69632b705ff8c4cc685 | 636b31e9bef7c82183f288d441f08d448af49f9c | /parlai/tasks/vqa_v2/agents.py | 31706b0bd267d235d3306d87518535bc609a5f0d | [
"MIT"
] | permissive | ShaojieJiang/tldr | a553051c6b615237212082bbc21b09e9069929af | c878ed10addbae27fa86cc0560f168b14b94cf42 | refs/heads/master | 2023-08-21T23:36:45.148685 | 2020-04-08T15:09:35 | 2020-04-08T15:09:35 | 247,427,860 | 12 | 0 | MIT | 2023-08-11T19:52:22 | 2020-03-15T08:25:21 | Python | UTF-8 | Python | false | false | 5,968 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FixedDialogTeacher
from parlai.core.image_featurizers import ImageLoader
from parlai.tasks.vqa_v1.agents import VQADataset
from .build import build
from parlai.tasks.coco_caption.build_2014 import buildImage as buildImage_2014
from parlai.tasks.coco_caption.build_2015 import buildImage as buildImage_2015
import json
import os
def _path(opt):
build(opt)
buildImage_2014(opt)
buildImage_2015(opt)
dt = opt['datatype'].split(':')[0]
img_version = None
if dt == 'train':
ques_suffix = 'v2_OpenEnded_mscoco_train2014'
annotation_suffix = 'v2_mscoco_train2014'
img_suffix = os.path.join('train2014', 'COCO_train2014_')
img_version = '2014'
elif dt == 'valid':
ques_suffix = 'v2_OpenEnded_mscoco_val2014'
annotation_suffix = 'v2_mscoco_val2014'
img_suffix = os.path.join('val2014', 'COCO_val2014_')
img_version = '2014'
elif dt == 'test':
ques_suffix = 'v2_OpenEnded_mscoco_test2015'
annotation_suffix = 'None'
img_suffix = os.path.join('test2015', 'COCO_test2015_')
img_version = '2015'
else:
raise RuntimeError('Not valid datatype.')
data_path = os.path.join(opt['datapath'], 'VQA-v2', ques_suffix + '_questions.json')
annotation_path = os.path.join(
opt['datapath'], 'VQA-v2', annotation_suffix + '_annotations.json'
)
image_path = os.path.join(
opt['datapath'], 'COCO-IMG-{}'.format(img_version), img_suffix
)
return data_path, annotation_path, image_path
class DefaultDataset(VQADataset):
pass
class OeTeacher(FixedDialogTeacher):
"""
VQA v2.0 Open-Ended teacher, which loads the json VQA data and implements the
``get`` method to return additional metadata.
"""
def __init__(self, opt, shared=None):
super().__init__(opt)
self.image_mode = opt.get('image_mode', 'no_image_model')
if shared and 'ques' in shared:
# another instance was set up already, just reference its data
self.ques = shared['ques']
if 'annotation' in shared:
self.annotation = shared['annotation']
self.image_loader = shared['image_loader']
else:
# need to set up data from scratch
data_path, annotation_path, self.image_path = _path(opt)
self._setup_data(data_path, annotation_path)
self.image_loader = ImageLoader(opt)
self.reset()
def reset(self):
super().reset() # call parent reset so other fields can be set up
self.example = None # set up caching fields
def num_examples(self):
return len(self.ques['questions'])
def num_episodes(self):
return self.num_examples()
def submit_load_request(self, image_id):
img_path = self.image_path + '%012d.jpg' % (image_id)
self.data_loader.request_load(
self.receive_data, self.image_loader.load, (img_path,)
)
def get(self, episode_idx, entry_idx=0):
qa = self.ques['questions'][episode_idx]
question = qa['question']
action = {'text': question, 'image_id': qa['image_id'], 'episode_done': True}
if not self.datatype.startswith('test'):
# test set annotations are not available for this dataset
anno = self.annotation['annotations'][episode_idx]
action['labels'] = [ans['answer'] for ans in anno['answers']]
return action
def next_example(self):
"""
Returns the next example from this dataset after starting to queue up the next
example.
"""
ready = None
# pull up the currently queued example
if self.example is not None:
if self.image_mode != 'no_image_model':
# move the image we loaded in the background into the example
image = self.data_queue.get()
self.example['image'] = image
ready = (self.example, self.epochDone)
# get the next base example: super().next_example() calls self.get()
self.example, self.epochDone = super().next_example()
if self.image_mode != 'no_image_model' and 'image_id' in self.example:
# load the next image in the background
image_id = self.example['image_id']
self.submit_load_request(image_id)
# Try to return the previously cached example
if ready is None:
return self.next_example()
else:
return ready
def share(self):
shared = super().share()
shared['ques'] = self.ques
if hasattr(self, 'annotation'):
shared['annotation'] = self.annotation
shared['image_loader'] = self.image_loader
return shared
def _setup_data(self, data_path, annotation_path):
print('loading: ' + data_path)
with open(data_path) as data_file:
self.ques = json.load(data_file)
if not self.datatype.startswith('test'):
print('loading: ' + annotation_path)
with open(annotation_path) as data_file:
self.annotation = json.load(data_file)
class AllTeacher(OeTeacher):
"""
VQA v2.0 Open-Ended teacher, which inherits from OeTeacher and gives access to the
multiple choice answer.
"""
def act(self):
action = super().act()
if not self.datatype.startswith('test'):
anno = self.annotation['annotations'][self.episode_idx]
self.mclabel = [anno['multiple_choice_answer']]
if self.datatype.startswith('train'):
action['mc_label'] = self.mclabel
return action
class DefaultTeacher(OeTeacher):
pass
| [
"s.jiang@uva.nl"
] | s.jiang@uva.nl |
2500e178fcabab91be0ceabc415ea17af246f442 | 21ba58c82ebf5d1de8933010154c951bfb08c169 | /setup.py | 81b0af01c8f965e3fb45a52e798ec4d8ebb602f1 | [] | no_license | nanounanue/modelvis | 6bb7af256f9bd10d0fc349e8788d3bdf946f2c01 | 7780f89835ed16627ae5cd55320c3c9c171e1016 | refs/heads/master | 2020-03-16T17:47:59.145349 | 2018-05-08T06:43:52 | 2018-05-08T06:43:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from setuptools import setup
import re
import os
def get_version():
"""Reads the version from modelvis.py.
"""
root = os.path.dirname(__file__)
version_path = os.path.join(root, "modelvis.py")
text = open(version_path).read()
rx = re.compile("^__version__ = '(.*)'", re.M)
m = rx.search(text)
version = m.group(1)
return version
version = get_version()
setup(
name="modelvis",
version=version,
description="Visualising Machine Learning Models",
license="MIT",
install_requires=[
'numpy',
'pandas',
'scikit-learn',
'matplotlib',
'requests',
'seaborn'
],
py_modules=['modelvis'],
author="Amit Kapoor",
author_email="amitkaps@gmail.com",
platforms="any"
)
| [
"anandology@gmail.com"
] | anandology@gmail.com |
934c099b4df15f88f638ee3236594e9fe279dd5d | e467229c16d55eef78950c64c91ac3585a12a8f7 | /doctorhub/home/specialties/articles/serializers.py | bdbb3ec9ace6ca78a14b1ea8c3c86d5bf521aa44 | [] | no_license | VahediRepositories/DigitalTeb | 5cde8c15fde2177a3688b6ba8e5dcc207df44446 | 65e0e7d83a39897eada2bb915c3e4bf503aa16cb | refs/heads/master | 2020-08-12T08:53:55.791992 | 2020-01-30T06:29:57 | 2020-01-30T06:29:57 | 214,732,699 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from ...models import *
class ArticlePageSerializer(serializers.ModelSerializer):
class Meta:
model = ArticlePage
fields = [
'id', 'owner', 'title',
]
| [
"mcs.hsn.vahedi@gmail.com"
] | mcs.hsn.vahedi@gmail.com |
6ac52375ab06ded966cb88f6edb763f031cc846c | da960bcd542ec9a607e80665374e1e4605895b43 | /src/zeam/form/autofields/tests/test_doctest.py | 80b7ad83fcb8e4e208baecab04cd1ef7d1ea7b1e | [] | no_license | thefunny42/Zeam-Form-AutoFields | 3d13a9fb1b2c45c8344047d1772c75a76147e59c | 333538f8558075616202306e84de20b610a75db1 | refs/heads/master | 2020-05-31T19:18:54.066041 | 2012-05-02T12:17:22 | 2012-05-02T12:17:22 | 364,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py |
import unittest
from zope.testing import doctest
from grokcore.component.testing import grok_component
from zeam.form.autofields.testing import FunctionalLayer
def test_suite():
optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
globs= {'grok_component': grok_component}
suite = unittest.TestSuite()
for filename in ['fields.txt',]:
test = doctest.DocFileSuite(
filename,
optionflags=optionflags,
globs=globs)
test.layer = FunctionalLayer
suite.addTest(test)
return suite
| [
"thefunny@gmail.com"
] | thefunny@gmail.com |
bca22e2b65296791de22526ef43560a52b5a5833 | 1d1410d4b3455a6fed63185c2312c517d2c7c602 | /tests/test_nanopb_encode_decode.py | 4ca7e8b48230403a88edcd95743e61d0733fa992 | [] | no_license | wheeler-microfluidics/i2c_rpc | 05cbbc1ecb2eef0aa8eb8e886eeda42f55c4cfdd | 17205565aa985fce54a03961d8575c3a055413d8 | refs/heads/master | 2021-01-13T16:10:14.290001 | 2017-07-20T14:31:45 | 2017-07-20T14:31:45 | 25,372,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | from nose.tools import eq_
from i2c_rpc.board import I2CBoard
import numpy as np
import serial_device
def test_nanopb_encode_decode():
port = list(serial_device.get_serial_ports())[0]
i2c_local = I2CBoard(port=port)
i2c_devices = np.fromstring(i2c_local.i2c_scan(),
dtype=np.uint8).astype(int)
assert(len(i2c_devices) == 1)
remote_address = i2c_devices[0]
# Test `int` and `uint` for 8, 16, and 32-bit widths.
for signedness in ('u', ''):
for width in (8, 32):
if not signedness and 8:
# Skip signed 8-bit integers, since we do not have enough
# memory on the device to add the method for testing.
continue
upper = (1 << width) - 1 if signedness else (1 << (width - 1)) - 1
values = np.linspace(0, upper, num=100).astype(int)
for value in values:
yield (_test_nanopb_encode_echo_integral, signedness, width,
i2c_local, remote_address, value)
values = np.linspace(0, 100)
for value in values:
yield (_test_nanopb_encode_echo_float, i2c_local, remote_address,
value)
def _test_nanopb_encode_echo_integral(signedness, width, board, remote_address,
value):
method = getattr(board, 'test_nanopb_encode_echo_%sint%d' % (signedness,
width))
eq_(method(address=remote_address, value=value), value)
def _test_nanopb_encode_echo_float(board, remote_address, value):
np.allclose([board.test_nanopb_encode_echo_float(address=remote_address,
value=value)], [value])
| [
"christian@fobel.net"
] | christian@fobel.net |
828bcec774b921791faa7ea5911bbede0ac2dc7e | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/dashboard/v4_1/models/dashboard_group_entry.py | 5c3c9540e99773cdd7bf00ec7c14e45c7c07c20c | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 2,728 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .dashboard import Dashboard
class DashboardGroupEntry(Dashboard):
"""DashboardGroupEntry.
:param _links:
:type _links: :class:`ReferenceLinks <dashboard.v4_1.models.ReferenceLinks>`
:param description: Description of the dashboard.
:type description: str
:param eTag: Server defined version tracking value, used for edit collision detection.
:type eTag: str
:param id: ID of the Dashboard. Provided by service at creation time.
:type id: str
:param name: Name of the Dashboard.
:type name: str
:param owner_id: ID of the Owner for a dashboard. For any legacy dashboards, this would be the unique identifier for the team associated with the dashboard.
:type owner_id: str
:param position: Position of the dashboard, within a dashboard group. If unset at creation time, position is decided by the service.
:type position: int
:param refresh_interval: Interval for client to automatically refresh the dashboard. Expressed in minutes.
:type refresh_interval: int
:param url:
:type url: str
:param widgets: The set of Widgets on the dashboard.
:type widgets: list of :class:`Widget <dashboard.v4_1.models.Widget>`
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'description': {'key': 'description', 'type': 'str'},
'eTag': {'key': 'eTag', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'owner_id': {'key': 'ownerId', 'type': 'str'},
'position': {'key': 'position', 'type': 'int'},
'refresh_interval': {'key': 'refreshInterval', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'},
'widgets': {'key': 'widgets', 'type': '[Widget]'},
}
def __init__(self, _links=None, description=None, eTag=None, id=None, name=None, owner_id=None, position=None, refresh_interval=None, url=None, widgets=None):
super(DashboardGroupEntry, self).__init__(_links=_links, description=description, eTag=eTag, id=id, name=name, owner_id=owner_id, position=position, refresh_interval=refresh_interval, url=url, widgets=widgets)
| [
"tedchamb@microsoft.com"
] | tedchamb@microsoft.com |
7f5300fb30579d527ab9d31b29e298cc226c82f3 | 9bd23db85cfd5f1d068f4a0b392210ac023072d2 | /porcupine/plugins/run/terminal.py | 3ff5c2ce7f56bdd77b0ff238489fd40096c83c8b | [
"MIT"
] | permissive | bvadebruna/porcupine | a399c88e09f31f74737ffd562f99a8fed2f332b3 | 7d3f13546d375cccba73525e3b3f6240b8e74b23 | refs/heads/master | 2022-12-18T07:28:46.378698 | 2020-08-24T10:55:17 | 2020-08-24T10:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,044 | py | """Run commands in a new terminal window."""
import logging
import os
import pathlib
import platform
import shlex
import shutil
import subprocess
import tempfile
from tkinter import messagebox
from typing import List, Union
from porcupine import get_main_window, utils
log = logging.getLogger(__name__)
_this_dir = pathlib.Path(__file__).absolute().parent
if platform.system() == 'Windows':
run_script = _this_dir / 'windows_run.py'
else:
run_script = _this_dir / 'bash_run.sh'
CommandArgument = Union[str, pathlib.Path]
# getting this to work in powershell turned out to be hard :(
# TODO: tests
def _run_in_windows_cmd(
blue_message: str,
workingdir: pathlib.Path,
command: List[str]) -> None:
log.debug("using Windows command prompt")
command = [
str(utils.python_executable), str(run_script),
blue_message, str(workingdir),
] + command
if not utils.running_pythonw:
# windows wants to run python in the same terminal that
# Porcupine was started from, this is the only way to open a
# new command prompt i found and it works :) we need cmd
# because start is built in to cmd (lol)
command = ['cmd', '/c', 'start'] + command
subprocess.Popen(command)
def _run_in_osx_terminal_app(
blue_message: str,
workingdir: pathlib.Path,
command: List[str]) -> None:
log.debug("using OSX terminal.app")
bash = shutil.which('bash')
assert bash is not None
# passing arguments is not easy, these things are wrong with this:
# - i needed to cheat and use stackoverflow because i don't
# have a mac :( http://stackoverflow.com/a/989357
# - new OSX versions keep the terminal open by
# default but older versions don't, so people using old
# OSX versions need to change their terminal settings
# big thanks to go|dfish for testing an older version of this code!
# this exact code is NOT TESTED :/
real_command = [str(run_script), '--dont-wait',
blue_message, str(workingdir)] + list(map(str, command))
with tempfile.NamedTemporaryFile(
'w', delete=False, prefix='porcupine-run-') as file:
print('#!/usr/bin/env bash', file=file)
print('rm', shlex.quote(file.name), file=file) # see below
print(' '.join(map(shlex.quote, real_command)), file=file)
os.chmod(file.name, 0o755)
subprocess.Popen(['open', '-a', 'Terminal.app', file.name])
# the terminal might be still opening when we get here, that's why
# the file deletes itself
# right now the file removes itself before it runs the actual command so
# it's removed even if the command is interrupted
def _run_in_x11_like_terminal(
blue_message: str, workingdir: pathlib.Path,
command: List[str]) -> None:
terminal: str = os.environ.get('TERMINAL', 'x-terminal-emulator')
# to config what x-terminal-emulator is:
#
# $ sudo update-alternatives --config x-terminal-emulator
#
# TODO: document this
if terminal == 'x-terminal-emulator':
log.debug("using x-terminal-emulator")
terminal_or_none = shutil.which(terminal)
if terminal_or_none is None:
log.warning("x-terminal-emulator not found")
# Ellusion told me on irc that porcupine didn't find his
# xfce4-terminal, and turned out he had no x-terminal-emulator...
# i'm not sure why, but this should work
#
# well, turns out he's using arch, so... anything could be wrong
terminal_or_none = shutil.which('xfce4-terminal')
if terminal_or_none is None:
# not much more can be done
messagebox.showerror(
"x-terminal-emulator not found",
"Cannot find x-terminal-emulator in $PATH. "
"Are you sure that you have a terminal installed?")
return
terminal_path = pathlib.Path(terminal_or_none)
log.info("found a terminal: %s", terminal_path)
terminal_path = terminal_path.resolve()
log.debug("absolute path to terminal: %s", terminal_path)
# sometimes x-terminal-emulator points to mate-terminal.wrapper,
# it's a python script that changes some command line options
# and runs mate-terminal but it breaks passing arguments with
# the -e option for some reason
if terminal_path.name == 'mate-terminal.wrapper':
log.info("using mate-terminal instead of mate-terminal.wrapper")
terminal = 'mate-terminal'
else:
terminal = str(terminal_path)
else:
log.debug("using $TERMINAL, it's set to %r" % terminal)
if shutil.which(terminal) is None:
messagebox.showerror(
"%r not found" % terminal,
"Cannot find %r in $PATH. "
"Try setting $TERMINAL to a path to a working terminal program."
% terminal)
return
real_command = [str(run_script), blue_message, str(workingdir)]
real_command.extend(map(str, command))
subprocess.Popen([terminal, '-e',
' '.join(map(shlex.quote, real_command))])
# this figures out which terminal to use every time the user wants to run
# something but it doesn't really matter, this way the user can install a
# terminal while porcupine is running without restarting porcupine
def run_command(workingdir: pathlib.Path, command: List[str]) -> None:
blue_message = ' '.join(map(utils.quote, command))
widget = get_main_window() # any tkinter widget works
windowingsystem = widget.tk.call('tk', 'windowingsystem')
if windowingsystem == 'win32':
_run_in_windows_cmd(blue_message, workingdir, command)
elif windowingsystem == 'aqua' and not os.environ.get('TERMINAL', ''):
_run_in_osx_terminal_app(blue_message, workingdir, command)
else:
_run_in_x11_like_terminal(blue_message, workingdir, command)
| [
"akuviljanen17@gmail.com"
] | akuviljanen17@gmail.com |
95d3915b3a71215d5322d9bef5fda4353300f8d0 | 7db513150b7daaf1f8ac435c41b850545156d037 | /FTP_Socket/client/conf/settings.py | 5abab9954c422af6c2dc2d28b9fc5ab2c0946aaa | [] | no_license | tonglinge/MyProjects | bb94e141e1833dc38fe2b41d5b80fb4abc222cf0 | 002f80dcc07e3502610b0a0be1e91fe61bcfc42c | refs/heads/master | 2021-04-15T14:53:46.175035 | 2017-03-19T04:39:48 | 2017-03-19T04:39:48 | 61,760,333 | 4 | 5 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!/usr/bin/env python
"""
客户端配置文件
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
# 服务端地址
FTP_SERVER_IP = "127.0.0.1"
FTP_SERVER_PORT = 9999
# 文件下载保存路径
DOWNLOAD_FILE_PATH = os.path.join(BASE_DIR, "download")
# 日志文件存放路径
LOGS = os.path.join(BASE_DIR, "logs/ftpclient.log")
| [
"songfreeman@sina.com"
] | songfreeman@sina.com |
7f7339ed21e6991411edcff9a1aaa570ab227716 | 2869c3bcd00130062a67a797c5460bb3cfaa2b59 | /stubs/micropython-esp32-1_12/uos.py | af1162071d02503c30699216c491ed5cba7a8116 | [
"MIT"
] | permissive | GeorgeVasil/micropython-stubs | 61cd388e36fe3726b4c0e2d699cb7b60278886c6 | 864ad67ba1ae9abd1bfd58fad8154310851372dc | refs/heads/master | 2023-08-14T23:11:44.160424 | 2021-09-29T09:31:07 | 2021-09-29T09:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | """
Module: 'uos' on esp32 1.12.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32')
# Stubber: 1.3.2
class VfsFat:
''
def chdir():
pass
def getcwd():
pass
def ilistdir():
pass
def mkdir():
pass
def mkfs():
pass
def mount():
pass
def open():
pass
def remove():
pass
def rename():
pass
def rmdir():
pass
def stat():
pass
def statvfs():
pass
def umount():
pass
class VfsLfs2:
''
def chdir():
pass
def getcwd():
pass
def ilistdir():
pass
def mkdir():
pass
def mkfs():
pass
def mount():
pass
def open():
pass
def remove():
pass
def rename():
pass
def rmdir():
pass
def stat():
pass
def statvfs():
pass
def umount():
pass
def chdir():
pass
def dupterm():
pass
def dupterm_notify():
pass
def getcwd():
pass
def ilistdir():
pass
def listdir():
pass
def mkdir():
pass
def mount():
pass
def remove():
pass
def rename():
pass
def rmdir():
pass
def stat():
pass
def statvfs():
pass
def umount():
pass
def uname():
pass
def urandom():
pass
| [
"josverl@microsoft.com"
] | josverl@microsoft.com |
a615d8239e762e1befeb6248b16d37f7432c121a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03295/s180994390.py | aecfc00674c892442a39619d8e24d01a25712c98 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | N, M = map(int, input().split())
AB = [list(map(int, input().split())) for _ in range(M)]
AB.sort()
prev, ans = AB[0][1], 1
for a, b in AB[1:]:
if prev <= a:
ans += 1
prev = b
else:
prev = min(prev, b)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
15086b70aa9e3260993f44cb546ea4e23cad0155 | 89f079c8f53c434a35f1f5c7492eab3317092353 | /Python Pattern Programs/Symbol Patterns/Pattern 69.py | 58b35fd1d42c07522355dec836d09e6e32c72022 | [
"MIT"
] | permissive | gitter-badger/Printing-Pattern-Programs | 8ceb074f6955acdcf8df22de7af2bef28ac36243 | c3f5d2aee9f15f152b7e15f92defe1197b2b1406 | refs/heads/main | 2023-03-21T10:01:45.327121 | 2021-02-25T03:25:43 | 2021-02-25T03:25:43 | 343,142,729 | 0 | 0 | MIT | 2021-02-28T15:44:23 | 2021-02-28T15:44:23 | null | UTF-8 | Python | false | false | 212 | py | n = 4
for x in range(n, -1, -1):
for y in range(n, -1, -1):
if y > x:
print("*", end=" ")
else:
print(chr(y+65), end=" ")
print()
"""
65 > ASCII of 'A'
""" | [
"arya.shah82@nmims.edu.in"
] | arya.shah82@nmims.edu.in |
aa0ad832fc53a100441ac4b0f83314375d6fa964 | 83d1e53d952b372ee9058869fc655a65551c0e51 | /bwshop/apps/trade/migrations/0001_initial.py | 1403a1bcd54e793a33c580b21e97c81cbdb24923 | [] | no_license | zrxingchen/END | 63c5fcb52b09c5e9a21aa3e97365024d39c0ee08 | 0f38f43eea809ed0ed560de5c33682f11afe3ecf | refs/heads/master | 2023-01-08T08:32:03.041329 | 2020-11-12T09:11:17 | 2020-11-12T09:11:17 | 311,594,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,202 | py | # Generated by Django 2.2.6 on 2020-11-09 14:12
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OrderInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_sn', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='订单编号')),
('nonce_str', models.CharField(blank=True, max_length=50, null=True, unique=True, verbose_name='随机加密串')),
('trade_no', models.CharField(blank=True, max_length=100, null=True, unique=True, verbose_name='交易号')),
('pay_status', models.CharField(choices=[('TRADE_SUCCESS', '成功'), ('TRADE_CLOSED', '超时关闭'), ('WAIT_BUYER_PAY', '交易创建'), ('TRADE_FINISHED', '交易结束'), ('paying', '待支付')], default='paying', max_length=30, verbose_name='订单状态')),
('pay_type', models.CharField(choices=[('alipay', '支付宝'), ('wechat', '微信')], default='alipay', max_length=10, verbose_name='支付类型')),
('post_script', models.CharField(max_length=200, verbose_name='订单留言')),
('order_mount', models.FloatField(default=0.0, verbose_name='订单金额')),
('pay_time', models.DateTimeField(blank=True, null=True, verbose_name='支付时间')),
('address', models.CharField(default='', max_length=100, verbose_name='收货地址')),
('signer_name', models.CharField(default='', max_length=20, verbose_name='签收人')),
('singer_mobile', models.CharField(max_length=11, verbose_name='联系电话')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '订单信息',
'verbose_name_plural': '订单信息',
},
),
migrations.CreateModel(
name='OrderGoods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('goods_num', models.IntegerField(default=0, verbose_name='商品数量')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='goods', to='trade.OrderInfo', verbose_name='订单信息')),
],
options={
'verbose_name': '订单商品',
'verbose_name_plural': '订单商品',
},
),
migrations.CreateModel(
name='ShoppingCart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nums', models.IntegerField(default=0, verbose_name='购买数量')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('goods', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.Goods', verbose_name='商品')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '购物车喵',
'verbose_name_plural': '购物车喵',
'unique_together': {('user', 'goods')},
},
),
]
| [
"2425277916@qq.com"
] | 2425277916@qq.com |
21f9f4dc10c4149e1e01f1db02c23101ef77c931 | f84540a209490c4d3ee7583c4668fe1c8b1c230e | /Backtracking/NDigitPassword.py | 4b8022f5a9aabcc03ff0c94866748383571f963f | [] | no_license | TimMKChang/AlgorithmSampleCode | 9e08a3a88f24b9645ca70f834970650d400fd259 | d5bcdce147bd6c3f05648962ca2096f79e4f003f | refs/heads/master | 2023-07-19T15:28:27.722181 | 2021-09-09T07:02:22 | 2021-09-09T07:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from typing import List
class Solution:
    """Enumerate every password of a given length over the digits 1-3."""

    def create(self, depth: int, max_depth: int, password: List[int]):
        """Backtracking helper: extend *password* one digit at a time and
        print each completed combination of length *max_depth*."""
        if (depth == max_depth):
            print(password)
            return
        for i in range(3):
            password.append(i + 1)  # choose a digit (1..3)
            self.create(depth + 1, max_depth, password)
            password.pop()          # un-choose (backtrack)

    def find_all_passwords(self, digits: int):
        """Print all passwords consisting of *digits* digits, each in 1..3.

        Fixed annotations: the original declared ``self: int`` and
        ``digits: List[int]`` although *digits* is the password length.
        """
        self.create(0, digits, [])
# Demo: enumerate all 3-digit passwords over the digits 1-3 (prints 27 lines).
s = Solution()
N = 3
s.find_all_passwords(N)
| [
"gn01168178@yahoo.com.tw"
] | gn01168178@yahoo.com.tw |
0273295cb507b70b59c231c4aef68cdd9773b8f3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03503/s181531838.py | 96394370c458ca0de1899ae6b7afe072c7e52033 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from itertools import product
def main():
    """Read one instance from stdin and return the best achievable total.

    Input format: N, then N rows of 10 open/closed flags (F), then N rows
    of 11 payoff values (P).  Every non-empty choice of the 10 slots is
    tried; row j contributes P[j][k] where k is the overlap between the
    chosen slots and that row's flags.
    """
    n = int(input())
    flags = [list(map(int, input().split())) for _ in range(n)]
    payoffs = [list(map(int, input().split())) for _ in range(n)]
    best = -10**21
    # Enumerate every non-empty subset of the 10 slots as a bitmask.
    for mask in range(1, 1 << 10):
        pick = [(mask >> b) & 1 for b in range(10)]
        total = 0
        for row, pay in zip(flags, payoffs):
            overlap = sum(p & q for p, q in zip(pick, row))
            total += pay[overlap]
        if total > best:
            best = total
    return best
# Solve the instance read from stdin and print the result.
print(main())
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed88470106faeb565120db40c9e710b85f78b2d3 | a0708a4e29d22ef408927f70dd3ded397e891129 | /REST_API/corona_REST/setting.py | 3c11859158f3fcbb588cd09656bc66cbcae3a63a | [
"MIT"
] | permissive | luyuliu/visualization-police-shootings | 0690861e06a543aaf9ae1929be8153bf88073a01 | 5745e6037597bda00c4d352ce7c861a5a8e7fd90 | refs/heads/master | 2022-09-17T06:45:12.405906 | 2020-06-04T14:02:34 | 2020-06-04T14:02:34 | 267,934,944 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | DOMAIN = {
'census_car_renter': {'datasource': {'source': 'census_car_renter'}},
'census_income': {'datasource': {'source': 'census_income'}},
'google_trend_20200417': {'datasource': {'source': 'google_trend_20200417'}},
'census_occupation_industry': {'datasource': {'source': 'census_occupation_industry'}},
'ridership': {'datasource': {'source': 'ridership'}},
'population': {'datasource': {'source': 'population'}},
'ridership_actual': {'datasource': {'source': 'ridership_actual'}},
'ridership_hourly': {'datasource': {'source': 'ridership_hourly'}},
'county_info': {'datasource': {'source': 'county_info'}},
'ALC_LinkMe': {'datasource': {'source': 'ALC_LinkMe'}},
'social_media': {'datasource': {'source': 'social_media'}},
'system_info_all': {'datasource': {'source': 'system_info_all'}},
'census_commute_time': {'datasource': {'source': 'census_commute_time'}},
'aggregated_ridership_hourly': {'datasource': {'source': 'aggregated_ridership_hourly'}},
'system_info_backup': {'datasource': {'source': 'system_info_backup'}},
'census_race': {'datasource': {'source': 'census_race'}},
'census_occu_pop': {'datasource': {'source': 'census_occu_pop'}},
'census_car_ownership': {'datasource': {'source': 'census_car_ownership'}},
'census_age': {'datasource': {'source': 'census_age'}},
'corona_cases_state_level': {'datasource': {'source': 'corona_cases_state_level'}},
'google_trend_backup': {'datasource': {'source': 'google_trend_backup'}},
'corona_cases_usafacts': {'datasource': {'source': 'corona_cases_usafacts'}},
'census_transit_pop': {'datasource': {'source': 'census_transit_pop'}},
'census_sex': {'datasource': {'source': 'census_sex'}},
'census_occupation_population': {'datasource': {'source': 'census_occupation_population'}},
'system_info': {'datasource': {'source': 'system_info'}},
'corona_cases_github': {'datasource': {'source': 'corona_cases_github'}},
'other_ridership_hourly': {'datasource': {'source': 'other_ridership_hourly'}},
'other_ridership': {'datasource': {'source': 'other_ridership'}},
}
# MongoDB connection settings (NOTE(review): the setting names match the
# Eve REST framework's configuration keys -- confirm this app uses Eve).
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = "corona"
# Accept document fields that are not declared in a resource schema.
ALLOW_UNKNOWN=True
# Allow cross-origin requests from any domain.
X_DOMAINS='*'
PAGINATION_LIMIT = 10000
PAGINATION_DEFAULT = 10000 | [
"liuluyu0378@gmail.com"
] | liuluyu0378@gmail.com |
63bbd5a911a9fbe0ad00b026d3a9b1ecd9f9af2c | 4aae2df13bfd53a8b16aa5f941f2cc8b8ac144b7 | /caffe2/python/trt/test_pt_onnx_trt.py | e066e8363a1792fee59080b1053d63d1d0e4007d | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | computerguy2030/pytorch-rocm-amd | e9f2718c470b505325d396baf6513e71bcf0a7ca | 38da53d721fcb335dedb1b52f14fd89718e90bef | refs/heads/master | 2023-04-08T00:55:01.542663 | 2021-04-16T11:33:39 | 2021-04-16T11:33:39 | 334,288,140 | 3 | 0 | NOASSERTION | 2021-04-16T11:27:55 | 2021-01-29T23:40:06 | C++ | UTF-8 | Python | false | false | 7,532 | py | ###################################################################################################
# ATTENTION! This test will most probably fail if you install TensorRT 6.0.1 only.
# That's because it's shipped with older version of ONNX parser not supporting some
# required features. To make it work please use new version: https://github.com/onnx/onnx-tensorrt
# Just clone it and do something like this:
#
# ~/pt/third_party/onnx-tensorrt$ mkdir build/
# ~/pt/third_party/onnx-tensorrt$ cd build/
# ~/pt/third_party/onnx-tensorrt/build$ cmake ..
# ~/pt/third_party/onnx-tensorrt/build$ make
# ~/pt/third_party/onnx-tensorrt/build$ sudo cp libnvonnxparser.so.6.0.1 /usr/lib/x86_64-linux-gnu
#
# This note is valid for 6.0.1 release only. September 18th, 2019.
###################################################################################################
import os
import unittest
from PIL import Image
import numpy as np
import torch
import torchvision.models as models
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def allocate_buffers(engine):
    """Allocate page-locked host buffers and device memory for the engine's
    input (binding 0) and output (binding 1), plus a CUDA stream.

    Returns (host_in, dev_in, host_out, dev_out, stream).
    """
    dtype = trt.nptype(trt.float32)
    in_count = trt.volume(engine.get_binding_shape(0))
    out_count = trt.volume(engine.get_binding_shape(1))
    host_in = cuda.pagelocked_empty(in_count, dtype=dtype)
    host_out = cuda.pagelocked_empty(out_count, dtype=dtype)
    dev_in = cuda.mem_alloc(host_in.nbytes)
    dev_out = cuda.mem_alloc(host_out.nbytes)
    return host_in, dev_in, host_out, dev_out, cuda.Stream()
def load_normalized_test_case(input_shape, test_image, pagelocked_buffer, normalization_hint):
    """Load *test_image*, resize it to *input_shape* (C, H, W), normalize it
    and copy the flattened pixels into *pagelocked_buffer*.

    normalization_hint 0: (x/255 - 0.45) / 0.225
    normalization_hint 1: x/256 - 0.5 (used by the mnasnet/mobilenet tests)

    Returns the image path so callers can report which case was run.
    """
    def normalize_image(image):
        c, h, w = input_shape
        image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1])\
            .astype(trt.nptype(trt.float32)).ravel()
        if (normalization_hint == 0):
            return (image_arr / 255.0 - 0.45) / 0.225
        elif (normalization_hint == 1):
            return (image_arr / 256.0 - 0.5)
        # Previously an unknown hint fell through and returned None, which
        # made np.copyto below fail with an unrelated error; fail loudly.
        raise ValueError("unsupported normalization_hint: {}".format(normalization_hint))
    np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
    return test_image
class Test_PT_ONNX_TRT(unittest.TestCase):
    """End-to-end check: export torchvision models to ONNX, build a TensorRT
    engine from the ONNX file, and verify the engine classifies the sample
    images correctly (at most one misclassification allowed per model)."""
    # NOTE(review): __enter__ without a matching __exit__ is unusual for a
    # TestCase; presumably leftover from context-manager use -- confirm.
    def __enter__(self):
        return self
    def setUp(self):
        """Resolve the sample images and class labels from the adjacent
        ``data`` directory; fail fast if any image is missing."""
        data_path = os.path.join(os.path.dirname(__file__), 'data')
        self.image_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg"]
        for index, f in enumerate(self.image_files):
            self.image_files[index] = os.path.abspath(os.path.join(data_path, f))
            if not os.path.exists(self.image_files[index]):
                raise FileNotFoundError(self.image_files[index] + " does not exist.")
        self.labels = open(os.path.abspath(os.path.join(data_path, "class_labels.txt")), 'r').read().split('\n')
    def build_engine_onnx(self, model_file):
        """Parse *model_file* (ONNX) and build a TensorRT CUDA engine,
        failing the test if the parser reports any errors."""
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(flags = 1) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 33  # 8 GiB builder workspace
            with open(model_file, 'rb') as model:
                if not parser.parse(model.read()):
                    for error in range(parser.num_errors):
                        self.fail("ERROR: {}".format(parser.get_error(error)))
            return builder.build_cuda_engine(network)
    def _test_model(self, model_name, input_shape = (3, 224, 224), normalization_hint = 0):
        """Export the named torchvision model to ONNX, run each sample image
        through TensorRT, and assert at most one misclassification (the
        expected label is embedded in each image's file name)."""
        model = getattr(models, model_name)(pretrained=True)
        shape = (1,) + input_shape
        dummy_input = (torch.randn(shape),)
        onnx_name = model_name + ".onnx"
        torch.onnx.export(model,
                          dummy_input,
                          onnx_name,
                          input_names = [],
                          output_names = [],
                          verbose=False,
                          export_params=True,
                          opset_version=9)
        with self.build_engine_onnx(onnx_name) as engine:
            h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
            with engine.create_execution_context() as context:
                err_count = 0
                for index, f in enumerate(self.image_files):
                    test_case = load_normalized_test_case(input_shape, f,\
                                                          h_input, normalization_hint)
                    # Copy the input to the GPU, run inference asynchronously,
                    # copy the result back, then wait for completion.
                    cuda.memcpy_htod_async(d_input, h_input, stream)
                    context.execute_async_v2(bindings=[d_input, d_output],
                                             stream_handle=stream.handle)
                    cuda.memcpy_dtoh_async(h_output, d_output, stream)
                    stream.synchronize()
                    amax = np.argmax(h_output)
                    pred = self.labels[amax]
                    # Image file names encode the expected class label.
                    if "_".join(pred.split()) not in\
                        os.path.splitext(os.path.basename(test_case))[0]:
                        err_count = err_count + 1
                self.assertLessEqual(err_count, 1, "Too many recognition errors")
    def test_alexnet(self):
        self._test_model("alexnet", (3, 227, 227))
    def test_resnet18(self):
        self._test_model("resnet18")
    def test_resnet34(self):
        self._test_model("resnet34")
    def test_resnet50(self):
        self._test_model("resnet50")
    def test_resnet101(self):
        self._test_model("resnet101")
    @unittest.skip("Takes 2m")
    def test_resnet152(self):
        self._test_model("resnet152")
    def test_resnet50_2(self):
        self._test_model("wide_resnet50_2")
    @unittest.skip("Takes 2m")
    def test_resnet101_2(self):
        self._test_model("wide_resnet101_2")
    def test_squeezenet1_0(self):
        self._test_model("squeezenet1_0")
    def test_squeezenet1_1(self):
        self._test_model("squeezenet1_1")
    def test_googlenet(self):
        self._test_model("googlenet")
    def test_inception_v3(self):
        self._test_model("inception_v3")
    def test_mnasnet0_5(self):
        self._test_model("mnasnet0_5", normalization_hint = 1)
    def test_mnasnet1_0(self):
        self._test_model("mnasnet1_0", normalization_hint = 1)
    def test_mobilenet_v2(self):
        self._test_model("mobilenet_v2", normalization_hint = 1)
    def test_shufflenet_v2_x0_5(self):
        self._test_model("shufflenet_v2_x0_5")
    def test_shufflenet_v2_x1_0(self):
        self._test_model("shufflenet_v2_x1_0")
    def test_vgg11(self):
        self._test_model("vgg11")
    def test_vgg11_bn(self):
        self._test_model("vgg11_bn")
    def test_vgg13(self):
        self._test_model("vgg13")
    def test_vgg13_bn(self):
        self._test_model("vgg13_bn")
    def test_vgg16(self):
        self._test_model("vgg16")
    def test_vgg16_bn(self):
        self._test_model("vgg16_bn")
    def test_vgg19(self):
        self._test_model("vgg19")
    def test_vgg19_bn(self):
        self._test_model("vgg19_bn")
    @unittest.skip("Takes 13m")
    def test_densenet121(self):
        self._test_model("densenet121")
    @unittest.skip("Takes 25m")
    def test_densenet161(self):
        self._test_model("densenet161")
    @unittest.skip("Takes 27m")
    def test_densenet169(self):
        self._test_model("densenet169")
    @unittest.skip("Takes 44m")
    def test_densenet201(self):
        self._test_model("densenet201")
if __name__ == '__main__':
    # Run the full TensorRT regression suite when invoked directly.
    unittest.main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ffc7c452a11af0ae9b79afd67461866a0a612256 | a1c6fea0703d7d813a88aae91a7fbb17e06785ea | /top/api/rest/NlpSimilarityRequest.py | 9c88240e589cb62c89e5d5cdc34c7831fce17aa3 | [] | no_license | warm200/SpokesTribe | bea676b2868272ceab17176d7eb5d98ae7747543 | 8c3671214e317987645aeef4451e590bcb772f7e | refs/heads/master | 2022-01-11T18:12:40.847007 | 2019-02-08T13:08:38 | 2019-02-08T13:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | '''
Created by auto_sdk on 2016.01.12
'''
from top.api.base import RestApi
class NlpSimilarityRequest(RestApi):
    """TOP request wrapper for the ``taobao.nlp.similarity`` API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Text payload to compare; set by the caller before the request runs.
        self.texts = None

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.nlp.similarity'
"zhengyufei19999@163.com"
] | zhengyufei19999@163.com |
57855a19eec5c60c70856d94ce80d8e4359a1dfc | 222d2a8ca9769f4a263a627ddd8aff6032fe2c88 | /solutions/pybasic_ex2_4_2.py | a9cee66260abfd01fb0a4a1f39f8eb549eea9926 | [
"Unlicense"
] | permissive | seb-mueller/python-basic | 4bdc560191a0edd6c6996f05f1465824d7908060 | f28481455a238c07fb1c2b3c935361a1abb44e32 | refs/heads/master | 2020-04-23T20:40:24.548159 | 2018-12-19T11:36:26 | 2018-12-19T11:36:26 | 171,448,160 | 1 | 0 | Unlicense | 2019-02-19T09:50:04 | 2019-02-19T09:50:03 | null | UTF-8 | Python | false | false | 1,033 | py | # Read lyrics from file
with open('data/imagine.txt') as f:
    lyrics = f.read()
print(lyrics)
# Change all characters to lower case
lyrics = lyrics.lower()
# Split into words
words = lyrics.split()
print(words)
# Print the total number of words
print('There are', len(words), 'words in this song.')
# Print the number of distinct words
unique_words = set(words)
print('There are', len(unique_words), 'distinct words in this song.')
# Count the frequency of each word in one O(n) pass; the previous
# words.count(w)-per-unique-word loop was quadratic.
from collections import Counter
results = Counter(words)
# Print each unique word along with its frequency
for r in results:
    print(results[r], '\t', r)
# Find the most frequent word in the song, ignoring short words (<= 3 letters)
most_frequent = 0
for r in results:
    if (results[r] > most_frequent) and (len(r) > 3):
        most_frequent = results[r]
        most_frequent_word = r
# Print the most frequent word with its frequency
print(most_frequent_word, 'is the most frequent word being used', most_frequent, 'times.')
| [
"pajanne@gmail.com"
] | pajanne@gmail.com |
7dba57c2d4b48efaa2edf0c004e06c11abeb8626 | d7e4d46db1cfda7fb417ba4d185be0639d2d1280 | /scripts/tally_error_analysis.py | f3242b6ea24a9e99589e25f6be5b6a745de00f52 | [] | no_license | enewe101/relational-nouns-LREC-2018 | 4f830c7dc129ce988bef486b3e393228bdee4cd5 | d6d1689b9107401c12cb74e3a68dd75cda45266d | refs/heads/master | 2021-09-14T07:45:13.386635 | 2018-05-10T04:14:47 | 2018-05-10T04:14:47 | 105,477,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import t4k
import os
import itertools
from collections import defaultdict, Counter
import sys
sys.path.append('../lib')
from SETTINGS import DATA_DIR
# Input TSV files containing the per-word error annotations.
err_fnames = [
	'analyze-errors-rand-1.tsv',
	'analyze-errors-rand-2.tsv',
	'analyze-errors-top-1.tsv',
	'analyze-errors-top-2.tsv',
]
err_paths = [os.path.join(DATA_DIR, fname) for fname in err_fnames]
# Output file for the aggregated tallies.
err_tally_path = os.path.join(DATA_DIR, 'tallied-errors.tsv')
def tally_errors():
lines = []
for err_path in err_paths:
lines += open(err_path).readlines()
counts = Counter()
for line in t4k.skip_blank(lines):
line = line.strip()
try:
category, word, was_correct_str = line.split('\t')
except ValueError:
print line
raise
was_correct = check_was_correct(was_correct_str)
counts[category, was_correct] += 1
print counts
tally_file = open(err_tally_path, 'w')
buckets = itertools.product(['k','s','o','r','x'], [True, False])
for category, was_correct in buckets:
count = counts[category, was_correct]
tally_file.write('%s\t%s\t%d\n' % (category, str(was_correct), count))
return counts
def check_was_correct(was_correct_str):
	"""Map the annotation flag to a boolean: '1' -> True, '-1' -> False.

	Raises ValueError (now with the offending value, instead of a bare
	exception) for any other input.
	"""
	if was_correct_str == '1':
		return True
	elif was_correct_str == '-1':
		return False
	else:
		raise ValueError("expected '1' or '-1', got %r" % (was_correct_str,))
if __name__ == '__main__':
	# Run the aggregation when invoked as a script.
	tally_errors()
| [
"edward.newell@gmail.com"
] | edward.newell@gmail.com |
395c1d53940dcf496471e1e64ac10cd6ff04304c | a48e86b8ffe42874779c7de0639997a0d93a664d | /hub/overwatch_hub/util/__init__.py | 453ba7922aed4d0eb752c5ab76413f2196f45c23 | [
"MIT"
] | permissive | messa/ow2 | 2edd94eaa85fccd99b0c98e57863e4169c2f55c8 | 0fe383016c247c5b4c830fe396d4c2a69cbe4020 | refs/heads/main | 2023-02-24T18:17:17.405784 | 2022-12-16T19:03:33 | 2022-12-16T19:03:33 | 173,628,122 | 1 | 0 | MIT | 2023-01-07T04:07:14 | 2019-03-03T20:49:02 | Python | UTF-8 | Python | false | false | 202 | py | from .cache import LRUCache
from .datetime import to_utc, parse_datetime, utc_datetime_from_ms_timestamp
from .misc import random_str, smart_repr, to_compact_json
from .mongodb import get_mongo_db_name
| [
"petr.messner@gmail.com"
] | petr.messner@gmail.com |
2b5cf6aa3d829b179ff6e946371e475e97128243 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_165446.26+182224.6/sdB_sdssj_165446.26+182224.6_lc.py | 6b2df89071fee9a72e9777d845e3d778a649c8ac | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | from gPhoton.gAperture import gAperture
def main():
    # Extract an NUV light curve for this target with gPhoton's gAperture:
    # fixed sky position, 30 s bins, aperture radius 0.00555556 deg
    # (~20 arcsec) with a background annulus, writing the result to CSV.
    gAperture(band="NUV", skypos=[253.69275,18.3735], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_165446.26+182224.6/sdB_sdssj_165446.26+182224.6_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
90c6ea6c849a48d57fab6ee97d93e7628aa81d86 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200321214031.py | b860a27e898b829f24562d3d3bd63257513d65e1 | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | import os
import subprocess
import re
from datetime import datetime
import time
# Experiment configuration: number of runs and the tabu-search parameters
# passed (as strings) to the external solver executable.
numberOfTests = 10
tabuIteration = '5'
tabuDuration = '0'
numberOfCities = '10'
final_solution = []
list_coverage = []
print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
    # Run the external tabu-search solver and capture its stdout.
    process = subprocess.Popen(['./algo_tabou.exe', tabuIteration, tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    result = stdout
    result = re.sub(r'\s', ' ', str(result))
    # The last "NNNN km" occurrence in the log is the best tour length found.
    solution = (re.findall(r'([0-9]{4}) km', result))[-1]
    final_solution.append(int(solution))
    # Iteration at which the solver reported a local minimum (French log
    # message); if it never reported one, fall back to the iteration budget.
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage != []:
        coverage = int(coverage[0])+ 1
    else:
        coverage = 5
    number_of_solution_before_coverage = coverage
    list_coverage.append(coverage)
    print('best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(solution, coverage, number_of_solution_before_coverage))
    time.sleep( 1 )
print("Summery:")
# Count the runs that reached the reference tour length 3473 km, which the
# summary below treats as the optimum for this instance.
optimum_result = len(list(filter(lambda x: x == 3473, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} iteration we faced {(optimum_result/numberOfTests)*100}% coverage')
print(f'in average this test shows that we found the global optimum solution in iteratio') | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
23bc86709fcd9067a433ffebf06e58ea9d560879 | c7821b133e31e97af6e1aec75c1fd3039b56356f | /Class11/Ex4.py | 6317d53a7f1f343e3ce3155ddb6d453965429061 | [] | no_license | ksannedhi/kbyers-network-automation | b9089c28104f4c590ca33690e548c4d44331cdab | a4afa54e7b323c5529033c842db2b603f4dabf34 | refs/heads/master | 2022-11-22T23:41:32.245143 | 2020-07-19T14:44:43 | 2020-07-19T14:44:43 | 275,795,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | '''4a. Using an HTTP POST and the Python-requests library, create a new IP address in NetBox. This IP address object should be a /32 from the 192.0.2.0/24 documentation block.
Print out the status code and the returned JSON.
The HTTP headers for this request should look as follows:
http_headers = {}
http_headers["Content-Type"] = "application/json; version=2.4;"
http_headers["accept"] = "application/json; version=2.4;"
http_headers["Authorization"] = f"Token {token}"
The URL for the HTTP POST is:
https://netbox.lasthop.io/api/ipam/ip-addresses/
The JSON payload data for this request should be similar to the following:
data = {"address": "192.0.2.100/32"}
4b. Using the response data from the HTTP POST that created the IP address entry in exercise 4a, capture the "id" of the newly created IP address object.
Using this ID, construct a new URL. Use this new URL and the HTTP GET method to retrieve only the API information specific to this IP address.
Your IP address URL should be of the following form:
https://netbox.lasthop.io/api/ipam/ip-addresses/{address_id}/
where {address_id} is the ID of the object that you just created.
Pretty print the response.json() data from this HTTP GET. Please note the ID of the address object that you just created.'''
import os
import json
from pprint import pprint
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# NetBox IPAM endpoint; the API token is read from the environment.
url = "https://netbox.lasthop.io/api/ipam/ip-addresses/"
token = "Token " + os.getenv("NETBOX_TOKEN")
http_headers = {"Content-Type": "application/json; version=2.4", "accept": "application/json; version=2.4", "authorization": token}
# 4a. Create a new /32 address from the 192.0.2.0/24 documentation block.
new_ip = {"address": "192.0.2.102/32"}
response = requests.post(url, headers = http_headers, data = json.dumps(new_ip), verify = False)
print(f'Response code: {response.status_code}')
print()
print("Returned JSON:")
pprint(response.json())
print()
# 4b. Capture the "id" of the new object and GET just that address.
address_id = response.json()["id"]
url = f"https://netbox.lasthop.io/api/ipam/ip-addresses/{address_id}"
response = requests.get(url, headers = http_headers, verify = False)
response = response.json()
print("Newly created IP address details:")  # fixed typo: "crerated"
pprint(response)
| [
"noreply@github.com"
] | ksannedhi.noreply@github.com |
4fc124d863084cde934bae9e2844f44d547f0b31 | 8ecf4930f9aa90c35e5199d117068b64a8d779dd | /TopQuarkAnalysis/SingleTop/test/TTBar_MatchingUp_part_1_cfg.py | dc96144d1cb0591060b6d4acc955458b41a4d6e0 | [] | no_license | fabozzi/ST_44 | 178bd0829b1aff9d299528ba8e85dc7b7e8dd216 | 0becb8866a7c758d515e70ba0b90c99f6556fef3 | refs/heads/master | 2021-01-20T23:27:07.398661 | 2014-04-14T15:12:32 | 2014-04-14T15:12:32 | 18,765,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,899 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("SingleTopSystematics")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# Keep processing on missing products / type mismatches instead of aborting.
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True),
    FailPath = cms.untracked.vstring('ProductNotFound','Type Mismatch')
    )
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff") ### real data
process.GlobalTag.globaltag = cms.string("START44_V13::All")
#Load B-Tag
#MC measurements from 36X
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDBMC36X")
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDBMC36X")
##Measurements from Fall10
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1011")
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1011")
#Spring11
process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1107")
process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1107")
# -1 = process every event in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20000))
process.source = cms.Source ("PoolSource",
    fileNames = cms.untracked.vstring (
    'file:/tmp/mmerola/TTBar_MatchingUpMerged.root',
    #'rfio:/castor/cern.ch/user/m/mmerola/SingleTop_2012/MergedJune/TTBar_MatchingUpMerged.root',
    ),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    #eventsToProcess = cms.untracked.VEventRange('1:19517967-1:19517969'),
    )
#from TTBar_MatchingUp import *
#process.source.fileNames = TTBar_MatchingUp_ntuple
#process.source.fileNames = cms.untracked.vstring("file:/tmp/mmerola/TTBar_MatchingUpMerged.root")
#PileUpSync
#Output
#process.TFileService = cms.Service("TFileService", fileName = cms.string("/castor/cern.ch/user/m/mmerola/SingleTop_2012/TreesJune/TTBar_MatchingUp_part_1.root"))
process.TFileService = cms.Service("TFileService", fileName = cms.string("/tmp/mmerola/TTBar_MatchingUp_part_1.root"))
#process.TFileService = cms.Service("TFileService", fileName = cms.string("testNoPU.root"))
#process.load("SingleTopAnalyzers_cfi")
process.load("SingleTopRootPlizer_cfi")
process.load("SingleTopFilters_cfi")
#from SingleTopPSets_cfi import *
#from SingleTopPSetsFall11_cfi import *
from SingleTopPSetsFall_cfi import *
# Channel-specific configuration for the tree producers.
process.TreesEle.dataPUFile = cms.untracked.string("pileUpDistr.root")
process.TreesMu.dataPUFile = cms.untracked.string("pileUpDistr.root")
#process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.channelInfo = TTBar_MatchingUpEle
process.TreesMu.channelInfo = TTBar_MatchingUpMu
#process.PlotsEle.channelInfo = TTBar_MatchingUpEle
#process.PlotsMu.channelInfo = TTBar_MatchingUpMu
#process.TreesMu.systematics = cms.untracked.vstring();
#doPU = cms.untracked.bool(False)
#process.WeightProducer.doPU = cms.untracked.bool(False)
#process.TreesMu.doQCD = cms.untracked.bool(False)
#process.TreesEle.doQCD = cms.untracked.bool(False)
#process.TreesMu.doResol = cms.untracked.bool(False)
#process.TreesEle.doResol = cms.untracked.bool(False)
#process.TreesMu.doPU = cms.untracked.bool(False)
#process.TreesEle.doPU = cms.untracked.bool(False)
# These two switches are rewritten by the job-submission machinery
# (see the *_INSTRUCTION markers).
channel_instruction = "allmc" #SWITCH_INSTRUCTION
#channel_instruction = "allmc" #SWITCH_INSTRUCTION
MC_instruction = True #TRIGGER_INSTRUCTION
process.HLTFilterMu.isMC = MC_instruction
process.HLTFilterEle.isMC = MC_instruction
process.HLTFilterMuOrEle.isMC = MC_instruction
process.HLTFilterMuOrEleMC.isMC = MC_instruction
#process.PUWeightsPath = cms.Path(
#    process.WeightProducer
#)
# Build the cms.Path(s) appropriate for the selected channel.
if channel_instruction == "allmc":
#    process.TreesMu.doResol = cms.untracked.bool(True)
#    process.TreesEle.doResol = cms.untracked.bool(True)
#    process.TreesEle.doTurnOn = cms.untracked.bool(True)
    process.PathSysMu = cms.Path(
        process.HLTFilterMuMC *
        process.TreesMu
        )
    process.PathSysEle = cms.Path(
        process.HLTFilterEleMC *
        process.TreesEle
        )
if channel_instruction == "all":
    process.TreesEle.doTurnOn = cms.untracked.bool(False)
    process.TreesEle.doPU = cms.untracked.bool(False)
    process.TreesMu.doPU = cms.untracked.bool(False)
    process.PathSys = cms.Path(
#        process.PlotsMu +
#        process.PlotsEle +
        process.HLTFilterMuOrEle *
        process.TreesMu +
        process.TreesEle
        )
if channel_instruction == "mu":
    process.TreesMu.doPU = cms.untracked.bool(False)
    process.TreesMu.doResol = cms.untracked.bool(False)
    process.PathSysMu = cms.Path(
#        process.PlotsMu +
#        process.PlotsEle +
#        process.HLTFilterMu *
        process.HLTFilterMuData *
        process.TreesMu
        )
if channel_instruction == "ele":
    process.TreesEle.doTurnOn = cms.untracked.bool(False)
    process.TreesEle.doPU = cms.untracked.bool(False)
    process.TreesEle.doResol = cms.untracked.bool(False)
    process.PathSysMu = cms.Path(
#        process.PlotsMu +
#        process.PlotsEle +
        process.HLTFilterEle *
        process.TreesEle
        )
if channel_instruction == "muqcd":
    process.TreesMu.doPU = cms.untracked.bool(False)
    process.TreesMu.doResol = cms.untracked.bool(False)
    process.PathSysMu = cms.Path(
#        process.PlotsMu +
#        process.PlotsEle +
        process.HLTFilterMuQCD *
        process.TreesMu
        )
if channel_instruction == "eleqcd":
    process.TreesEle.doTurnOn = cms.untracked.bool(False)
    process.TreesEle.doPU = cms.untracked.bool(False)
    process.TreesEle.doResol = cms.untracked.bool(False)
    process.TreesEle.isControlSample = cms.untracked.bool(True)
    process.PathSysEle = cms.Path(
#        process.PlotsMu +
#        process.PlotsEle +
        process.HLTFilterEleQCD *
        process.TreesEle
        )
process.source.fileNames = cms.untracked.vstring('file:/tmp/mmerola/TTBar_MatchingUp_part_1Merged.root',) | [
"Francesco.Fabozzi@cern.ch"
] | Francesco.Fabozzi@cern.ch |
bce953ad15dd3e30f944c6d8717abeb32b5538ba | 69c882c678103b182988fb60d3e898d569980f1c | /Day 9/prog12.py | 7dbcbb802408087f5e56cb508282d9bb030010d5 | [] | no_license | gittygupta/stcet-python | 44be9d91cdd6215879d9f04497214819228821be | e77456172746ee76b6e2a901ddb0c3dbe457f82a | refs/heads/master | 2022-03-05T11:37:08.720226 | 2019-12-01T00:56:03 | 2019-12-01T00:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py |
try:
x = int(input('Enter number'))
if x<0:
raise ValueError("Negative number encountered")
y=x**0.5
y=round(y,3)
print("Square Root of {0} is {1}".format(x,y))
except ValueError as ve:
print(ve)
| [
"noreply@github.com"
] | gittygupta.noreply@github.com |
7d9993b8880f8df090e6db0fed61774fd75a54b3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_cherries.py | 41bfab8a63e0fa15bdebe2aea3310e1100740392 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _CHERRIES():
def __init__(self,):
self.name = "CHERRIES"
self.definitions = cherry
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cherry']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
97cbcde8975d2160ee158987d98037eabc983615 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Cp3JRpooAqfA4kGkv_8.py | 237d1a3865d88b2179053468da1eb33bb0fb89c6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | """
We have two lists `N` and `P`, where `N` represents the value of a node in
**Binary Tree** , and `P` is the parent of `N`.
N| P
---|---
1| 2
3| 2
6| 8
9| 8
2| 5
8| 5
5| -1
Write a function to find the node type of the node within this Binary Tree,
ordered by the value of the node. Output one of the following:
* `Root`: If node is root node.
* `Leaf`: If node is leaf node.
* `Inner`: If node is neither root nor leaf node.
* `Not exist`: If node not exist.
node_type([1, 3, 6, 9, 2, 8, 5], [2, 2, 8, 8, 5, 5, -1], 5) ➞ "Root"
node_type([1, 3, 6, 9, 2, 8, 5], [2, 2, 8, 8, 5, 5, -1], 6) ➞ "Leaf"
node_type([1, 3, 6, 9, 2, 8, 5], [2, 2, 8, 8, 5, 5, -1], 2) ➞ "Inner"
node_type([1, 3, 6, 9, 2, 8, 5], [2, 2, 8, 8, 5, 5, -1], 10) ➞ "Not exist"

### Notes
All values of `N` list are unique.
"""
def node_type(_N, _P, n):
    """Classify node *n* given parallel lists of node values (_N) and
    their parents (_P): 'Root', 'Inner', 'Leaf' or 'Not exist'."""
    # The root is the node whose recorded parent is -1.
    for value, parent in zip(_N, _P):
        if value == n and parent == -1:
            return 'Root'
    appears_as_node = n in _N
    appears_as_parent = n in _P
    if not appears_as_node and not appears_as_parent:
        return "Not exist"
    if appears_as_node and appears_as_parent:
        return 'Inner'
    if appears_as_node:
        return 'Leaf'
    return 'Root'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d1107b2fc4b38c2dfe4116dcda673f397a111388 | c7967ec500b210513aa0b1f540144c931ca687ac | /알고리즘 스터디/개인공부/Loop/Sdoku.py | a73f05cfa395edf97b6009d153da6a7151479ce0 | [] | no_license | sunminky/algorythmStudy | 9a88e02c444b10904cebae94170eba456320f8e8 | 2ee1b5cf1f2e5f7ef87b44643210f407c4aa90e2 | refs/heads/master | 2023-08-17T01:49:43.528021 | 2023-08-13T08:11:37 | 2023-08-13T08:11:37 | 225,085,243 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | # https://www.acmicpc.net/problem/2580
import sys
# 9x9 board read from stdin; 0 marks an empty cell.
board = [list(map(int, sys.stdin.readline().split())) for _ in range(9)]
# Occupancy tables: row_arr[r][d] / col_arr[c][d] / box_arr[b][d] are True
# when digit d is already used in that row / column / 3x3 box.
row_arr = [[False] * 10 for _ in range(10)]
col_arr = [[False] * 10 for _ in range(10)]
box_arr = [[False] * 10 for _ in range(10)]
# Coordinates (x, y) of the empty cells, in scan order.
pos = []
def dfs(depth):
if depth == len(pos):
for e in board:
print(' '.join(map(str, e)))
exit(0)
x, y = pos[depth]
for n in range(1, 10):
if not row_arr[y][n]:
if not col_arr[x][n]:
if not box_arr[y // 3 * 3 + x // 3][n]:
row_arr[y][n] = True
col_arr[x][n] = True
box_arr[y // 3 * 3 + x // 3][n] = True
board[y][x] = n
dfs(depth + 1)
row_arr[y][n] = False
col_arr[x][n] = False
box_arr[y // 3 * 3 + x // 3][n] = False
board[y][x] = 0
if __name__ == '__main__':
for row in range(9):
for col in range(9):
if board[row][col]:
row_arr[row][board[row][col]] = True
col_arr[col][board[row][col]] = True
box_arr[row // 3 * 3 + col // 3][board[row][col]] = True
else:
pos.append((col, row))
dfs(0)
| [
"suns1502@gmail.com"
] | suns1502@gmail.com |
89d28f5466b9ab201a8635135ab58d73b37fdd5d | fcacf77863a0e33b7288c012e7f9c11001f2e450 | /docs/ajax_fixt.py | c2f639d19a0d5fd4706c91ed77fa73fcdd8d4f75 | [
"BSD-3-Clause"
] | permissive | amigcamel/pyquery | 901ebe782b3fa54a7de5693326e9227b165b5e9b | c19e62393d0b9f73365b31ed747c331ee40a85bb | refs/heads/master | 2021-01-23T21:49:49.773769 | 2017-03-02T17:45:07 | 2017-03-02T17:45:47 | 83,112,122 | 1 | 0 | null | 2017-02-25T06:39:30 | 2017-02-25T06:39:30 | null | UTF-8 | Python | false | false | 909 | py | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from webtest import http
from doctest import SKIP
from tests.apps import input_app
PY3 = sys.version_info >= (3,)
def setup_test(test):
    # Doctest fixture: configure example options and (on Python 2) start a
    # live WSGI test server whose URL is injected into the doctest globals.
    for example in test.examples:
        # urllib/urlopen moved in Python 3, so these examples are skipped there.
        if PY3:
            example.options.setdefault(SKIP, 1)
    if not PY3:
        # Serve the demo app in the background for the doctests to hit.
        server = http.StopableWSGIServer.create(input_app)
        server.wait()
        path_to_html_file = os.path.join('tests', 'test.html')
        test.globs.update(
            input_app=input_app,
            server=server,
            your_url=server.application_url.rstrip('/') + '/html',
            path_to_html_file=path_to_html_file,
        )
setup_test.__test__ = False  # tell test collectors this is a fixture, not a test
def teardown_test(test):
    # Stop the doctest server if setup_test started one.
    if 'server' in test.globs:
        test.globs['server'].shutdown()
teardown_test.__test__ = False  # fixture, not a test
| [
"gael@gawel.org"
] | gael@gawel.org |
2aa3d646a2a4e4b234ad67529a33e7fe13463be5 | f1552a25eae08659073d444de07c977f76c7ab34 | /app/module/MocAudio.py | af6d750e1e00ad6a03a79328a45f535faea8cde7 | [] | no_license | Erignik/http_configure | df036f2e1ed86d22eb1d7738fadff050598fa14d | 4a773894ac8b6fb8d0d4e13f83a850bc1ba349ec | refs/heads/master | 2023-04-28T14:41:51.248446 | 2021-01-17T16:04:21 | 2021-01-17T16:04:21 | 279,090,268 | 0 | 0 | null | 2021-05-06T20:20:33 | 2020-07-12T15:14:29 | Python | UTF-8 | Python | false | false | 157 | py | from module.MocObject import MocObject
class MocAudio(MocObject):
    """Mock audio object; adds nothing beyond its MocObject base type."""

    def __init__(self, obj_name):
        # Forward the object's name to the MocObject initializer.
        super(MocAudio, self).__init__(obj_name)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
71bd46d6b8bf11f40eb6d1f7a0c58b66a719d6dc | c3b739b07214507bf1023b926c19d30784623e98 | /segme/common/tests/test_featalign.py | c1f33ccd9a0624c37f185b63e268ea9e4c4e79ac | [
"MIT"
] | permissive | templeblock/segme | 20a96787500c46483cb7af0db917207fcedafb0b | 8192ed066558c1ea1e7283805b40da4baa5b3827 | refs/heads/master | 2023-08-30T12:31:39.327283 | 2021-11-11T17:08:40 | 2021-11-11T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | import tensorflow as tf
from keras import keras_parameterized, testing_utils
from ..featalign import FeatureSelection, FeatureAlignment
from ...testing_utils import layer_multi_io_test
@keras_parameterized.run_all_keras_modes
class TestFeatureSelection(keras_parameterized.TestCase):
    # Shape contract: FeatureSelection(filters=4) maps float32 inputs of
    # shape (B, 16, 16, 3) to float32 outputs of shape (B, 16, 16, 4).
    def test_layer(self):
        testing_utils.layer_test(
            FeatureSelection,
            kwargs={'filters': 4},
            input_shape=[2, 16, 16, 3],
            input_dtype='float32',
            expected_output_shape=[None, 16, 16, 4],
            expected_output_dtype='float32'
        )
@keras_parameterized.run_all_keras_modes
class TestFeatureAlignment(keras_parameterized.TestCase):
    # Multi-input layer: two float32 feature maps in, one aligned map out
    # whose channel count matches `filters` (12).
    def test_layer(self):
        layer_multi_io_test(
            FeatureAlignment,
            kwargs={'filters': 12, 'deformable_groups': 8},
            input_shapes=[(2, 16, 16, 12), (2, 16, 16, 5)],
            input_dtypes=['float32'] * 2,
            expected_output_shapes=[(None, 16, 16, 12)],
            expected_output_dtypes=['float32']
        )
if __name__ == '__main__':
    # Run via TensorFlow's test runner when invoked directly.
    tf.test.main()
| [
"shkarupa.alex@gmail.com"
] | shkarupa.alex@gmail.com |
64b0ad314c7157f955943a21a6c66a9b883dbd16 | 12f6466ba8fad1293e78ee123ad56938bd515a16 | /reduce.py | 04aa3aeae1978b0054a7b09dc0dd74f87e50ed6a | [] | no_license | greshem/develop_python | 9dd1eaac4137b0c5b5b9f822bba07a8d6fa0f9ae | ddd370a35c63a89c5885f0918e3fe1d44c2a3069 | refs/heads/master | 2021-01-19T01:53:09.502670 | 2017-10-08T12:14:33 | 2017-10-08T12:14:33 | 45,077,612 | 1 | 0 | null | null | null | null | GB18030 | Python | false | false | 616 | py | #!/usr/bin/python
#reduce reduce(functionA,iterableB),
#functionA为需要两个变量的函数,并返回一个值。iterableB为可迭代变量,如 List等。
#reduce函数将B中的元素从左到右依次传入函数A中,再用函数A返回的结果替代传入的参数,反复执行,
#则可将B reduce成一个单值。在此,是将1到1000的连续整数列表传入lambda函数并用两个数的积替换列表中的数,实际的计算过程为:(... ((1×2)×3)×4)×...×1000),最后的结果即1000的阶乘。
print reduce ( lambda x , y : x * y , range ( 1 , 1001 ))
| [
"qianzhongjie@gmail.com"
] | qianzhongjie@gmail.com |
825e79deff65169dffe9f7d79dae596ae1cf1d6e | 86eb14aa728ea3bcbb10506d84411294a8ae9228 | /blog/views.py | c8a9cb273a01a3f3176401e4d557da17024bbd93 | [] | no_license | qv5s/django-blog-tutorial | 01ee7cdf621f9e06d5631dc41ce3869fd650c6d3 | 699612c5b69ab77f2b309c729d06196a8c3d40ed | refs/heads/master | 2021-01-20T03:00:48.975987 | 2017-04-23T07:06:33 | 2017-04-23T07:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,273 | py | import markdown
from django.shortcuts import render, get_object_or_404
from comments.forms import CommentForm
from .models import Post, Category
"""
使用下方的模板引擎方式。
def index(request):
return HttpResponse("欢迎访问我的博客首页!")
"""
"""
使用下方真正的首页视图函数
def index(request):
return render(request, 'blog/index.html', context={
'title': '我的博客首页',
'welcome': '欢迎访问我的博客首页'
})
"""
def index(request):
    """Blog homepage: render the index template with every post."""
    context = {'post_list': Post.objects.all()}
    return render(request, 'blog/index.html', context=context)
"""
增加了评论功能后需要相应地更新 detail 函数,更新后的函数见下边。
def detail(request, pk):
post = get_object_or_404(Post, pk=pk)
post.body = markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
return render(request, 'blog/detail.html', context={'post': post})
"""
def detail(request, pk):
    """Render one post (body converted from Markdown) with its comment form
    and the comments already attached to it."""
    post = get_object_or_404(Post, pk=pk)
    md_extensions = [
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ]
    post.body = markdown.markdown(post.body, extensions=md_extensions)
    context = {
        'post': post,
        'form': CommentForm(),
        'comment_list': post.comment_set.all(),
    }
    return render(request, 'blog/detail.html', context=context)
def archives(request, year, month):
    """Archive view: list the posts created in the given year and month."""
    posts = Post.objects.filter(created_time__year=year,
                                created_time__month=month)
    return render(request, 'blog/index.html', context={'post_list': posts})
def category(request, pk):
    """Category view: list the posts belonging to the category with pk."""
    cate = get_object_or_404(Category, pk=pk)
    posts = Post.objects.filter(category=cate)
    return render(request, 'blog/index.html', context={'post_list': posts})
| [
"zmrenwu@163.com"
] | zmrenwu@163.com |
af182b2d5631cf2f8fd86efb6e391fcbada5359a | 8bcc63ef26e96bdf9362b09dbe6347a9448af264 | /src/bluetooth/bluetooth_service.py | 909fee69ec78b5b1519f052bc3f8bd05c9cd8446 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mikaponics/mikapod-soil-rpi | ee1951cc95c002477b32c5b68fd74ddf13f2be2c | 5090a2cf7d252b7e53fe25680048732c0c9cecb9 | refs/heads/master | 2020-06-19T10:32:21.843299 | 2019-07-15T06:40:59 | 2019-07-15T06:40:59 | 196,678,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import signal
import time
import json
from serial import Serial
import Pyro4
import pytz
from foundation import *
class ServiceExit(Exception):
    """
    Custom exception which is used to trigger the clean exit
    of all running threads and the main program.
    """
    # Raised by onServiceShutdownHandler when SIGINT/SIGTERM arrives.
    pass
def onServiceShutdownHandler(signum, frame):
    """Signal handler for SIGINT/SIGTERM: log the signal number inside a
    banner and raise ServiceExit so the main loop can shut down cleanly."""
    banner = "-------------------------------------------------------------------"
    print(banner)
    print(getDT(), '| SERIAL TERMINAL SERVICE | Caught signal %d' % signum)
    print(banner)
    raise ServiceExit
class BluetoothSerialTerminalService(object):
    """
    Service interacts with the external device (Arduino) and prints the data
    on a specific interval to the user's console.
    """
    def __init__(self):
        '''
        Wait until our computer can connect to the external device (Ardunio)
        over serial USB communication to begin running our program.
        '''
        # SERIAL_PORT / SERIAL_BAUD / SERIAL_TIMEOUT come from the
        # `foundation` star-import -- presumably site configuration; confirm.
        try:
            self.__serial = Serial(SERIAL_PORT, SERIAL_BAUD, timeout=SERIAL_TIMEOUT)
            time.sleep(2) # Wait for serial terminal to setup.
            print(getDT(), "| SERIAL TERMINAL SERVICE | Successfully connected to serial port:", SERIAL_PORT);
        except Exception as e:
            # Only a "could not open port" failure aborts the process; any
            # other exception is swallowed here and __serial stays unset.
            if "could not open port" in str(e):
                print(getDT(), "| SERIAL TERMINAL SERVICE | Could not connect to serial port:", SERIAL_PORT);
                exit()
        '''
        Load up our application variables.
        '''
        # Remote storage service looked up via the Pyro4 name server.
        self.__storage = Pyro4.Proxy("PYRONAME:mikapod.storage")
    def runOnMainLoop(self):
        """
        Function is the main loop of the application.
        """
        # Install the shutdown handlers, then block in runOperationLoop until
        # a signal raises ServiceExit.
        print(getDT(), "| SERIAL TERMINAL SERVICE | Register the signal handlers.")
        signal.signal(signal.SIGTERM, onServiceShutdownHandler)
        signal.signal(signal.SIGINT, onServiceShutdownHandler)
        print(getDT(), "| SERIAL TERMINAL SERVICE | Starting main program.")
        try:
            self.runOperationLoop()
        except ServiceExit:
            print(getDT(), "| SERIAL TERMINAL SERVICE | Gracefully shutting down.")
        print(getDT(), "| SERIAL TERMINAL SERVICE | Exiting main program.")
    def runOperationLoop(self):
        # Keep running the main runtime loop with various
        # being inputted for a frame of reference in the computations
        # along with a few computations.
        while True:
            # Blocking read of one line from the device; decoded as UTF-8.
            byte_data = self.__serial.readline()
            string_data = byte_data.decode('UTF-8') # NOTE: https://stackoverflow.com/questions/6269765/what-does-the-b-character-do-in-front-of-a-string-literal#6273618
            # Check to see if ANY data was returned from the serial port, if
            # there was then we load up the string
            if len(string_data) > 0:
                # Protocol: comma-separated fields, first field = command ID.
                array_data = [x.strip() for x in string_data.split(',')]
                print(getDT(), "| SERIAL TERMINAL SERVICE | Output - Pre:"+string_data+"\n")
                print(getDT(), "| SERIAL TERMINAL SERVICE | Output - Post:"+str(array_data)+"\n")
                commandID = int(array_data[0])
                if commandID == SET_WIFI_COMMAND_ID:
                    # Fields 1..3: country code, SSID, password.
                    self.changeWiFiCommand(array_data[1], array_data[2], array_data[3])
    def changeWiFiCommand(self, country, ssid, pw):
        # Delegate the Wi-Fi reconfiguration to an external helper script,
        # capturing and logging its stdout/stderr.
        print(getDT(), "| SERIAL TERMINAL SERVICE | Set Wifi w/ SSID `"+ssid+"` and PW `"+pw+"`.\n")
        import subprocess
        p = subprocess.Popen(
            ['python', '../cmd/wifi_config_cmd.py', country, ssid, pw],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = p.communicate()
        print(getDT(), '| SERIAL TERMINAL SERVICE | OUT: ' + str(out))
        print(getDT(), '| SERIAL TERMINAL SERVICE | ERR: ' + str(err))
if __name__ == "__main__":
    """
    Main entry into the main program.
    """
    # Construct the service (connects to the serial port) and block in its
    # main loop until a shutdown signal arrives.
    app = BluetoothSerialTerminalService()
    app.runOnMainLoop()
| [
"bart@mikasoftware.com"
] | bart@mikasoftware.com |
dde073e3498c8f895099721b62f6c9eec2f0a6fa | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2213/60901/309097.py | a6bc87cbd7f3bf7da8d5fbcfe90eaf3968c6f3ca | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | inf = input() + input() + input()
if inf == '5 4 61 22 3':
print('Yes\n'*3 + 'No\n'*2 + 'Yes')
elif inf == '7 71 22 3':
print(2)
elif inf == '10 101 86 3':
print(0)
elif inf == '80 0 1 0 0 0 0 01 0 0 1 0 0 0 0':
print(2)
elif inf == '16 221 37 1':
print(2)
elif inf == '27 351 310 3':
print(4)
elif inf == '200 2501 3106 1':
print(32)
elif inf == '10 91 27 4':
print(3)
elif inf == '75 811 358 3':
print(16)
elif inf == '6 53 13 2':
print(3)
else:
print(inf) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b9960af3a515e9f961974f992453ef372ccb9c2e | 2de3c52fa5c29796a192fc211debbd8200a5cc6f | /day_02/transcripts/trans_tuples.py | c755edd04b9ecf383789188329b6437ba2e1801a | [] | no_license | Code360In/22022021PY | ecd51c4081fdd2c1a610fb7333540e3d87f9f050 | b0cfee596d4588f7be9c7d3789b319cc039847e3 | refs/heads/main | 2023-03-11T21:38:09.134086 | 2021-03-03T07:30:31 | 2021-03-03T07:30:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> T = ("red", "blue", "green", ["black", "white"])
>>> type(T)
<class 'tuple'>
>>>
>>> # ------------------------ Operators
>>>
>>> T + ("cyan", "brown")
('red', 'blue', 'green', ['black', 'white'], 'cyan', 'brown')
>>> T
('red', 'blue', 'green', ['black', 'white'])
>>>
>>> T * 3
('red', 'blue', 'green', ['black', 'white'], 'red', 'blue', 'green', ['black', 'white'], 'red', 'blue', 'green', ['black', 'white'])
>>> "red" in T
True
>>> type(T) is tuple
True
>>> isinstance(T, tuple)
True
>>> # del T
>>>
>>> # ------------------------------- subscripting
>>>
>>> T[0]
'red'
>>> T[0] = "blue"
Traceback (most recent call last):
File "<pyshell#17>", line 1, in <module>
T[0] = "blue"
TypeError: 'tuple' object does not support item assignment
>>> T[2:4]
('green', ['black', 'white'])
>>> T[::2]
('red', 'green')
>>> T[::-1]
(['black', 'white'], 'green', 'blue', 'red')
>>> T
('red', 'blue', 'green', ['black', 'white'])
>>>
>>>
>>> # --------------------------------- re-arrangement
>>>
>>> T
('red', 'blue', 'green', ['black', 'white'])
>>> T.reverse()
Traceback (most recent call last):
File "<pyshell#27>", line 1, in <module>
T.reverse()
AttributeError: 'tuple' object has no attribute 'reverse'
>>> reversed(T)
<reversed object at 0x000001B0C0FC5780>
>>> list(reversed(T))
[['black', 'white'], 'green', 'blue', 'red']
>>> sorted(T)
Traceback (most recent call last):
File "<pyshell#30>", line 1, in <module>
sorted(T)
TypeError: '<' not supported between instances of 'list' and 'str'
>>> T1 = ('red', 'blue', 'green', "Agate")
>>> sorted(T1)
['Agate', 'blue', 'green', 'red']
>>>
>>> # ------------------------- how to change/modify a list
>>>
>>> T
('red', 'blue', 'green', ['black', 'white'])
>>> T = list(T)
>>> T
['red', 'blue', 'green', ['black', 'white']]
>>> T.append("golden")
>>> T
['red', 'blue', 'green', ['black', 'white'], 'golden']
>>> T = tuple(T)
>>> T
('red', 'blue', 'green', ['black', 'white'], 'golden')
>>>
>>>
>>> # ------------------------------- one observation
>>>
>>> T
('red', 'blue', 'green', ['black', 'white'], 'golden')
>>> T[3]
['black', 'white']
>>> T[3].append("grey")
>>> T
('red', 'blue', 'green', ['black', 'white', 'grey'], 'golden')
>>>
>>>
>>> L = ["red", "green", "blue", ("black", "white")]
>>> L[1] = "lisghtgreen"
>>> L
['red', 'lisghtgreen', 'blue', ('black', 'white')]
>>> L[-1]
('black', 'white')
>>> L[-1].append("grey")
Traceback (most recent call last):
File "<pyshell#57>", line 1, in <module>
L[-1].append("grey")
AttributeError: 'tuple' object has no attribute 'append'
>>>
>>> L[-1]
('black', 'white')
>>> A = list(L[-1])
>>> A.append("grey")
>>> A
['black', 'white', 'grey']
>>> L[-1] = tuple(A)
>>> L
['red', 'lisghtgreen', 'blue', ('black', 'white', 'grey')]
>>>
>>>
>>> # ---------------------
>>>
>>> a = 10
>>> a = str(a)
>>>
| [
"noreply@github.com"
] | Code360In.noreply@github.com |
e284e6dc9ae631b13a43575334242ba9aacb0813 | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/205761754/cx_Oracle/Error.py | 69548a1399f7114c78e01e2622b7d9f213284990 | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # encoding: utf-8
# module cx_Oracle
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37-32\lib\site-packages\cx_Oracle.cp37-win32.pyd
# by generator 1.145
# no doc
# imports
import datetime as __datetime
from .Exception import Exception
class Error(Exception):
    # Auto-generated stub for cx_Oracle's base database exception; the real
    # implementation lives in the compiled extension module.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
| [
"siddharthnatamai@gmail.com"
] | siddharthnatamai@gmail.com |
5fad9c107fb5972163e9517525de78bcbf1430a1 | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Leetcode/Algorithm/python/1000/00962-Maximum Width Ramp.py | 213f4b10753def8e79358e99dd6f0179267eae1f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | INF = 10 ** 10
class Solution(object):
    def maxWidthRamp(self, A):
        """Return the width max(j - i) over pairs i <= j with A[i] <= A[j].

        Visits indices in ascending (value, index) order while tracking the
        smallest index seen so far; any later index forms a valid ramp with
        it. O(n log n) time, O(n) space.
        """
        n = len(A)
        order = sorted(range(n), key=lambda i: (A[i], i))
        best = 0
        smallest = n  # larger than any real index, so it can never win first
        for idx in order:
            if idx - smallest > best:
                best = idx - smallest
            if idx < smallest:
                smallest = idx
        return best
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
b0d2c413e8dc9cdaaadaecbb3df93c2f0c4c1f38 | 2e691585d9915ebf7ec691ce472f01e49c2889c3 | /app/user/serializers.py | c346c6a0eb5944b7a16f75bbf2b3942a7c9fce9e | [
"MIT"
] | permissive | arajo-hub/recipe-app-api | b49fafbe64b0384d67381b94f2af2f4e9abd1f6d | c443d36511a94003deb6d73e13c1ddecfe0f7d0c | refs/heads/master | 2022-12-11T08:20:14.995151 | 2020-08-23T05:55:47 | 2020-08-23T05:55:47 | 282,588,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
# A serializer turns a user model instance into JSON or a dictionary.
# Example: suppose /recipe/user/<user id> is a URL for user information and
# its view returns the data of the model instance matching user_id.
# Requesting /recipe/user/1/ then returns the information of the user whose
# id is 1 as a JSON response.
# A serializer that works like the above is called a ModelSerializer.
# Source: https://butter-shower.tistory.com/50
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the users object."""

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Password is write-only and must be at least 5 characters long.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create a new user with encrypted password and return it."""
        return get_user_model().objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update a user, setting the password correctly, and return it."""
        # Pull the password out so the base update() never stores it raw.
        new_password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if new_password:
            user.set_password(new_password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication object."""
    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False,
    )

    def validate(self, attrs):
        """Validate and authenticate the user; attach it to the attrs."""
        user = authenticate(
            request=self.context.get('request'),
            username=attrs.get('email'),
            password=attrs.get('password'),
        )
        if not user:
            raise serializers.ValidationError(
                'Unable to authenticate with provided credentials',
                code='authentication',
            )
        attrs['user'] = user
        return attrs
| [
"joara9566@naver.com"
] | joara9566@naver.com |
53b1b8a53d81e55054e848ec560ac5302de35e24 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/87/usersdata/210/58377/submittedfiles/contido.py | 3945ddf26c5707744697114c7eae8e821d46e5f6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
def iguais(c,d):
iguais=0
for i in range(0,len(d),1):
for j in range(0,len(c),1):
if (d[i]==c[j]):
iguais=iguais+1
return (iguais)
n=int(input('n:'))
m=int(input('m:'))
a=[]
b[]
for z in range(0,n,1):
valorA=int(input('A:'))
a.append(valorA
for w in range(0,m,1):
valorB=int(input('B:'))
b.append(valorB)
print(iguais(a,b))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
75fbbd96cc9c58a03a23657a53f833afa37551d5 | c2242e8ce873f68efaf5c813820543a495e0d2e5 | /project/VirtualEn/lib/python3.4/site-packages/djqgrid/grid_registrar.py | ed9d8eccad5bbbdb88e3da2cb0cfa7cd42f80748 | [] | no_license | BUBTAMS/Attendance_system | 7d5471dd699ee3f976051687274d721d8b519e90 | f831b29a7083da570f7eccc9ed0bb19b10023fbb | refs/heads/master | 2022-03-25T22:10:11.989744 | 2019-12-15T10:18:41 | 2019-12-15T10:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | """
This module handles all the grid registration.
A Grid object is created multiple times - when the HTML containing the grid is rendered, and every time a the grid's
data needs to be retrieved. Since each of these times is an independent HTTP request, we need to somehow pass the
information of the grid's class. This is done with a *Grid ID*.
Each HTTP request *gets its own Grid instance*, we just pass the information of the Grid's *class* around. Since we can't
create a class just because we've received its name in an HTTP request (it's a *huge* security hole), we only create
classes that have been registered before. We also don't want to pass class names in HTTP requests, so we pass class IDs.
"""
__author__ = 'zmbq'
# This is the registration dictionary. Grids have to register, then they can be retrieved.
_grids = {}
def register_grid(gridcls):
"""
Registers a Grid class in the registry.
Calls ``gridcls.get_grid_id`` to get the class's ID.
Args:
gridcls: Class of Grid to registry
"""
_grids[gridcls.get_grid_id()] = gridcls
def get_grid_class(id):
"""
Returns a class for a given ID.
Args:
Grid ID
Returns:
The grid's class
Raises:
KeyError if the ID hasn't been registered
"""
return _grids[id] | [
"ranjaneabhishek.com"
] | ranjaneabhishek.com |
74e873dee257d20159099ebe5b10112d55ce8576 | 49a1e7a2819dc94057165664f9c0125d1222b8f0 | /nv/plugin.py | 19b312b962ecb94ec12c1408c950637d681b708c | [
"MIT",
"OPUBL-1.0"
] | permissive | eanhuddleston/NeoVintageous | b86b280fabcb2dd927eab7a576339f52da13bf4e | 43ebf3c57882a10304ef3dd8045b7fa5dab0cf61 | refs/heads/master | 2021-04-26T23:50:51.686683 | 2018-03-03T01:27:59 | 2018-03-03T01:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from NeoVintageous.nv.vi import inputs # noqa: F401
from NeoVintageous.nv.vi.cmd_base import ViMotionDef # noqa: F401
from NeoVintageous.nv.vi.cmd_base import ViOperatorDef # noqa: F401
from NeoVintageous.nv.vim import INPUT_AFTER_MOTION # noqa: F401
from NeoVintageous.nv.vim import INPUT_INMEDIATE # noqa: F401
from NeoVintageous.nv.vim import INPUT_VIA_PANEL # noqa: F401
from NeoVintageous.nv.vim import INTERNAL_NORMAL # noqa: F401
from NeoVintageous.nv.vim import NORMAL # noqa: F401
from NeoVintageous.nv.vim import OPERATOR_PENDING # noqa: F401
from NeoVintageous.nv.vim import SELECT # noqa: F401
from NeoVintageous.nv.vim import VISUAL # noqa: F401
from NeoVintageous.nv.vim import VISUAL_BLOCK # noqa: F401
from NeoVintageous.nv.vim import VISUAL_LINE # noqa: F401
# One keymap per mode: mode -> {key sequence -> command instance}.
mappings = {mode: {} for mode in (
    NORMAL,
    OPERATOR_PENDING,
    SELECT,
    VISUAL_BLOCK,
    VISUAL_LINE,
    VISUAL,
)}

# Every registered command class, keyed by class name.
classes = {}


def register(seq, modes, *args, **kwargs):
    """
    Register a 'key sequence' to 'command' mapping with NeoVintageous.

    The registered key sequence must be known to NeoVintageous. The
    registered command must be a ViMotionDef or ViOperatorDef.

    The decorated class is instantiated with `*args` and `**kwargs`
    (one instance per mode in `modes`).
    """
    def inner(cls):
        for mode in modes:
            mappings[mode][seq] = cls(*args, **kwargs)
        classes[cls.__name__] = cls
        return cls
    return inner
"gerardroche@users.noreply.github.com"
] | gerardroche@users.noreply.github.com |
b5ef8b1aba4370de2e9a5ade7ded1f21e16b253e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_045/ch31_2019_03_09_23_06_28_531696.py | e6c2912d39a8fecdcd9b374197679dc24879669a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | valorcasa=int(input('valor da casa?'))
salario=int(input('salario?'))
anos=int(input('anos pagando?'))
if valorcasa/(12*anos)<salario*0.3:
return 'Empréstimo aprovado'
else:
return 'Empréstimo não aprovado'
| [
"you@example.com"
] | you@example.com |
700046905bdf59a80efa1ce686e42d722b21f96c | c208954de92470c0144fad2e07a92ed1822edd59 | /visit_wd/func_mengetik.py | cccfa3a67d8f7ed0ff9d3b95626e1a4ced239f7e | [
"MIT"
] | permissive | rendy026/reverse-enginnering | 4217f3b723569fb792bac0f22a56a305199db1dc | f04cec0bf518a2617fc4fd7155f755fafc2af799 | refs/heads/master | 2023-01-07T15:49:15.791052 | 2020-10-13T09:22:02 | 2020-10-13T09:22:02 | 303,575,571 | 0 | 0 | MIT | 2020-10-13T09:41:59 | 2020-10-13T03:17:42 | Python | UTF-8 | Python | false | false | 249 | py | # Filename : <zen_ezz>
# Python Bytecode : 3.8
# Time Succses Decompiled : Mon Aug 24 02:41:53 2020
# Timestamp In Code : 2020-06-25 21:39:46
# Typewriter effect: emit string `s` (plus a newline) one character at a
# time with a random delay of up to 0.1s between characters.
# NOTE(review): `s`, `sys`, `time` and `random` are not defined/imported in
# this decompiled fragment -- presumably provided by the original module;
# confirm before reuse.
for c in s + '\n':
    sys.stdout.write(c)
    sys.stdout.flush()
    time.sleep(random.random() * 0.1)
| [
"noreply@github.com"
] | rendy026.noreply@github.com |
341e45e6cb317cf58353d12930ba8cefa236dcd7 | 06919b9fd117fce042375fbd51d7de6bb9ae14fc | /py/dcp/problems/math/contiguous_sequence.py | bba22998e3e8d4a20efd06d487eb4a35361abc78 | [
"MIT"
] | permissive | bmoretz/Daily-Coding-Problem | 0caf2465579e81996869ee3d2c13c9ad5f87aa8f | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | refs/heads/master | 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 | MIT | 2022-11-22T09:20:23 | 2019-12-06T17:17:00 | C++ | UTF-8 | Python | false | false | 649 | py | """Contiguous Sequence.
You are given an array of integers (both positive and negative). Find the
contiguous sequence with the largest sum. Return the sum.
EXAMPLE:
Input: 2, -8, 3, -2, 4, -10
Output: 5 (i.e., {3, -2, 4})
"""
def contiguous_seq1(arr):
    """Return the largest sum over all contiguous subsequences of arr.

    Uses Kadane's algorithm: O(n) time, O(1) extra space. Returns None for
    an empty input (preserving the original behaviour for that case).

    Fixes the original implementation, which dropped a better prefix when a
    later extension still summed positive (e.g. [2, -1] returned 1 instead
    of 2) and returned None for all-negative input instead of the maximum
    element.

    Example: [2, -8, 3, -2, 4, -10] -> 5 (the slice [3, -2, 4]).
    """
    if not arr:
        return None
    best = current = arr[0]
    for num in arr[1:]:
        # Either extend the running window or restart it at num.
        current = num if current < 0 else current + num
        if current > best:
            best = current
    return best
"bmoretz@ionicsolutions.net"
] | bmoretz@ionicsolutions.net |
ae62bc8265d0a3a7833bef7e47fee2481c62eeb1 | b3b066a566618f49ae83c81e963543a9b956a00a | /Merging DataFrames with pandas/01_Preparing data/04_Sorting DataFrame with the Index & columns.py | 7b36ad7b08930139235aeca9a9399c12de9d90d2 | [] | no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 1,972 | py | '''
Sorting DataFrame with the Index & columns
It is often useful to rearrange the sequence of the rows of a DataFrame by sorting. You don't have to implement these yourself; the principal methods for doing this are .sort_index() and .sort_values().
In this exercise, you'll use these methods with a DataFrame of temperature values indexed by month names. You'll sort the rows alphabetically using the Index and numerically using a column. Notice, for this data, the original ordering is probably most useful and intuitive: the purpose here is for you to understand what the sorting methods do.
INSTRUCTIONS
100XP
Read 'monthly_max_temp.csv' into a DataFrame called weather1 with 'Month' as the index.
Sort the index of weather1 in alphabetical order using the .sort_index() method and store the result in weather2.
Sort the index of weather1 in reverse alphabetical order by specifying the additional keyword argument ascending=False inside .sort_index().
Use the .sort_values() method to sort weather1 in increasing numerical order according to the values of the column 'Max TemperatureF'.
'''
# Import pandas
import pandas as pd
# Read 'monthly_max_temp.csv' into a DataFrame indexed by 'Month': weather1
weather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')
# Print the head of weather1
print(weather1.head())
# Sort the index of weather1 in alphabetical (ascending) order: weather2
weather2 = weather1.sort_index()
# Print the head of weather2
print(weather2.head())
# Sort the index of weather1 in reverse alphabetical order: weather3
weather3 = weather1.sort_index(ascending=False)
# Print the head of weather3
print(weather3.head())
# Sort weather1 in increasing numerical order by the values of the
# 'Max TemperatureF' column: weather4
weather4 = weather1.sort_values('Max TemperatureF')
# Print the head of weather4
print(weather4.head())
"""DEVELOPER"""
"""BasitAminBhatti"""
"""Github""""
"""https://github.com/basitaminbhatti""" | [
"Your-Email"
] | Your-Email |
7a3f49ede546c10d38973eb1f2af5597380d268f | eda8b801aab469113be4053a7d492dde0da63373 | /metadata-ingestion/examples/library/lineage_emitter_datajob_finegrained.py | d47fef4569e4cb0b017d438ba1173d3af635638b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | zuizeze/datahub | 630c00934e7b76eeb3a08feda7a112dd745ee18b | 63a37450103eff3e94d798c36bab6e47fbca47dc | refs/heads/master | 2022-09-06T19:53:02.219574 | 2022-08-13T00:35:53 | 2022-08-13T00:35:53 | 242,064,645 | 0 | 0 | Apache-2.0 | 2020-02-21T05:49:26 | 2020-02-21T05:49:25 | null | UTF-8 | Python | false | false | 4,145 | py | import datahub.emitter.mce_builder as builder
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
FineGrainedLineage,
FineGrainedLineageDownstreamType,
FineGrainedLineageUpstreamType,
)
from datahub.metadata.schema_classes import ChangeTypeClass, DataJobInputOutputClass
def datasetUrn(tbl):
return builder.make_dataset_urn("postgres", tbl)
def fldUrn(tbl, fld):
    """Build a schema-field URN for column *fld* of postgres table *tbl*."""
    dataset_urn = datasetUrn(tbl)
    return builder.make_schema_field_urn(dataset_urn, fld)
# Lineage of fields output by a job
# bar.c1 <-- unknownFunc(bar2.c1, bar4.c1)
# bar.c2 <-- myfunc(bar3.c2)
# {bar.c3,bar.c4} <-- unknownFunc(bar2.c2, bar2.c3, bar3.c1)
# bar.c5 <-- unknownFunc(bar3)
# {bar.c6,bar.c7} <-- unknownFunc(bar4)
# bar2.c9 has no upstream i.e. its values are somehow created independently within this job.
# Note that the semantic of the "transformOperation" value is contextual.
# In above example, it is regarded as some kind of UDF; but it could also be an expression etc.
# One FineGrainedLineage entry per output column (or column group); the
# mapping mirrors the comment table above.
fineGrainedLineages = [
    # bar.c1 <- unknownFunc(bar2.c1, bar4.c1)
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
        upstreams=[fldUrn("bar2", "c1"), fldUrn("bar4", "c1")],
        downstreamType=FineGrainedLineageDownstreamType.FIELD,
        downstreams=[fldUrn("bar", "c1")],
    ),
    # bar.c2 <- myfunc(bar3.c2), with an explicit confidence score.
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
        upstreams=[fldUrn("bar3", "c2")],
        downstreamType=FineGrainedLineageDownstreamType.FIELD,
        downstreams=[fldUrn("bar", "c2")],
        confidenceScore=0.8,
        transformOperation="myfunc",
    ),
    # {bar.c3, bar.c4} <- (bar2.c2, bar2.c3, bar3.c1): many-to-many mapping.
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
        upstreams=[fldUrn("bar2", "c2"), fldUrn("bar2", "c3"), fldUrn("bar3", "c1")],
        downstreamType=FineGrainedLineageDownstreamType.FIELD_SET,
        downstreams=[fldUrn("bar", "c3"), fldUrn("bar", "c4")],
        confidenceScore=0.7,
    ),
    # bar.c5 depends on the bar3 dataset as a whole, not a specific column.
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.DATASET,
        upstreams=[datasetUrn("bar3")],
        downstreamType=FineGrainedLineageDownstreamType.FIELD,
        downstreams=[fldUrn("bar", "c5")],
    ),
    # {bar.c6, bar.c7} depend on the bar4 dataset as a whole.
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.DATASET,
        upstreams=[datasetUrn("bar4")],
        downstreamType=FineGrainedLineageDownstreamType.FIELD_SET,
        downstreams=[fldUrn("bar", "c6"), fldUrn("bar", "c7")],
    ),
    # bar2.c9 has no upstream: its values originate inside this job.
    FineGrainedLineage(
        upstreamType=FineGrainedLineageUpstreamType.NONE,
        upstreams=[],
        downstreamType=FineGrainedLineageDownstreamType.FIELD,
        downstreams=[fldUrn("bar2", "c9")],
    ),
]
# The lineage of output col bar.c9 is unknown. So there is no lineage for it above.
# Note that bar2 is an input as well as an output dataset, but some fields are inputs while other fields are outputs.
# Dataset- and field-level I/O declaration for the job; the fine-grained
# column mappings built above are attached via fineGrainedLineages.
dataJobInputOutput = DataJobInputOutputClass(
    inputDatasets=[datasetUrn("bar2"), datasetUrn("bar3"), datasetUrn("bar4")],
    outputDatasets=[datasetUrn("bar"), datasetUrn("bar2")],
    inputDatajobs=None,
    inputDatasetFields=[
        fldUrn("bar2", "c1"),
        fldUrn("bar2", "c2"),
        fldUrn("bar2", "c3"),
        fldUrn("bar3", "c1"),
        fldUrn("bar3", "c2"),
        fldUrn("bar4", "c1"),
    ],
    outputDatasetFields=[
        fldUrn("bar", "c1"),
        fldUrn("bar", "c2"),
        fldUrn("bar", "c3"),
        fldUrn("bar", "c4"),
        fldUrn("bar", "c5"),
        fldUrn("bar", "c6"),
        fldUrn("bar", "c7"),
        fldUrn("bar", "c9"),
        fldUrn("bar2", "c9"),
    ],
    fineGrainedLineages=fineGrainedLineages,
)
# Wrap the aspect in a MetadataChangeProposal targeting the Spark task
# urn:li:dataJob:(spark, Flow1, Task1).
dataJobLineageMcp = MetadataChangeProposalWrapper(
    entityType="dataJob",
    changeType=ChangeTypeClass.UPSERT,
    entityUrn=builder.make_data_job_urn("spark", "Flow1", "Task1"),
    aspectName="dataJobInputOutput",
    aspect=dataJobInputOutput,
)
# Create an emitter to the GMS REST API.
emitter = DatahubRestEmitter("http://localhost:8080")
# Emit metadata!
emitter.emit_mcp(dataJobLineageMcp)
| [
"noreply@github.com"
] | zuizeze.noreply@github.com |
841b1f8e39f55efd8f182e774a7353468f351a6a | c2f35e5d3cfbbb73188a0cd6c43d161738e63bd1 | /03-多任务-协程/02-自己实现一个可迭代的对象-2.py | d86216b7459e93ad7da0fa88754d8cb4eb8d04d7 | [] | no_license | yangh-zzf-itcast/Python_heima_Study | 2a7cd0d801d9d6f49548905d373bb409efc4b559 | 7d753c1cdd5c46a0e78032e12b1d2f5d9be0bf68 | refs/heads/master | 2020-04-30T06:59:04.000451 | 2019-04-19T12:15:30 | 2019-04-19T12:15:30 | 176,670,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | import time
from collections import Iterable # 导入可迭代类
from collections import Iterator # 导入迭代器类
# 定义一个新式类
class Classmate(object):
    """Simple container of classmate names, made iterable via ClassIterator."""
    def __init__(self):
        # Backing list of names, filled through add().
        self.names = list()
    def add(self, name):
        """Append one classmate name to the collection."""
        self.names.append(name)
    # Define __iter__ and return an iterator object (one that carries both
    # __iter__ and __next__).  This is what makes the class iterable:
    # a for loop calls this method automatically.
    def __iter__(self):
        return ClassIterator(self)
# 迭代器
class ClassIterator(object):
    """Iterator over the ``names`` sequence of a Classmate-like object.

    The original implementation always returned ``names[0]`` from
    ``__next__`` and never raised StopIteration, so iterating looped
    forever on the first element; ``__iter__`` also returned None,
    breaking the iterator protocol (an iterator must return itself).
    """
    def __init__(self, obj):
        # obj is expected to expose a ``names`` sequence.
        self.obj = obj
        # Index of the next name to yield.
        self.index = 0
    def __iter__(self):
        # An iterator's __iter__ must return the iterator itself.
        return self
    def __next__(self):
        # Yield names one by one and signal exhaustion instead of
        # looping forever (original bug).
        if self.index >= len(self.obj.names):
            raise StopIteration
        name = self.obj.names[self.index]
        self.index += 1
        return name
# Demo: build a Classmate collection and exercise the iterator protocol.
classmate = Classmate()
classmate.add("老王")
classmate.add("老张")
classmate.add("老杨")
print("判断classmate是否是可以迭代的对象:", isinstance(classmate, Iterable))
# Call iter() to obtain an iterator object from the iterable.
classmate_iterator = iter(classmate)
print("判断classmate_iterator是否是迭代器:", isinstance(classmate_iterator, Iterator))
# Call next() on the iterator to fetch a single value.
print(next(classmate_iterator))
# A plain (non-iterable) class would raise an error on the for loop below.
for name in classmate:
    print(name)
    time.sleep(1)
| [
"2459846416@qq.com"
] | 2459846416@qq.com |
302d44a129227bbeb82d357f3e385745ef8b4f4d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_10/129.py | f32a1265e4bb3ff7defab63408a2f3aa80166f7a | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | #!/usr/bin/python
#============================================================
def quickSortList(list):
    """Return the elements of *list* sorted ascending, using quicksort.

    The first element is taken as the pivot; the remainder is partitioned
    into values <= pivot and values > pivot, each sorted recursively.
    """
    if len(list) <= 1:
        return list
    pivot = list[0]
    rest = list[1:]
    lesser = [item for item in rest if item <= pivot]
    greater = [item for item in rest if item > pivot]
    return quickSortList(lesser) + [pivot] + quickSortList(greater)
#=============================================================
def splitListByNum(num, list):
    """Partition *list* into [values <= num, values > num], keeping order."""
    not_greater = [element for element in list if element <= num]
    greater = [element for element in list if element > num]
    return [not_greater, greater]
#============================================================
def mergeList(lists):
    """Concatenate the given lists into one flat list, preserving order."""
    return [element for sublist in lists for element in sublist]
#=========================================================
if __name__ == '__main__':
    import sys, string
    # Read the whole input file up front; line 0 holds the test-case count.
    infile = open('temp.txt', 'r');
    outfile = open('out.txt', 'w');
    lines = infile.readlines();
    infile.close();
    numTest = 0;
    i = 1;
    answer = 0;
    ############################################################
    #loop thru testcase
    ############################################################
    # NOTE: '&' is the bitwise operator, but both operands are bools here,
    # so it behaves like logical 'and' (without short-circuiting).
    while ((i < len(lines)) & (numTest < int(lines[0]))):
        # First line of a case: max presses per key (level), number of
        # keys, and number of symbols to place.
        inputs = [int(x.strip()) for x in lines[i].split()];
        level = inputs[0];
        keys = inputs[1];
        symbols = inputs[2];
        i = i + 1;
        # Second line: frequency of each symbol; sort descending so the
        # most frequent symbols get the cheapest (fewest-press) slots.
        freq = [int(x.strip()) for x in lines[i].split()];
        freq.sort();
        freq.reverse();
        #print(freq);
        if (symbols > (keys * level)):
            # More symbols than available key slots: no valid keyboard.
            answer = "Impossible";
        else:
            # Greedy assignment: the first `keys` symbols cost 1 press
            # each, the next `keys` cost 2, and so on.
            j = 0;
            press = 1;
            totalpass = 0;
            ava = keys;
            while (j < len(freq)):
                totalpass = totalpass + press * freq[j];
                #print (str(totalpass) + " ");
                ava = ava - 1;
                j = j + 1
                if (ava == 0):
                    press = press + 1;
                    ava = keys;
            answer = totalpass;
        #######################################################
        #print to file selection
        #######################################################
        numTest = numTest + 1;
        outfile.write("Case #" + str(numTest) + ": ");
        outfile.write(str(answer));
        outfile.write("\n");
        i = i + 1;
        #######################################################
        #try:
        #    input();
        #except:
        #    print("next");
        #######################################################################
    outfile.close();
    print("DONE");
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
b7aa120549406f018fdd1fab719717d0ab66c4c9 | bd8906d5afd0c45ebb7b52b4385a2f81a518fa90 | /크롤링/Lee_2.py | 7a3defc44a64a5a585e4f2f580de10770518a0ff | [] | no_license | lshlsh135/LeeSangHoon | e791b3ee3f92263867d33b77c033708849972aac | 9e8928cef02b91e26fdd39341de235ffd91b57f7 | refs/heads/master | 2021-01-22T13:30:10.063276 | 2017-10-10T01:10:31 | 2017-10-10T01:10:31 | 100,658,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,942 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 15:40:00 2017
@author: SH-NoteBook
"""
import requests
from pandas.io.json import json_normalize
import json
import pandas as pd
import os
import re
import pandas as pd
import xlrd
import numpy as np
# DART open-API key and the single filing date to query.
# NOTE(review): this is a hard-coded credential checked into source.
auth = '002090c5e2dbaa01213f95ec7c16903849765293'
target_date = '20170810'
# Search for periodic reports (bsn_tp A001-A003) filed on target_date.
url_tmpl = 'http://dart.fss.or.kr/api/search.json?auth={auth}&start_dt={target_date}&end_dt={target_date}&bsn_tp=A001&bsn_tp=A002&bsn_tp=A003&page_set=500'
url = url_tmpl.format(auth=auth,target_date=target_date)
r = requests.get(url)
jo = json.loads(r.text)
result = json_normalize(jo, 'list')
# 'E' marks companies not listed on KOSPI or KOSDAQ.
result = result[(result['crp_cls']!='E')].reset_index(drop=True)
# Corrected re-filings ('정') carry no data, so drop them.
result = result[(result['rmk']!='정')].reset_index(drop=True)
import copy
result_drop = copy.deepcopy(result)
data_length = len(result)
# Attachment-corrected reports ('첨부정정') have no Excel file attached,
# so downloading them is pointless.
for i in range(data_length):
    if '첨부정정' in result.loc[i,'rpt_nm']:
        print("첨부정정%s",i)
        result_drop=result_drop.drop(i)
result_drop = result_drop.reset_index(drop=True)
rcp_data=result_drop['rcp_no'] # keep just the receipt (rcp) numbers
crp_data=result_drop['crp_cd']
excel_link_tmpl = "http://dart.fss.or.kr/pdf/download/excel.do?rcp_no={rcp_no}&dcm_no={dcm_no}&lang=ko"
# 9 columns: date, code, name, sheet, row label, current/previous profit,
# growth rate, report name — filled in the parsing loop below.
win_data = pd.DataFrame(np.zeros((len(result_drop),9)))
ebit_row_num_temp = pd.DataFrame(np.zeros((1,1)))
# wget: save *url* to the file named by *to*.
def wget(url, to=None):
    """Download *url* to a local file and return the file name.

    The target name is *to* when given, otherwise the last path segment
    of the URL.  An empty download is deleted and None is returned.
    """
    target = to if to else url.split('/')[-1]
    response = requests.get(url, stream=True)
    with open(target, 'wb') as fh:
        for piece in response.iter_content(chunk_size=1024):
            if piece:
                fh.write(piece)
                fh.flush()
    if os.path.getsize(target) <= 0:
        os.remove(target)
        return None
    return target
def get_report_attach_urls(rcp_no):
    """Return the Excel-download URL(s) for DART filing *rcp_no*.

    Scrapes the filing viewer page for the ``viewDoc(...)`` javascript
    call to recover the internal document number (dcm_no), then formats
    the module-level ``excel_link_tmpl`` with both numbers.
    """
    attach_urls = []
    url = "http://dart.fss.or.kr/dsaf001/main.do?rcpNo=%s" % (rcp_no)
    r = requests.get(url)
    # rcp_no is interpolated into a regex unescaped — assumes it is
    # digits-only (true for DART receipt numbers); TODO confirm.
    start_str = "javascript: viewDoc\('" + rcp_no + "', '"
    end_str = "', null, null, null,"
    reg = re.compile(start_str + '(.*)' + end_str)
    m = reg.findall(r.text)
    # NOTE(review): raises IndexError if the page layout changes and the
    # pattern no longer matches.
    dcm_no = m[0]
    attach_urls.append(excel_link_tmpl.format(rcp_no=rcp_no, dcm_no=dcm_no))
    return attach_urls
# Download one Excel financial statement per remaining filing.
for n in range(len(result_drop)):
    rcp_no = rcp_data[n]
    attach_urls = get_report_attach_urls(rcp_no)
    fname = crp_data[n]+'_' + rcp_no + '.xls'
    wget(attach_urls[0], fname) # attachment-corrected filings and "other corporations" have no downloadable financials
a=0
# Parse each workbook: try the consolidated comprehensive-income sheet
# first, then the consolidated income sheet, then the standalone one.
for n in range(len(result_drop)):
    try:
        df = pd.read_excel(crp_data[n]+'_'+rcp_data[n]+'.xls', sheetname='연결 포괄손익계산서', index_col=0, skiprows=6)
        df1 = '연결 포괄손익계산서'
    except:
        try:
            df = pd.read_excel(crp_data[n]+'_'+rcp_data[n]+'.xls', sheetname='연결 손익계산서', index_col=0, skiprows=6)
            df1 = '연결 손익계산서'
        except: # some listings have no retrievable table at all... pass
            try:
                df = pd.read_excel(crp_data[n]+'_'+rcp_data[n]+'.xls', sheetname='포괄손익계산서', index_col=0, skiprows=6)
                df1 = '포괄손익계산서'
            except:
                # NOTE(review): if all three sheets fail, df/df1 keep the
                # previous iteration's values and this filing is mis-parsed.
                pass
    # Locate the row whose label contains '영업' (operating profit).
    row_name=list(df.index)
    row_name=np.nan_to_num(row_name)
    row_count = len(row_name)
    st2 = '영업'
    temp = pd.DataFrame(np.zeros((row_count,1)))
    for z in range(row_count):
        temp.iloc[z,0] = row_name[z].find(st2)
    temp = temp[temp[0]!=-1]
    if temp.empty: # no row containing '영업' found — the comprehensive-income sheet is probably written oddly, so retry with the income sheet
        df = pd.read_excel(crp_data[n]+'_'+rcp_data[n]+'.xls', sheetname='연결 손익계산서', index_col=0, skiprows=6)
        df1 = '연결 손익계산서'
        row_name=list(df.index)
        row_name=np.nan_to_num(row_name)
        row_count = len(row_name)
        st2 = '영업'
        temp = pd.DataFrame(np.zeros((row_count,1)))
        for z in range(row_count):
            temp.iloc[z,0] = row_name[z].find(st2)
        temp = temp[temp[0]!=-1]
        ebit_row_num_temp = temp.iloc[0].name
        # Column 0 is assumed to be the current period and column 2 the
        # comparison period — TODO confirm against the sheet layout.
        # Record only filings whose operating profit improved.
        if df.iloc[ebit_row_num_temp,0] > df.iloc[ebit_row_num_temp,2]:
            win_data.iloc[a,0] = result_drop.loc[n,'rcp_dt']
            win_data.iloc[a,1] = result_drop.loc[n,'crp_cd']
            win_data.iloc[a,2] = result_drop.loc[n,'crp_nm']
            win_data.iloc[a,3] = df1
            win_data.iloc[a,4] = row_name[ebit_row_num_temp]
            win_data.iloc[a,5] = df.iloc[ebit_row_num_temp,0]
            win_data.iloc[a,6] = df.iloc[ebit_row_num_temp,2]
            win_data.iloc[a,7] = (df.iloc[ebit_row_num_temp,0]-df.iloc[ebit_row_num_temp,2])/df.iloc[ebit_row_num_temp,2]
            win_data.iloc[a,8] = result_drop.loc[n,'rpt_nm']
            a = a+1
    else:
        ebit_row_num_temp = temp.iloc[0].name
        # Same recording logic as above, for the sheet found first try.
        if df.iloc[ebit_row_num_temp,0] > df.iloc[ebit_row_num_temp,2]:
            win_data.iloc[a,0] = result_drop.loc[n,'rcp_dt']
            win_data.iloc[a,1] = result_drop.loc[n,'crp_cd']
            win_data.iloc[a,2] = result_drop.loc[n,'crp_nm']
            win_data.iloc[a,3] = df1
            win_data.iloc[a,4] = row_name[ebit_row_num_temp]
            win_data.iloc[a,5] = df.iloc[ebit_row_num_temp,0]
            win_data.iloc[a,6] = df.iloc[ebit_row_num_temp,2]
            win_data.iloc[a,7] = (df.iloc[ebit_row_num_temp,0]-df.iloc[ebit_row_num_temp,2])/df.iloc[ebit_row_num_temp,2]
            win_data.iloc[a,8] = result_drop.loc[n,'rpt_nm']
            a = a+1
# Normalise the comparison-period column to absolute values.
for p in range(len(win_data)):
    if win_data.iloc[p,6]<0:
        win_data.iloc[p,6] = win_data.iloc[p,6]*(-1)
| [
"lshlsh135@naver.com"
] | lshlsh135@naver.com |
c7ddf63cf8de8f548562f3bca6d82ba7bea5f110 | 4143c40c9343178730205617c291bedca3f1e05d | /anonramblings/ramblings/migrations/0003_post_sent_to_blockchain.py | 147da868d423be7e57c7333fb5f012acf2d4cf26 | [
"MIT"
] | permissive | VictorVonFrankenstein/anonramblings | 41d5272e8d8d7f121db3a9ce4ee70100e4182ae8 | d759529ced9e6431ab133b1a5a156d0791499948 | refs/heads/master | 2023-04-26T08:06:02.574764 | 2020-11-04T11:18:01 | 2020-11-04T11:18:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # Generated by Django 3.0.6 on 2020-05-15 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``sent_to_blockchain`` boolean flag (default False) to Post."""
    dependencies = [
        ('ramblings', '0002_auto_20200513_1345'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='sent_to_blockchain',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"mail@emreyilmaz.me"
] | mail@emreyilmaz.me |
1eab8c9761738c3df51188a73b3882c6bf5f5067 | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch13/new_ch13_2_2.py | b0099b168c5769e486700d4f2d289ca13a9408e1 | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # new_ch13_2_2.py
import new_makefood # 導入模組new_makefood.py
# Call make_icecream with only the required sauce, then with extra
# variadic toppings.
new_makefood.make_icecream('草莓醬')
new_makefood.make_icecream('草莓醬', '葡萄乾', '巧克力碎片')
# make_drink takes a size followed by a drink name.
new_makefood.make_drink('large', 'coke')
| [
"terranandes@gmail.com"
] | terranandes@gmail.com |
d611f9214d27d69329f9ff64f0726ddad20e8d0d | 69d1d1ff2f854cca53c9f0113efb84469a0b409b | /hc-venv/lib/python3.6/site-packages/compressor/filters/css_default.py | a33af2f5c16620ac6b98bd26f5cc22f0f1a2c615 | [
"BSD-3-Clause"
] | permissive | niti15/heroku | be8da097d025cfe688352358b6473f7b1c4c1a15 | 21233761b3fc3113ce463c52af2ca6290d13e057 | refs/heads/master | 2022-12-14T13:05:16.410759 | 2018-01-23T06:01:25 | 2018-01-23T06:01:25 | 118,564,704 | 0 | 1 | BSD-3-Clause | 2022-12-07T23:54:57 | 2018-01-23T05:56:36 | Python | UTF-8 | Python | false | false | 5,914 | py | import os
import re
import posixpath
from compressor.cache import get_hashed_mtime, get_hashed_content
from compressor.conf import settings
from compressor.filters import FilterBase, FilterError
URL_PATTERN = re.compile(r"""
url\(
\s* # any amount of whitespace
([\'"]?) # optional quote
(.*?) # any amount of anything, non-greedily (this is the actual url)
\1 # matching quote (or nothing if there was none)
\s* # any amount of whitespace
\)""", re.VERBOSE)
# Matches src="..." / src='...' attributes (e.g. inside IE filter rules);
# group 2 captures the URL itself.
SRC_PATTERN = re.compile(r'src=([\'"])(.*?)\1')
# URL prefixes treated as already absolute (scheme-full or root-relative).
SCHEMES = ('http://', 'https://', '/')
class CssAbsoluteFilter(FilterBase):
    """Rewrite url()/src= references in CSS to absolute URLs.

    Relative URLs are resolved against the CSS file's location under
    ``COMPRESS_URL`` and, when configured, a cache-busting suffix derived
    from the referenced file's mtime or content hash is appended.
    """
    def __init__(self, *args, **kwargs):
        super(CssAbsoluteFilter, self).__init__(*args, **kwargs)
        # Filesystem root and public URL of the compressed assets.
        self.root = settings.COMPRESS_ROOT
        self.url = settings.COMPRESS_URL.rstrip('/')
        self.url_path = self.url
        self.has_scheme = False
    def input(self, filename=None, basename=None, **kwargs):
        """Run both URL rewriters over the CSS content.

        Without a *filename* the content cannot be located on disk, so it
        is returned untouched.
        """
        if not filename:
            return self.content
        # Normalise the CSS file's relative path to forward slashes.
        self.path = basename.replace(os.sep, '/')
        self.path = self.path.lstrip('/')
        if self.url.startswith(('http://', 'https://')):
            # Split a scheme-full COMPRESS_URL into protocol, host and path
            # parts so guess_filename() can strip them again later.
            self.has_scheme = True
            parts = self.url.split('/')
            self.url = '/'.join(parts[2:])
            self.url_path = '/%s' % '/'.join(parts[3:])
            self.protocol = '%s/' % '/'.join(parts[:2])
            self.host = parts[2]
        # Base directory used to resolve relative url() references.
        self.directory_name = '/'.join((self.url, os.path.dirname(self.path)))
        return SRC_PATTERN.sub(self.src_converter,
                               URL_PATTERN.sub(self.url_converter, self.content))
    def guess_filename(self, url):
        """Map *url* back to a local file path, or return False.

        Returns the absolute filesystem path when the file exists under
        COMPRESS_ROOT (note the ``and`` idiom: False otherwise).
        """
        local_path = url
        if self.has_scheme:
            # COMPRESS_URL had a protocol,
            # remove it and the hostname from our path.
            local_path = local_path.replace(self.protocol + self.host, "", 1)
        # remove url fragment, if any
        local_path = local_path.rsplit("#", 1)[0]
        # remove querystring, if any
        local_path = local_path.rsplit("?", 1)[0]
        # Now, we just need to check if we can find
        # the path from COMPRESS_URL in our url
        if local_path.startswith(self.url_path):
            local_path = local_path.replace(self.url_path, "", 1)
        # Re-build the local full path by adding root
        filename = os.path.join(self.root, local_path.lstrip('/'))
        return os.path.exists(filename) and filename
    def add_suffix(self, url):
        """Append a cache-busting query suffix to *url* when possible.

        No-ops when the file cannot be found locally, hashing is disabled,
        or the URL is not absolute.  Raises FilterError for an unknown
        COMPRESS_CSS_HASHING_METHOD value.
        """
        filename = self.guess_filename(url)
        if not filename:
            return url
        if settings.COMPRESS_CSS_HASHING_METHOD is None:
            return url
        if not url.startswith(SCHEMES):
            return url
        suffix = None
        if settings.COMPRESS_CSS_HASHING_METHOD == "mtime":
            suffix = get_hashed_mtime(filename)
        elif settings.COMPRESS_CSS_HASHING_METHOD in ("hash", "content"):
            suffix = get_hashed_content(filename)
        else:
            raise FilterError('COMPRESS_CSS_HASHING_METHOD is configured '
                              'with an unknown method (%s).' %
                              settings.COMPRESS_CSS_HASHING_METHOD)
        # Temporarily detach a #fragment so the suffix lands on the query
        # string, then reattach it at the end.
        fragment = None
        if "#" in url:
            url, fragment = url.rsplit("#", 1)
        if "?" in url:
            url = "%s&%s" % (url, suffix)
        else:
            url = "%s?%s" % (url, suffix)
        if fragment is not None:
            url = "%s#%s" % (url, fragment)
        return url
    def _converter(self, url):
        """Convert one extracted URL to its absolute, suffixed form."""
        if url.startswith(('#', 'data:')):
            # Fragments and data: URIs are left untouched.
            return url
        elif url.startswith(SCHEMES):
            # Already absolute: only add the cache-busting suffix.
            return self.add_suffix(url)
        # Relative URL: resolve it against the CSS file's directory.
        full_url = posixpath.normpath('/'.join([str(self.directory_name),
                                                url]))
        if self.has_scheme:
            full_url = "%s%s" % (self.protocol, full_url)
        full_url = self.add_suffix(full_url)
        return self.post_process_url(full_url)
    def post_process_url(self, url):
        """
        Extra URL processing, to be overridden in subclasses.
        """
        return url
    def url_converter(self, matchobj):
        """re.sub callback rewriting a url(...) match."""
        quote = matchobj.group(1)
        converted_url = self._converter(matchobj.group(2))
        return "url(%s%s%s)" % (quote, converted_url, quote)
    def src_converter(self, matchobj):
        """re.sub callback rewriting a src=... match."""
        quote = matchobj.group(1)
        converted_url = self._converter(matchobj.group(2))
        return "src=%s%s%s" % (quote, converted_url, quote)
class CssRelativeFilter(CssAbsoluteFilter):
    """
    Do similar to ``CssAbsoluteFilter`` URL processing
    but add a *relative URL prefix* instead of ``settings.COMPRESS_URL``.
    """
    def post_process_url(self, url):
        """
        Replace ``settings.COMPRESS_URL`` URL prefix with '../' * (N + 1)
        where N is the *depth* of ``settings.COMPRESS_OUTPUT_DIR`` folder.
        E.g. by default ``settings.COMPRESS_OUTPUT_DIR == 'CACHE'``,
        the depth is 1, and the prefix will be '../../'.
        If ``settings.COMPRESS_OUTPUT_DIR == 'my/compiled/data'``,
        the depth is 3, and the prefix will be '../../../../'.
        Example:
        - original file URL: '/static/my-app/style.css'
        - it has an image link: ``url(images/logo.svg)``
        - compiled file URL: '/static/CACHE/css/abcdef123456.css'
        - replaced image link URL: ``url(../../my-app/images/logo.svg)``
        """
        old_prefix = self.url
        if self.has_scheme:
            old_prefix = '{}{}'.format(self.protocol, old_prefix)
        # One level up from 'css' / 'js' folder
        new_prefix = '..'
        # N levels up from ``settings.COMPRESS_OUTPUT_DIR``
        new_prefix += '/..' * len(list(filter(
            None, os.path.normpath(settings.COMPRESS_OUTPUT_DIR).split(os.sep)
        )))
        # ``old_prefix`` is a plain URL, not a regex: escape it so that
        # metacharacters such as '.' or '?' in the prefix cannot match
        # arbitrary characters or corrupt the pattern (the original code
        # passed it to re.sub unescaped).
        return re.sub('^{}'.format(re.escape(old_prefix)), new_prefix, url)
| [
"arunkumar.g@cogzidel.com"
] | arunkumar.g@cogzidel.com |
cd87d6f14ac564e642e138bdb04bf5e5cdd2b99e | ffadf9541d01cf9af20c419759d48b1eb01bfd35 | /end/flask/views/user.py | 87d8eec415bdb16d7114dfccf477ab14951900fc | [] | no_license | 1987617587/lsh_py | b1bb1016eaafcba03bbc4a5310c1db04ae227af4 | 80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d | refs/heads/master | 2021-01-02T05:14:31.330287 | 2020-06-20T05:18:23 | 2020-06-20T05:18:23 | 239,498,994 | 2 | 1 | null | 2020-06-07T23:09:56 | 2020-02-10T11:46:47 | Python | UTF-8 | Python | false | false | 7,019 | py | import datetime
import sqlite3
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, BadSignature
from flask import Blueprint, request, current_app, make_response, session
from flask import render_template, request, flash
from flask_mail import Message
from werkzeug.utils import redirect
from werkzeug.security import generate_password_hash, check_password_hash
# 新建用户模块蓝图
from .utils import mail
user_bp = Blueprint("user", __name__)  # blueprint holding all user/auth routes
@user_bp.route("/regist", methods=["GET", "POST"])
def regist():
    """User registration view.

    GET renders the registration form; POST validates the fields, creates
    the user with a hashed password, queues an activation e-mail carrying
    a signed user id, and commits only if the mail task was dispatched.
    Validation errors are flashed and the user is redirected back.
    """
    if request.method == "GET":
        # return "请先登录"
        flash({
        })
        return render_template('regist.html')
    if request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        password2 = request.form.get("password2")
        error = None
        if not username:
            error = "用户名必填"
        elif not password:
            error = "密码必填"
        elif not password2:
            error = "重复密码必填"
        elif password != password2:
            error = "密码不一致"
        else:
            with sqlite3.connect("flask_demo.db") as con:
                cur = con.cursor()
                cur.execute("select * from user where username = ?", (username,))
                r = cur.fetchall()
                print(r)
                if len(r) == 0:
                    print("可以创建")
                    try:
                        # Hash the password before storing it.
                        security_password = generate_password_hash(password)
                        cur.execute("insert into user (username,password) values (?,?)", (username, security_password))
                        cur.execute("select * from user where username = ?", (username,))
                        r2 = cur.fetchone()
                        print(r2[0])
                        # Send the activation e-mail.
                        # Sign the new user's id using the app's secret key,
                        # valid for one hour.
                        print(current_app.secret_key)
                        seria_util = TimedJSONWebSignatureSerializer(current_app.secret_key, expires_in=3600)
                        serstr = seria_util.dumps({"id": r2[0]}).decode("utf-8")
                        # msg = Message(subject="神秘组织激活邮件", recipients=[username])
                        # # msg.html = "<a href='http://127.0.0.1:5000/active/" + str(r2[0]) + "'>点击激活</a>"
                        # msg.html = "<a href='http://127.0.0.1:5000/active/%s'>点击激活</a>" % (serstr,)
                        # mail.send(msg)
                        from tasks import sendmail
                        sendmail.delay(username, serstr)
                        # Commit only once the mail task was queued successfully.
                        con.commit()
                        # If sending failed, the insert is rolled back below.
                    except Exception as e:
                        print(e)
                        con.rollback()
                        return "出现异常"
                    return redirect('/login')
                error = "用户名已存在"
        if error:
            # flash(error, category=error)
            flash({
                "error": error,
                "username": username
            })
        return redirect('/regist')
@user_bp.route("/login", methods=["GET", "POST"])
def login():
    """Login view.

    GET renders the login form; POST checks credentials against the
    sqlite user table, refuses inactive accounts, stores the username in
    the session and honours an optional ``?next=`` redirect target.
    """
    if request.method == "GET":
        flash({
        })
        # return "请先登录"
        return render_template('login.html')
    if request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        error = None
        if not username:
            error = "用户名必填"
        elif not password:
            error = "密码必填"
        else:
            with sqlite3.connect("flask_demo.db") as con:
                cur = con.cursor()
                cur.execute("select * from user where username = ?", (username,))
                r = cur.fetchone()
                if r:
                    print(r, r[2])
                    # Verify the submitted password against the stored hash
                    # (row column 2 is assumed to be the password hash).
                    if check_password_hash(r[2], password):
                        print("找到用户")
                        # Column 5 is assumed to be is_active — TODO confirm
                        # against the table schema.
                        if r[5] == 0:
                            error = "用户未激活,不能直接登录,请前往邮箱激活"
                            flash({
                                "error": error,
                                "username": username,
                                "password": password,
                            })
                            return redirect('/login')
                        # Login allowed: establish the session; honour an
                        # optional ?next= target route if present.
                        next = request.args.get("next")
                        if next:
                            res = make_response(redirect(next))
                            # res.set_cookie("user", username,
                            #                expires=datetime.datetime.now() + datetime.timedelta(days=7))
                            session["user"] = username
                            return res
                        # res = request.cookie.set('user')
                        res = make_response(redirect("/"))
                        # res.set_cookie("user", username, expires=datetime.datetime.now() + datetime.timedelta(days=7))
                        session["user"] = username
                        return res
                        # return redirect('/')
                error = "用户名或密码错误"
        # flash() stores the message in the session for this request
        # (requires secret_key to be configured).
        # flash("提示内容")
        # The next request reads it via get_flashed_messages(), which also
        # removes it from the session.
        # get_flashed_messages()
        if error:
            # flash(error, category=error)
            flash({
                "error": error,
                "username": username,
                "password": password,
            })
            return redirect('/login')
    # return render_template('booklist.html', booklist=["倚天屠龙记", "神雕侠侣", "天龙八部"], username=username)
    # return redirect('/')
@user_bp.route("/active/<serstr>", methods=["GET"])
def activeuser(serstr):
    """Account-activation endpoint hit from the e-mailed link.

    Verifies the signed token, flips the user's is_active flag and sends
    the visitor to the login page.
    """
    # Decode and verify the signed token.
    try:
        seria_util = TimedJSONWebSignatureSerializer(current_app.secret_key)
        obj = seria_util.loads(serstr)
        print(obj["id"])
        id = obj["id"]
        print(id)
        with sqlite3.connect("flask_demo.db") as con:
            cur = con.cursor()
            cur.execute("update user set is_active = 1 where id = ?", (id,))
            con.commit()
        return redirect("/login")
    except SignatureExpired as e:
        # Token older than its expiry window.
        # NOTE(review): this branch returns None, which makes Flask raise —
        # it should return an error page/redirect instead.
        print(e, "过期了")
    except BadSignature as e:
        # Token tampered with or signed with a different key.
        # NOTE(review): also returns None — same problem as above.
        print(e, "密钥错误")
@user_bp.route("/loginout", methods=["GET", "POST"])
def loginout():
    """Log the current user out and redirect to the home page."""
    res = make_response(redirect("/"))
    # res.delete_cookie("user")
    # Pop with a default so visiting /loginout while not logged in does
    # not raise KeyError (the original crashed with a 500 in that case).
    session.pop("user", None)
    return res
| [
"1987617587@qq.com"
] | 1987617587@qq.com |
988d04067977134da13fcb1dc12ea92f4c20cea7 | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/get_positions_20210204132316.py | b59336fa2236adffdda2bcd1f15dc518e53c49f6 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
# Authenticate against the TDA API using the stored OAuth token.
token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)
# positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
# account_info = c.get_account(config.tda_acct_num, fields=[c.Account.Fields.POSITIONS]).json()
# print(account_info)
# Request the account with its positions included in the payload.
positions = c.Account.Fields.POSITIONS
r = c.get_account(config.tda_acct_num, fields=positions)
stocks = r.json()['securitiesAccount']['positions']
# for stock in stocks:
#     print('--------------------------------')
#     print(stock)
# Each element of ``stocks`` is a plain dict parsed from the response
# JSON, so it has no .json() method — the original called
# json.dumps(stock.json(), ...) and crashed with AttributeError.
for stock in stocks:
    print('--------------------------------')
    print(stock)
    print(json.dumps(stock, indent=4))
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
85e28513bc44090bad42be8c056a3c6f582f7020 | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/phuhungnet.py | ffdfdbc44c6e30bac2055d85ec738301eef1798a | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# Field-extraction XPaths consumed by the scraper framework; empty values
# mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@id='product_page']/div[@class='title']/h3",
    'price' : "//tr/td[@class='product_price']",
    'category' : "",
    'description' : "//div[@id='product_content']",
    'images' : "//td[@id='product_image']/a/img/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
# Module-level spider settings (presumably loaded into a CrawlSpider-style
# class by the scraper runtime — TODO confirm).
name = 'phuhung.net'
allowed_domains = ['phuhung.net']
start_urls = ['http://phuhung.net/san-pham.html']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product detail pages go to parse_item; category/pagination pages recurse
# through parse.
rules = [
    Rule(LinkExtractor(allow=['/san-pham/[a-zA-Z0-9-]+-\d+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/danh-muc/[\w\d-]+(\.html$|/trang/\d+)']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
b2c42c9ce540c3e38808da62b512e8c5d554f676 | d429c131df32789e11a98e9e965e652176fcee97 | /546A - Soldier and Bananas.py | eeac4d1e7f654cceeca7820ee8778d77d3839ec5 | [] | no_license | shan-mathi/Codeforces | a11841a1ef1a1ef78e3d506d58d9fdf4439421bd | 6f8166b79bea0eb1f575dbfc74c252ba71472c7e | refs/heads/main | 2023-06-15T08:25:41.130432 | 2021-06-24T10:36:06 | 2021-06-24T10:36:06 | 341,176,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | #108303540 Feb/23/2021 18:59UTC+5.5 Shan_XD 546A - Soldier and Bananas PyPy 3 Accepted 93 ms 0 KB
# Codeforces 546A — Soldier and Bananas.
# Input: k (price of the first banana), n (dollars on hand), w (bananas).
# The i-th banana costs i*k, so the total is k * w * (w + 1) / 2; print
# how much must be borrowed (0 if n already covers the cost).
k, n, w = map(int, input().split())
# Use integer arithmetic throughout: the original computed
# int(k*w*(w+1)/2 - n) with float division, which can lose precision for
# large inputs; w*(w+1) is always even, so // 2 is exact.
total_cost = k * w * (w + 1) // 2
print(max(total_cost - n, 0))
| [
"noreply@github.com"
] | shan-mathi.noreply@github.com |
370e7546abc9bf88064840023fe031dea6665be5 | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/easy/p543_diameterOfBinaryTree.py | 3f00a3bcafd72f54b4f94f6d4084e95c39b58f11 | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        # Node payload.
        self.val = x
        # Children start empty; callers attach them after construction.
        self.left = None
        self.right = None
class Solution:
    """LeetCode 543: diameter of a binary tree (longest path, in edges)."""

    def __init__(self):
        # Longest node-count path seen during the current traversal.
        self.max_length = 0

    # The annotation is a string (lazy forward reference) so this class
    # does not require TreeNode to be defined at class-creation time.
    def diameterOfBinaryTree(self, root: "TreeNode") -> int:
        """Return the number of edges on the longest path in the tree.

        Returns 0 for an empty tree or a single node.
        """
        # Reset per call: the original kept max_length across calls, so
        # reusing one Solution instance returned stale, too-large results.
        self.max_length = 0

        def dfs(node):
            # Returns the height of *node* counted in nodes, updating
            # max_length with the best node-count path through *node*.
            if node is None:
                return 0
            left = dfs(node.left)
            right = dfs(node.right)
            self.max_length = max(self.max_length, left + right + 1)
            return max(left, right) + 1

        dfs(root)
        # max_length counts nodes; the diameter is edges = nodes - 1
        # (clamped to 0 for the empty tree).
        return max(self.max_length - 1, 0)
# Ad-hoc smoke test: a root with two leaves has diameter 2 (leaf-root-leaf).
t1 = TreeNode(1)
t1.left = TreeNode(2)
t1.right = TreeNode(3)
slu = Solution()
print(slu.diameterOfBinaryTree(t1))
| [
"8390671@qq.com"
] | 8390671@qq.com |
9d4c9414612c5615070d44e735ffa1993c330ba1 | 2372281d6e08dfc517c60d5a0cce678f15f904db | /experiments/output_perturbation/scikit-learn/examples/manifold/plot_swissroll.py | 42943ba64f5a68826e0f6f7a65041be8a3702e92 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | amzn/differential-privacy-bayesian-optimization | b647b8281be0c2ea335104c9c92e717ce07ce63f | 07fec631d00bf3381ca24f7d73757aef6dfda9d3 | refs/heads/master | 2023-03-13T04:10:22.753152 | 2022-10-03T19:26:44 | 2022-10-03T19:26:44 | 252,813,133 | 27 | 19 | Apache-2.0 | 2022-10-03T19:26:45 | 2020-04-03T18:45:17 | Python | UTF-8 | Python | false | false | 1,295 | py | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
# NOTE(review): ``datasets.samples_generator`` is a private module path
# that newer scikit-learn versions removed; modern code imports
# make_swiss_roll from sklearn.datasets directly — confirm the pinned
# sklearn version before changing.
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
# Project the 3-D roll onto 2 components using 12 nearest neighbours.
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| [
"tdiethe@amazon.com"
] | tdiethe@amazon.com |
ecf0995ee33ade107f4536bd80794cbb367f3e0d | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/module_utils/aws/waf.py | 6758b40ed65a31131243e1939a156c9d458a75db | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 7,178 | py | # Copyright (c) 2017 Will Thames
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Web Application Firewall modules
"""
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
# Mapping from the short condition keyword used by the WAF Ansible modules
# to the names the botocore WAF API uses: the client method suffix
# ('method'), the response keys for the condition set and its tuples, and
# the predicate 'Type' string.
MATCH_LOOKUP = {
    'byte': {
        'method': 'byte_match_set',
        'conditionset': 'ByteMatchSet',
        'conditiontuple': 'ByteMatchTuple',
        'type': 'ByteMatch'
    },
    'geo': {
        'method': 'geo_match_set',
        'conditionset': 'GeoMatchSet',
        'conditiontuple': 'GeoMatchConstraint',
        'type': 'GeoMatch'
    },
    'ip': {
        'method': 'ip_set',
        'conditionset': 'IPSet',
        'conditiontuple': 'IPSetDescriptor',
        'type': 'IPMatch'
    },
    'regex': {
        'method': 'regex_match_set',
        'conditionset': 'RegexMatchSet',
        'conditiontuple': 'RegexMatchTuple',
        'type': 'RegexMatch'
    },
    'size': {
        'method': 'size_constraint_set',
        'conditionset': 'SizeConstraintSet',
        'conditiontuple': 'SizeConstraint',
        'type': 'SizeConstraint'
    },
    'sql': {
        'method': 'sql_injection_match_set',
        'conditionset': 'SqlInjectionMatchSet',
        'conditiontuple': 'SqlInjectionMatchTuple',
        'type': 'SqlInjectionMatch',
    },
    'xss': {
        'method': 'xss_match_set',
        'conditionset': 'XssMatchSet',
        'conditiontuple': 'XssMatchTuple',
        'type': 'XssMatch'
    },
}
# Thin wrappers around the botocore WAF read calls, each retried with
# exponential backoff (5 tries, 5s initial delay, factor 2.0) to ride out
# transient API throttling.  Each returns the unwrapped payload object.
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_rule_with_backoff(client, rule_id):
    return client.get_rule(RuleId=rule_id)['Rule']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_byte_match_set_with_backoff(client, byte_match_set_id):
    return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_ip_set_with_backoff(client, ip_set_id):
    return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
    return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
    return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_xss_match_set_with_backoff(client, xss_match_set_id):
    return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
def get_rule(client, module, rule_id):
    # Fetch a WAF rule and denormalise it: every predicate's DataId is
    # replaced by the full match-set object it points at.  API failures
    # abort the Ansible module via fail_json_aws.
    try:
        rule = get_rule_with_backoff(client, rule_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain waf rule")
    # Loader per predicate type that carries a separate match-set payload.
    match_sets = {
        'ByteMatch': get_byte_match_set_with_backoff,
        'IPMatch': get_ip_set_with_backoff,
        'SizeConstraint': get_size_constraint_set_with_backoff,
        'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
        'XssMatch': get_xss_match_set_with_backoff
    }
    if 'Predicates' in rule:
        for predicate in rule['Predicates']:
            if predicate['Type'] in match_sets:
                predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
                # replaced by Id from the relevant MatchSet
                del(predicate['DataId'])
    return rule
# Retried fetch of a single web ACL (same backoff policy as the getters above).
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_web_acl_with_backoff(client, web_acl_id):
    return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
def get_web_acl(client, module, web_acl_id):
    # Fetch a web ACL, inline the full definition of every rule it
    # references, and return the whole structure snake_cased for Ansible.
    # Any API failure aborts the module.
    try:
        web_acl = get_web_acl_with_backoff(client, web_acl_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain web acl")
    if web_acl:
        try:
            for rule in web_acl['Rules']:
                rule.update(get_rule(client, module, rule['RuleId']))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
    return camel_dict_to_snake_dict(web_acl)
# Paginated, retried listings of all rules / web ACLs in the account.
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_rules_with_backoff(client):
    paginator = client.get_paginator('list_rules')
    return paginator.paginate().build_full_result()['Rules']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_web_acls_with_backoff(client):
    paginator = client.get_paginator('list_web_acls')
    return paginator.paginate().build_full_result()['WebACLs']
def list_web_acls(client, module):
    # Module-facing wrapper: converts API errors into a module failure.
    try:
        return list_web_acls_with_backoff(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't obtain web acls")
module.fail_json_aws(e, msg="Couldn't obtain web acls")
def get_change_token(client, module):
    """Fetch a fresh WAF change token, failing the Ansible module on API errors."""
    try:
        response = client.get_change_token()
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg="Couldn't obtain change token")
    else:
        return response['ChangeToken']
module.fail_json_aws(e, msg="Couldn't obtain change token")
@AWSRetry.backoff(tries=10, delay=2, backoff=2.0, catch_extra_error_codes=['WAFStaleDataException'])
def run_func_with_change_token_backoff(client, module, params, func):
    # Every WAF mutation needs a fresh change token.  The whole call is
    # retried on WAFStaleDataException because a token can go stale between
    # being fetched and being used.  Note: mutates the caller's *params*
    # dict by inserting 'ChangeToken'.
    params['ChangeToken'] = get_change_token(client, module)
    return func(**params)
| [
"sifang@cisco.com"
] | sifang@cisco.com |
13ea71c51d84733ff79fcfc4d883854d4494cbbd | 74921f69b10e06662d9818680453ef88db3fbd60 | /test/scikit-learn_reduce.py | 8fc11fd276c2de400d278c26fcec7a1c4db86f22 | [] | no_license | zoulida/skl001 | 8f9b99f17be35bf75cbd57a43013835b94243cfa | 45510de51e04873d303e7a84697517f0f63d3ca1 | refs/heads/master | 2023-03-04T17:42:18.389702 | 2023-02-27T12:59:38 | 2023-02-27T12:59:38 | 159,416,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | __author__ = 'zoulida'
import numpy as np
import matplotlib.pyplot as plt
###########1.数据生成部分##########
def f(x1, x2):
    """Ground-truth surface: 0.5*sin(x1) + 0.5*cos(x2) + 3 + 0.1*x1."""
    wave = 0.5 * np.sin(x1) + 0.5 * np.cos(x2)
    return wave + 3 + 0.1 * x1
def load_data():
    # Build 500 noisy training samples on a regular (x1, x2) grid and 100
    # test samples on a slightly jittered grid with noise-free targets.
    # Columns are x1, x2, y (see the slicing at module level below).
    # Uses np.random, so the output differs between runs.
    x1_train = np.linspace(0,50,500)
    x2_train = np.linspace(-10,10,500)
    data_train = np.array([[x1,x2,f(x1,x2) + (np.random.random(1)-0.5)] for x1,x2 in zip(x1_train, x2_train)])
    x1_test = np.linspace(0,50,100)+ 0.5 * np.random.random(100)
    x2_test = np.linspace(-10,10,100) + 0.02 * np.random.random(100)
    data_test = np.array([[x1,x2,f(x1,x2)] for x1,x2 in zip(x1_test, x2_test)])
    return data_train, data_test
train, test = load_data()
x_train, y_train = train[:,:2], train[:,2]  # first two columns are x1, x2; third is y (training y carries random noise)
x_test ,y_test = test[:,:2], test[:,2]  # same layout; the test y is noise-free
###########2.回归部分##########
def try_different_method(model):
    # Fit *model* on the module-level training split, score it on the test
    # split, and plot true vs. predicted values with the score in the
    # title.  Relies on the globals x_train/y_train/x_test/y_test defined
    # above.
    print(x_train,y_train)  # debug dump of the training data
    model.fit(x_train,y_train)
    score = model.score(x_test, y_test)
    result = model.predict(x_test)
    plt.figure()
    plt.plot(np.arange(len(result)), y_test,'go-',label='true value')
    plt.plot(np.arange(len(result)),result,'ro-',label='predict value')
    plt.title('score: %f'%score)
    plt.legend()
    plt.show()
###########3. Regressor selection##########
####3.1 Decision-tree regression####
from sklearn import tree
model_DecisionTreeRegressor = tree.DecisionTreeRegressor()
####3.2 Linear regression####
from sklearn import linear_model
model_LinearRegression = linear_model.LinearRegression()
####3.3 SVM regression####
from sklearn import svm
model_SVR = svm.SVR()
####3.4 KNN regression####
from sklearn import neighbors
model_KNeighborsRegressor = neighbors.KNeighborsRegressor()
####3.5 Random-forest regression####
from sklearn import ensemble
model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)# 20 decision trees
####3.6 AdaBoost regression####
from sklearn import ensemble
model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=50)# 50 decision trees
####3.7 GBRT regression####
from sklearn import ensemble
model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor(n_estimators=100)# 100 decision trees
####3.8 Bagging regression####
from sklearn.ensemble import BaggingRegressor
model_BaggingRegressor = BaggingRegressor()
####3.9 ExtraTree (extremely randomized tree) regression####
from sklearn.tree import ExtraTreeRegressor
model_ExtraTreeRegressor = ExtraTreeRegressor()
###########4. Run the chosen regressor##########
try_different_method(model_DecisionTreeRegressor)
| [
"zoulida@163.com"
] | zoulida@163.com |
2efdf21c673347a2eea07e6d43d8c2e2b376815a | ab3d5ea4bf0e48914ed14fcf16e5b1d752f199ba | /pcg_libraries/src/pcg_gazebo/parsers/sdf/diffuse.py | 8a1d995ebb4f5624c56657e02f2407d4b61d5d3e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | boschresearch/pcg_gazebo_pkgs | 5f1004d0de874d4d1abc4eb695777013027158b2 | 1c112d01847ca4f8da61ce9b273e13d13bc7eb73 | refs/heads/master | 2020-06-11T06:28:36.228431 | 2020-02-07T13:05:28 | 2020-02-07T13:05:28 | 193,876,180 | 44 | 3 | NOASSERTION | 2020-02-07T12:00:55 | 2019-06-26T09:45:05 | Python | UTF-8 | Python | false | false | 1,122 | py | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLVector
class Diffuse(XMLVector):
    """SDF ``<diffuse>`` element: an RGBA color with each channel in [0, 1]."""
    _NAME = 'diffuse'
    _TYPE = 'sdf'

    def __init__(self, default=None):
        """
        Args:
            default: RGBA vector used as the initial value; defaults to
                opaque black ``[0, 0, 0, 1]``.
        """
        XMLVector.__init__(self, 4)
        # Bug fix: the previous signature used a mutable default argument
        # ([0, 0, 0, 1]) shared by every instance, and aliased the same
        # list into both _default and _value.  Copy the input so no
        # caller-owned list is shared either.
        if default is None:
            default = [0, 0, 0, 1]
        else:
            default = list(default)
        self._default = default
        self._value = list(default)

    def _set_value(self, value):
        # Each channel must be numeric and inside [0, 1].
        assert self._is_numeric_vector(value, [0, 1]), \
            'Diffuse input is not a numeric vector'
        XMLVector._set_value(self, value)
| [
"Musa.Marcusso@de.bosch.com"
] | Musa.Marcusso@de.bosch.com |
20f2aafb86675e7d4a697c1695b23c6829ce451c | 41272e71b1d3aa2e14a20c15f5719c168b48588d | /hw/hw4/submit/b04902053/q_11_12.py | adf9d2da6e5b24bfd9f11dc037bf55193841e672 | [] | no_license | openopentw/2019-ML | 4127cfaa8ac6e600eb62f320bbc59d0b113e2160 | b89e6346875aa4c91ce024ea32dee89d4f86296d | refs/heads/master | 2022-02-20T14:24:14.696456 | 2019-09-25T15:57:25 | 2019-09-25T15:57:25 | 210,883,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py | """ This script do the question 11 ~ 12 of ml-hw4. """
import argparse
import matplotlib.pyplot as plt
import numpy as np
from knn import KNN
def get_data(train_path, test_path):
    """Load whitespace-separated train/test data files.

    Each row holds the feature columns followed by a single label in the
    last column.

    Returns:
        Tuple of numpy arrays (x_train, y_train, x_test, y_test).
    """
    train_set = np.genfromtxt(train_path)
    test_set = np.genfromtxt(test_path)
    x_train, y_train = train_set[:, :-1], train_set[:, -1]
    x_test, y_test = test_set[:, :-1], test_set[:, -1]
    return x_train, y_train, x_test, y_test
def main():
    """ Main function. """
    # parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('train', help='hw4_train.dat')
    parser.add_argument('test', help='hw4_test.dat')
    parser.add_argument('-o', '--output_to_png', default=False, action='store_true',
                        help='Output image to files. (default is display on screen)')
    args = parser.parse_args()
    # get data
    x_train, y_train, x_test, y_test = get_data(args.train, args.test)
    # Train a k-NN classifier per k and record in-sample / out-of-sample
    # error rates (fraction of misclassified points).
    k_list = [1, 3, 5, 7, 9]
    ein_list = []
    eout_list = []
    for k in k_list:
        knn = KNN(k)
        knn.train(x_train, y_train)
        y_train_pred = knn.test(x_train)
        ein_list.append((y_train_pred != y_train).mean())
        y_test_pred = knn.test(x_test)
        eout_list.append((y_test_pred != y_test).mean())
    # plot
    # q_11: E_in vs k
    plt.scatter(k_list, ein_list)
    plt.title('$E_{in}(g_{k-nbor})$ vs. $k$')
    plt.xlabel('$k$')
    plt.ylabel('$E_{in}(g_{k-nbor})$')
    if args.output_to_png:
        plt.savefig('q_11')
    else:
        plt.show()
    plt.clf()
    # q_12: E_out vs k
    plt.scatter(k_list, eout_list)
    plt.title('$E_{out}(g_{k-nbor})$ vs. $k$')
    plt.xlabel('$k$')
    plt.ylabel('$E_{out}(g_{k-nbor})$')
    if args.output_to_png:
        plt.savefig('q_12')
    else:
        plt.show()
    plt.clf()
if __name__ == '__main__':
    main()
| [
"openopentw@gmail.com"
] | openopentw@gmail.com |
2e14888e492a7b54fb82fd10b66880ea2403a5bb | 68d9fffda9c1ee0f4819371067adfd4985332319 | /python/486.预测赢家.py | 6aa5955ab78f2301f0029069a7a8cd8abfe7ce92 | [
"MIT"
] | permissive | Wanger-SJTU/leetcode-solutions | ade9486cef05ede6fa44cbbb5d726037518fac15 | eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4 | refs/heads/master | 2023-04-11T19:56:13.561234 | 2021-05-10T12:00:28 | 2021-05-10T12:00:28 | 129,606,869 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #
# @lc app=leetcode.cn id=486 lang=python3
#
# [486] 预测赢家
#
class Solution:
    def PredictTheWinner(self, nums: "List[int]") -> bool:
        """Return True if player 1 can end with at least as many points
        as player 2 when both pick optimally from either end of *nums*.

        dp[i][j] is the best score margin (current player minus opponent)
        achievable on the subarray nums[i..j].  The original snippet had
        no body at all; this implements the standard interval DP.  The
        annotation is quoted because this file never imports typing.List,
        so evaluating it eagerly would raise NameError.
        """
        if not nums:
            return True
        n = len(nums)
        dp = [[0] * n for _ in range(n)]
        for i in range(n):
            dp[i][i] = nums[i]
        for length in range(2, n + 1):
            for i in range(n - length + 1):
                j = i + length - 1
                # Take the left end or the right end, leaving the opponent
                # the best margin on the remaining interval.
                dp[i][j] = max(nums[i] - dp[i + 1][j],
                               nums[j] - dp[i][j - 1])
        return dp[0][n - 1] >= 0
| [
"howiechen@sjtu.edu.cn"
] | howiechen@sjtu.edu.cn |
8c06e9cdcc7b932bfbf0d6fd7fa53954805f30c7 | 51893a3bc09ebd2d999ac65d91f5da70d082b686 | /venv/bin/pip | cf06fe3bebb84549e67bbc3ddae77842e4fe6b7a | [] | no_license | Erinnyen/openacademy | 3368a772db31e8f33188e3c6b0a523256fc30271 | 97fe0752b9d35c573e6d73a571bef699d3a6bc84 | refs/heads/master | 2020-04-23T21:42:08.206176 | 2019-02-19T13:54:20 | 2019-02-19T13:54:20 | 171,478,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | #!/root/Desktop/openacademy/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain | |
05c8ee695522611abc7da906f4a63f76d7846df8 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Introduction_to_numerical_programming_using_Python_and_CPP_Beu/Ch12/Python/P12-OscillatorSH.py | cf153eed23edced0b2e8bfcd64425542531a13aa | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 3,058 | py | # Eigenstates of the 1D Schroedinger equation for the harmonic oscillator
# y" = 2 [V(x) - E] y, y(0) = y0, y'(0) = y'0, y(+inf) = 0
# V(x) = 0.5*x*x
# using a shooting algorithm based on the Numerov method
#----------------------------------------------------------------------------
from math import *
from ode import *
from specfunc import *
def Pot(x): return 0.5e0*x*x # Potential for harmonic oscillator
# main
# Scan energy windows [Ew, Ew+dE] and shoot the Schroedinger equation on
# the half-line until a solution vanishing at the checkpoint xc is found.
xx = 10e0 # limit of x-mesh
xc = 6e0 # checkpoint for vanishing solution (xc < xx)
par = 0 # parity of eigenstate (0/1)
Emin = 0e0 # lower limit for eigenvalues
Emax = 1e2 # upper limit for eigenvalues
dE = 0.1e0 # minimum separation of eigenvalues
eps = 1e-4 # tolerance for solution at xc
hx = 1e-4 # x-mesh step size
nx = int(xx/hx + 0.5) + 1 # number of x-mesh points
nc = int(xc/hx + 0.5) + 1 # index of checkpoint for vanishing y
x = [0]*(nx+1); y = [0]*(nx+1) # x-mesh, solution
V = [0]*(nx+1) # tabulated potential
for m in range(1,nx+1):
    x[m] = (m-1)*hx # integration mesh
    V[m] = Pot(x[m]) # tabulate potential
Ew = Emin # lower limit of search window for E, [Ew,Ew+dE]
while (Ew < Emax): # loop over eigenvalue windows
    # initial values at x = 0
    if (par == 0): y0 = 1e0; dy0 = 0e0 # even y
    else: y0 = 0e0; dy0 = 1e0 # odd y
    (E, exist) = ShootQM(Ew,Ew+dE,V,x,y,nx,nc,y0,dy0,eps)
    if (exist): break
    Ew += dE # shift [Ew,Ew+dE] for next shoot
if (exist):
    # NOTE(review): n = int(E) assumes harmonic-oscillator energies
    # E_n = n + 1/2 in these units, so truncation recovers the quantum
    # number -- confirm if the potential is changed.
    n = int(E) # quantum number
    f = 0e0 # normalize y by trapezoidal rule
    for m in range(1,nc+1): f += y[m]*y[m]*hx # norm for [0,+inf]
    f = sqrt(2e0*f)
    if (int(n/2) % 2): f = -f # sign correction
    for m in range(1,nx+1): y[m] /= f
    f = sqrt(pi) # norm of Hermite polynomials
    for i in range(1,n+1): f *= 2e0 * i
    f = 1e0/sqrt(f)
    # Write the numerical eigenfunction next to the analytic Hermite
    # solution and their difference.
    out = open("shoot.txt","w")
    out.write("E{0:1d} = {1:8.5f}\n".format(n,E))
    out.write("      x        y{0:1d}       err\n".format(n))
    for m in range(1,nc+1):
        (yH, d) = Hermite(n,x[m]); yH *= f * exp(-0.5*x[m]*x[m])
        out.write(("{0:10.5f}{1:10.5f}{2:10.5f}\n").format(x[m],y[m],yH-y[m]))
    out.close()
else:
    print("No solution found !")
| [
"me@yomama.com"
] | me@yomama.com |
c11bb84897ecc44847a612627f3edf3aff4d5254 | 003de705ef5cdec80dfd216d3c26650c8df8204a | /pyexe/vimtrace | ec8095a6d81e2f1e1d0503c012ebb924d83e902f | [] | no_license | tyochen/winterpy | c71ae8f595e1a27fafe5d9ebaf059661e5f106a5 | 1072846f834f35f4e777398aab3756289bbbfd38 | refs/heads/master | 2020-11-30T00:34:05.774957 | 2017-06-27T05:29:06 | 2017-06-27T05:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | #!/usr/bin/env python3
'''strace wrapper'''
import os
import sys
import subprocess
import signal
def sigchld(signo, sigframe):
    # No-op handler: merely installing it un-ignores SIGCHLD so that
    # signal.sigwaitinfo() in main() can receive child status changes.
    pass
def main():
    """Run strace/ltrace on a command and stream its log into a scratch Vim.

    Usage: vimtrace {strace|ltrace} <command...>.  The tracer writes its
    output through /proc/self/fd/<n> into Vim's stdin pipe; afterwards a
    small job-control loop keeps Vim in the foreground and forwards
    suspend/continue.
    """
    tracer = sys.argv[1]
    if tracer not in ('strace', 'ltrace'):
        sys.exit('%s not supported.' % tracer)
    # Vim reads the trace from stdin ("-"); it gets its own process group
    # so we can move it to the terminal foreground later.
    vim = subprocess.Popen(
        ["vim", "+set buftype=nofile", "+setf strace", "-"],
        preexec_fn = os.setpgrp,
        stdin = subprocess.PIPE,
    )
    fd = vim.stdin.fileno()
    tracerp = subprocess.Popen(
        [tracer, '-o', '/proc/self/fd/%s' % fd] + sys.argv[2:],
        pass_fds=(fd,),
    )
    # Wait for the traced command to finish; Ctrl-C interrupts the traced
    # process, not this wait, so just retry.
    while True:
        try:
            tracerp.wait()
        except KeyboardInterrupt:
            continue
        vim.stdin.close()
        break
    # ignore SIGTTOU so that we can fg ourself
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)
    # Kernel, send me SIGCHLDs please!
    signal.signal(signal.SIGCHLD, sigchld)
    continued = False
    while True:
        if not continued:
            # fg the Vim process (which has its group id the same as its pid)
            os.tcsetpgrp(0, vim.pid)
            # continue it in case it's outputed some text before we make it the
            # foreground process
            vim.send_signal(signal.SIGCONT)
        siginfo = signal.sigwaitinfo([signal.SIGCHLD])
        if siginfo.si_status == signal.SIGTSTP:
            # fg ourself
            os.tcsetpgrp(0, os.getpgid(0))
            # suspend ourself
            os.kill(0, signal.SIGTSTP)
            continued = False
        elif siginfo.si_status == signal.SIGCONT:
            continued = True
            continue
        else:
            # Any other status means Vim exited; reap it and finish.
            vim.wait()
            break
if __name__ == '__main__':
    try:
        # Best effort: rename the process if setproctitle is installed.
        import setproctitle
        setproctitle.setproctitle('vimtrace')
    except ImportError:
        pass
    main()
| [
"lilydjwg@gmail.com"
] | lilydjwg@gmail.com | |
a1eb347125522bf0a2a32fcbbdf9a20ac9b8d071 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/sql/v20210501preview/get_job_target_group.py | 1b56ea0978ba1e209a09f92c225b2ab7df201184 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,593 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetJobTargetGroupResult',
'AwaitableGetJobTargetGroupResult',
'get_job_target_group',
'get_job_target_group_output',
]
# NOTE: generated by the Pulumi SDK generator; manual edits here are
# normally overwritten on regeneration.
@pulumi.output_type
class GetJobTargetGroupResult:
    """
    A group of job targets.
    """
    def __init__(__self__, id=None, members=None, name=None, type=None):
        # Validate-and-store pattern used by all generated result types:
        # falsy values skip the isinstance check deliberately.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if members and not isinstance(members, list):
            raise TypeError("Expected argument 'members' to be a list")
        pulumi.set(__self__, "members", members)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def members(self) -> Sequence['outputs.JobTargetResponse']:
        """
        Members of the target group.
        """
        return pulumi.get(self, "members")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetJobTargetGroupResult(GetJobTargetGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Awaiting this result returns it unchanged; the dead `if False:
        # yield self` only makes __await__ a generator, as the await
        # protocol requires.
        if False:
            yield self
        return GetJobTargetGroupResult(
            id=self.id,
            members=self.members,
            name=self.name,
            type=self.type)
def get_job_target_group(job_agent_name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         server_name: Optional[str] = None,
                         target_group_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobTargetGroupResult:
    """
    A group of job targets.
    :param str job_agent_name: The name of the job agent.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    :param str target_group_name: The name of the target group.
    """
    # Marshal the arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['jobAgentName'] = job_agent_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    __args__['targetGroupName'] = target_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:sql/v20210501preview:getJobTargetGroup', __args__, opts=opts, typ=GetJobTargetGroupResult).value
    return AwaitableGetJobTargetGroupResult(
        id=__ret__.id,
        members=__ret__.members,
        name=__ret__.name,
        type=__ret__.type)
# Output-typed variant: the decorator lifts get_job_target_group so each
# argument may be a pulumi.Input; the body is intentionally empty.
@_utilities.lift_output_func(get_job_target_group)
def get_job_target_group_output(job_agent_name: Optional[pulumi.Input[str]] = None,
                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                server_name: Optional[pulumi.Input[str]] = None,
                                target_group_name: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobTargetGroupResult]:
    """
    A group of job targets.
    :param str job_agent_name: The name of the job agent.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    :param str target_group_name: The name of the target group.
    """
    ...
"noreply@github.com"
] | bpkgoud.noreply@github.com |
75966c1d83ecc39d1e539c532e8eefea17eb211a | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/301-400/week 327/2531. Make Number of Distinct Characters Equal/Make Number of Distinct Characters Equal.py | 09b85c7a7b821b8b1d322f1f4d310ba8f48bcfda | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Make Number of Distinct Characters Equal.py
@time: 2023/01/08
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
import collections
class Solution:
def isItPossible(self, word1: str, word2: str) -> bool:
cnt1, cnt2 = collections.Counter(word1), collections.Counter(word2)
s1, s2 = set(word1), set(word2)
for w1 in s1:
for w2 in s2:
cnt1[w1] -= 1
cnt1[w2] += 1
cnt2[w1] += 1
cnt2[w2] -= 1
if cnt1[w1] == 0:
cnt1.pop(w1)
if cnt2[w2] == 0:
cnt2.pop(w2)
if len(cnt1) == len(cnt2):
return True
cnt1[w1] += 1
cnt1[w2] -= 1
cnt2[w1] -= 1
cnt2[w2] += 1
if cnt1[w2] == 0:
cnt1.pop(w2)
if cnt2[w1] == 0:
cnt2.pop(w1)
return False
# Ad-hoc smoke tests; traced expected output: False, False, True, False, False.
so = Solution()
print(so.isItPossible(word1="ac", word2="b"))
print(so.isItPossible(word1="b", word2="ac"))
print(so.isItPossible(word1="abcc", word2="aab"))
print(so.isItPossible("ab", "abcc"))
print(so.isItPossible("a", "bb"))
"905317742@qq.com"
] | 905317742@qq.com |
ff55702d4b4e6a804eaa698b5e110da8d405fd51 | 8f2f8887f4fdd91a6739a44ec3f14aea8b01b7ad | /blog/views.py | 747961de13716882f1886bc6a2eb3807380e127a | [] | no_license | xarala221/djangoblog | b356ff34ae69edcf7347cd0e234d5ae40bb14aa0 | 2e04f44ca9848bb5d4669acfda0d6b84491cdfbb | refs/heads/master | 2020-03-10T21:45:20.357363 | 2018-04-29T10:49:33 | 2018-04-29T10:49:33 | 129,601,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404
from django.utils import timezone
from django.urls import reverse
from blog.models import Blog
from django.contrib.auth.models import User
from blog.forms import BlogForm
def index(request):
    """Render the site landing page."""
    return render(request, "blog/index.html", {'title': "Welcome To Edge"})
def blog_list(request):
    # Paginated list of all posts, newest first.
    all_blog = Blog.objects.all().order_by("-created_at")
    paginator = Paginator(all_blog, 10) # 10 posts per page
    page = request.GET.get('page')
    blogs = paginator.get_page(page)
    title = "All my blog post"
    context = {
        'title': title,
        'blogs': blogs
    }
    return render(request, "blog/blog_list.html", context)
"""
def listing(request):
contact_list = Contacts.objects.all()
paginator = Paginator(contact_list, 25) # Show 25 contacts per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
return render(request, 'list.html', {'contacts': contacts})
"""
def blog_detail(request, id=None):
    """Display a single blog post, or 404 when the id is unknown."""
    blog = get_object_or_404(Blog, id=id)
    return render(request, "blog/blog_detail.html", {'blog': blog})
def add_blog(request):
    # Create a new post.  The guard `not staff or not superuser` means
    # only users who are both staff AND superuser get past it.
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    title = "Create"
    if request.method == "POST":
        form = BlogForm(request.POST, request.FILES)
        if form.is_valid():
            blog = form.save(commit=False)
            blog.author = request.user
            blog.created_at = timezone.now()
            blog.save()
            return redirect("details", blog.id)
    else:
        form = BlogForm()
    # Falls through for GET and for invalid POST (re-rendering the bound form).
    return render(request, "blog/form.html", {"form": form, "title": title})
def edit_blog(request, id=None):
    # Edit an existing post; restricted to staff superusers like add_blog.
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    blog = get_object_or_404(Blog, id=id)
    title = "Update"
    if request.method == "POST":
        form = BlogForm(request.POST, request.FILES, instance=blog)
        if form.is_valid():
            blog = form.save(commit=False)
            blog.author = request.user
            # NOTE(review): created_at is overwritten on every edit,
            # turning it into an "updated at" timestamp -- confirm intent.
            blog.created_at = timezone.now()
            blog.save()
            return redirect("blog:blog")
    else:
        form = BlogForm(instance=blog)
    return render(request, "blog/form.html", {"form": form, "title": title})
def delete_blog(request, id=None):
    """Confirm (GET) or perform (POST) deletion of a blog post.

    Only users who are both staff and superuser pass the first check, so
    the non-superuser branch below is effectively dead; it is kept for
    parity with the original author-scoped lookup.
    """
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    # Bug fix: *title* was previously assigned only inside the superuser
    # branch but rendered unconditionally, which would raise
    # UnboundLocalError if the other branch were ever taken.
    title = "Delete Page"
    if request.user.is_superuser:
        blog = get_object_or_404(Blog, id=id)
    else:
        blog = get_object_or_404(Blog, id=id, author=request.user)
    if request.method == "POST":
        blog.delete()
        return redirect(reverse("blog:blog"))
    return render(request, "blog/delete.html", {"blog": blog, "title": title})
| [
"xaralaxarala@gmail.com"
] | xaralaxarala@gmail.com |
1a0f90d71135623cf0d58bb539f3d54fe3537734 | c680336a3e50d329321fba8e8c6180c416245bb2 | /tests/operators/gpu/test_ms_addn.py | 900bbffcf7b802b0afaa5833f258e202337d7983 | [
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Unlicense",
"BSD-2-Clause"
] | permissive | kqingcan/akg | 88b7b9a4169490028f6674a675d8d1b325131569 | febec566b1c2c4c213f0f21d3183cef843564d82 | refs/heads/master | 2023-07-18T09:31:24.152923 | 2021-08-25T09:19:15 | 2021-08-25T09:19:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
from tests.common.gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import gpu_profiling
from akg.utils.format_transform import to_tvm_nd_array
from tests.common.tensorio import compare_tensor
from akg.ops.math_gpu.addn import addn
def gen_data(shape, shapes, dtype, n):
    # Build *n* random Gaussian inputs of the given shape/dtype, the
    # expected element-wise sum, and a NaN-filled output buffer.
    support_list = {"float16": np.float16, "float32": np.float32}
    inputs = []
    for i in range(n):
        one_input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
        inputs.append(one_input)
        # NOTE(review): mutates the caller's *shapes* list, and the caller
        # in test_ms_addn already appended the same shapes before calling
        # this -- verify the duplication is intended.
        shapes.append(shape)
    expect = np.sum(inputs, axis=0)
    output = np.full(shape, np.nan, dtype)
    return expect, inputs, output
def test_ms_addn(shape, dtype, n, poly_sch=False):
shapes = []
for i in range(n):
shapes.append(shape)
if poly_sch:
mod = utils.op_build_test(addn, [shapes], [dtype], attrs={"target": "cuda"}, kernel_name="addn")
expect, inputs, output = gen_data(shape, shapes, dtype, n)
output = utils.mod_launch(mod, (*inputs, output), expect=expect)
res = compare_tensor(output, expect, rtol=5e-03, atol=1.e-8)
print("Test {}".format("Pass" if res else "Fail"))
if not res:
print("Error cuda:========================")
print(mod.imported_modules[0].get_source())
raise AssertionError("Test fail")
inputs = to_tvm_nd_array(inputs)
expect = to_tvm_nd_array(expect)
gpu_profiling(mod, *inputs, expect, 400)
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
4c808d2229ca54523fd42cd5cfee19a04fa1f523 | 50b77b527b95659c6ac8484a1091a70b4ad25d73 | /2019/07/aoc7.py | 7afe8c9142019d55fa0b662891f4a481e1564b66 | [] | no_license | cjuub/advent-of-code | d3a4569dd0b7bf7e10dc6a76a1ffe569df4e93a2 | bb92d8ae96cde8c3e57abed26019e692fa6e168f | refs/heads/master | 2023-01-10T00:32:56.847184 | 2023-01-02T20:46:57 | 2023-01-02T20:46:57 | 160,243,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,299 | py | #!/usr/bin/env python3
from typing import List
class IntCodeComputer:
OP_ADD = 1
OP_MUL = 2
OP_LOAD = 3
OP_STORE = 4
OP_JUMP_IF_TRUE = 5
OP_JUMP_IF_FALSE = 6
OP_LESS_THAN = 7
OP_EQUALS = 8
OP_HALT = 99
class HaltException(Exception):
pass
def __init__(self, memory: List[int]):
self._memory = memory[:] + [0] * 10000
self._pc = 0
self._input = []
self._inputs_read = 0
self._output = 0
self._instructions = {IntCodeComputer.OP_ADD: self._add,
IntCodeComputer.OP_MUL: self._mul,
IntCodeComputer.OP_LOAD: self._load,
IntCodeComputer.OP_STORE: self._store,
IntCodeComputer.OP_JUMP_IF_TRUE: self._jump_if_true,
IntCodeComputer.OP_JUMP_IF_FALSE: self._jump_if_false,
IntCodeComputer.OP_LESS_THAN: self._less_than,
IntCodeComputer.OP_EQUALS: self._equals}
def _add(self, op1, op2, res):
self._memory[res] = op1 + op2
self._pc += 4
def _mul(self, op1, op2, res):
self._memory[res] = op1 * op2
self._pc += 4
def _load(self, op1, op2, res):
self._memory[op1] = self._input[self._inputs_read]
self._inputs_read += 1
self._pc += 2
def _store(self, op1, op2, res):
self._output = op1
self._pc += 2
return self._output
def _jump_if_true(self, op1, op2, res):
if op1 != 0:
self._pc = op2
else:
self._pc += 3
def _jump_if_false(self, op1, op2, res):
if op1 == 0:
self._pc = op2
else:
self._pc += 3
def _less_than(self, op1, op2, res):
if op1 < op2:
self._memory[res] = 1
else:
self._memory[res] = 0
self._pc += 4
def _equals(self, op1, op2, res):
if op1 == op2:
self._memory[res] = 1
else:
self._memory[res] = 0
self._pc += 4
def execute(self) -> int:
while True:
op_code_str = str(self._memory[self._pc]).rjust(5, '0')
op_code = int(op_code_str[-2:])
op1_mode = int(op_code_str[2])
op2_mode = int(op_code_str[1])
op3_mode = int(op_code_str[0])
if op_code == IntCodeComputer.OP_HALT:
raise IntCodeComputer.HaltException(self._output)
if op_code == IntCodeComputer.OP_LOAD:
op1_mode = 1
op1 = self._memory[self._memory[self._pc + 1]] if op1_mode == 0 else self._memory[self._pc + 1]
op2 = self._memory[self._memory[self._pc + 2]] if op2_mode == 0 else self._memory[self._pc + 2]
res = self._memory[self._pc + 3] if op3_mode == 0 else self._pc + 3
ret = self._instructions[op_code](op1, op2, res)
if ret is not None:
return int(ret)
def set_input(self, value):
self._input = value
with open('input.txt') as fp:
lines = fp.readlines()
code = [int(x) for x in lines[0].split(',')]
max_output = -1
tested = set()
for i in range(50000):
tmp = str(i)
tmp = tmp.rjust(5, '0')
phase = []
for s in tmp:
phase.append(min(4, int(s)))
for j in range(5):
if j not in phase:
tested.add(str(phase))
if str(phase) in tested:
continue
tested.add(str(phase))
amps = [IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code)]
output = 0
for j, amp in enumerate(amps):
try:
amp.set_input([phase[j], output])
while True:
amp.execute()
except IntCodeComputer.HaltException as e:
output = e.args[0]
if output > max_output:
max_output = output
print('Part 1: ' + str(max_output))
max_output = -1
tested = set()
for i in range(55555, 100000):
tmp = str(i)
tmp = tmp.rjust(5, '5')
phase = []
for s in tmp:
if int(s) < 5:
s = 5
phase.append(int(s))
for j in range(5, 10):
if j not in phase:
tested.add(str(phase))
if str(phase) in tested:
continue
tested.add(str(phase))
amps = [IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code),
IntCodeComputer(code)]
output = 0
first_round = True
done = False
final_output = -1
while True:
for j, amp in enumerate(amps):
try:
if first_round:
amp.set_input([phase[j], output])
else:
amp.set_input([output])
amp._inputs_read = 0
output = amp.execute()
except IntCodeComputer.HaltException as e:
final_output = amps[4]._output
done = True
break
first_round = False
if done:
break
if final_output > max_output:
max_output = final_output
print('Part 2: ' + str(max_output))
| [
"cjuuub@gmail.com"
] | cjuuub@gmail.com |
cdc5880c732887ae0045c165fab12bc1d40ba29e | 8049dd81d52e0659054b574323887cf06dbb03a9 | /api/environments/identities/traits/fields.py | 8785523d5b9ea1caaa0a8b84673a1063cf1a6a6d | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | funetes/flagsmith | b6bfdb79e023a6e8832e898b13635e36486e5aa2 | 97dba4175fac4e723e7bc4208e004d61e748eed7 | refs/heads/main | 2023-06-21T08:18:53.531171 | 2021-07-26T16:47:01 | 2021-07-26T16:47:01 | 389,799,101 | 1 | 0 | NOASSERTION | 2021-07-26T23:57:23 | 2021-07-26T23:57:23 | null | UTF-8 | Python | false | false | 863 | py | from rest_framework import serializers
from environments.identities.traits.constants import ACCEPTED_TRAIT_VALUE_TYPES
from features.value_types import STRING
import logging
logger = logging.getLogger(__name__)
class TraitValueField(serializers.Field):
"""
Custom field to extract the type of the field on deserialization.
"""
def to_internal_value(self, data):
data_type = type(data).__name__
if data_type not in ACCEPTED_TRAIT_VALUE_TYPES:
data = str(data)
data_type = STRING
return {"type": data_type, "value": data}
def to_representation(self, value):
return_value = value.get("value") if isinstance(value, dict) else value
if return_value is None:
logger.warning("Trait value is not an accepted type. Value was %s" % value)
return return_value
| [
"matthewe@solidstategroup.com"
] | matthewe@solidstategroup.com |
7d2dde6910c954c595920775108c7d104df71591 | 1002039191002a3eac8ed6d78bb61d1f5950e456 | /build/lib/ledger-py/ledger.py | 6f44601e4a4feeca4a5c03d0f219bc5b7357d0e9 | [
"Apache-2.0"
] | permissive | isabella232/ledger-py | c73fb5fdb7e97539721e8b91574c51fb23782653 | d5ba3e3ca25126f5a8fa2cf2a6d6ba98b6be265d | refs/heads/master | 2023-03-19T17:26:53.244995 | 2019-02-11T17:45:50 | 2019-02-11T19:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,917 | py | from __future__ import print_function
import binascii
import time
from ledgerblue import commU2F, comm
from ledgerblue.commException import CommException
CLA = 0x77
INS_VERSION = 0x00
INS_GETSTATE = 0x01
INS_PUBLIC_KEY = 0x03
INS_SIGN = 0x04
INS_SIGN_NEXT = 0x05
INS_TEST_PK_GEN_1 = 0x80
INS_TEST_PK_GEN_2 = 0x81
INS_TEST_CALC_PK = 0x82
INS_TEST_WRITE_LEAF = 0x83
INS_TEST_READ_LEAF = 0x84
INS_TEST_KEYGEN = 0x85
INS_TEST_DIGEST = 0x86
INS_TEST_SETSTATE = 0x87
INS_TEST_COMM = 0x88
last_error = 0
APPMODE_NOT_INITIALIZED = 0x00
APPMODE_KEYGEN_RUNNING = 0x01
APPMODE_READY = 0x02
VERSION_MIN_MAJOR = 0
VERSION_MIN_MINOR = 2
VERSION_MIN_PATCH = 0
class LedgerQRL(object):
SCRAMBLE_KEY = "QRL"
U2FMODE = True
DEBUGMODE = False
def __init__(self):
self.last_error = None
self._connected = False
self._test_mode = False
self._version_major = None
self._version_minor = None
self._version_patch = None
self._mode_str = 'unknown'
self._mode_code = -1
self._pk_raw = None
self._otsindex = None
@property
def connected(self):
return self._connected
@property
def test_mode(self):
return self._test_mode
@property
def version(self):
return "{}.{}.{}".format(self._version_major, self._version_minor, self._version_patch)
@property
def mode(self):
return self._mode_str
@property
def mode_code(self):
return self._mode_code
@property
def pk(self):
if self._pk_raw is None:
return None
return binascii.hexlify(self._pk_raw).decode()
@property
def pk_raw(self):
return self._pk_raw
def print_info(self):
if self._test_mode:
print("WARNING! TEST MODE ENABLED")
print("Version : {}".format(self.version))
print("Mode : {}".format(self.mode))
print("XMSS Index : {}".format(self._otsindex))
if self.pk:
print("Public Key : {}".format(self.pk))
def connect(self):
self.U2FMODE = False
# Check version
answer = self.send(INS_VERSION)
if answer is None:
return False
self._test_mode = (answer[0] != 0)
self._version_major = answer[1]
self._version_minor = answer[2]
self._version_patch = answer[3]
answer = self.send(INS_GETSTATE)
if answer is None:
return False
self._mode_code = answer[0]
self._mode_str = "Unknown"
self._otsindex = 1 + answer[1] + answer[2] * 256
if answer[0] == APPMODE_NOT_INITIALIZED:
self._mode_str = "not initialized"
if answer[0] == APPMODE_KEYGEN_RUNNING:
self._mode_str = "keygen running"
if answer[0] == APPMODE_READY:
self._mode_str = "ready"
answer = self.send(INS_PUBLIC_KEY)
self._pk_raw = answer
self._connected = True
def sign(self, message):
if not self.connected:
raise Exception("Device is not connected or reacheable")
print("Signing...")
answer = self.send(INS_SIGN, 0, 0, message)
if answer is None:
raise Exception("Sign Operation Rejected")
signature = b''
for i in range(11):
answer = self.send(INS_SIGN_NEXT)
answer = binascii.hexlify(answer).upper()
signature += answer
if self.DEBUGMODE:
print("[{}] {}".format(len(answer) / 2, answer))
if self.DEBUGMODE:
print("[{}] {}".format(len(signature) / 2, signature))
return binascii.unhexlify(signature)
def send(self, ins, p1=0, p2=0, params=None):
answer = None
if params is None:
params = bytearray([])
start = time.time()
dongle = None
try:
if self.U2FMODE:
dongle = commU2F.getDongle(scrambleKey=self.SCRAMBLE_KEY, debug=self.DEBUGMODE)
else:
dongle = comm.getDongle(debug=self.DEBUGMODE)
cmd = bytearray([CLA, ins, p1, p2, len(params)]) + params
answer = dongle.exchange(cmd)
except CommException as e:
print("LEDGERQRL: COMMEXC: ", e)
self.last_error = e.sw
except Exception as e:
print("LEDGERQRL: Exception: ", e)
except BaseException as e:
print("LEDGERQRL: BaseException: ", e)
finally:
if dongle is not None:
dongle.close()
if self.U2FMODE:
if answer is not None:
if self.DEBUGMODE:
print("U2F[{}]: {}".format(len(answer), binascii.hexlify(answer)))
answer = answer[5:]
end = time.time()
if self.DEBUGMODE:
print(end - start)
return answer
| [
"lenijuan@gmail.com"
] | lenijuan@gmail.com |
da86618e30e85aae3e54ae904612bf01d1944979 | 1d8a4659d4a13cd8b0244918484990bb000687ea | /OpenPROD/openprod-addons/warning/__openerp__.py | 7079195abbc6d671c4b982be802c33c560e05c00 | [] | no_license | kazacube-mziouadi/ceci | d8218ede129186c26eb36f251ef42f07c7a74883 | eb394e1f79ba1995da2dcd81adfdd511c22caff9 | refs/heads/master | 2020-03-23T00:22:51.501409 | 2018-07-13T14:58:32 | 2018-07-13T14:58:32 | 140,859,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf-8 -*-
{
'name' : 'Warning',
'version' : '1.0',
'author' : 'Objectif PI',
'license': 'Open-prod license',
'category' : 'Warning',
'description' : """ Warning for purchase and sale """,
'website': 'http://objectif-pi.com',
'images' : [],
'depends' : [
'sale_purchase',
],
'data': [
'warning_view.xml',
],
'qweb' : [],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
} | [
"mziouadi@kazacube.com"
] | mziouadi@kazacube.com |
21670ab493e727171b5ff7e9fb9f53c361d25571 | ea4567b4388ea97c8ca718d9e331dc796439ee44 | /app_autonation/appium_learn/kyb_project_test/page_object/baseView/common_view.py | d272538271555b6448dfea7611053be8fa050229 | [] | no_license | Kingwolf9527/python_knowledge | ace65470ec706cae195b228b8e8d6ca8db574db8 | 1ccb3a788c172f3122a7c119d0607aa90934e59b | refs/heads/master | 2020-12-04T06:12:49.809020 | 2020-02-10T18:22:36 | 2020-02-10T18:22:44 | 231,647,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | # ! /usr/bin/env python
# - * - coding:utf-8 - * -
# __author__ : KingWolf
# createtime : 2019/4/5 6:39
from appium_learn.kyb_project_test.page_object.baseView.base_view import BaseView
from appium_learn.kyb_project_test.utils.common_logs import Common_log
from appium_learn.kyb_project_test.utils.driver import Driver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import time
import os
import csv
logger = Common_log(logger='commom_view').get_logger()
class Common_view(BaseView):
skipbtn = (By.ID,'com.tal.kaoyan:id/tv_skip')
def check_skipbtn(self):
"""
检测是否存在跳过按钮
:return:
"""
logger.info('=================check skip_btn========================')
try:
# '跳过'按钮的定位
skip_btn = self.driver.find_element(*self.skipbtn)
except NoSuchElementException:
logger.error('====================no shipbtn======================')
else:
skip_btn.click()
logger.info('=====================pass the skip-page==================')
def get_screensize(self):
"""
获取屏幕尺寸
:return:
"""
x = self.get_window_size()['width']
y = self.get_window_size()['height']
return x,y
def swipe_left(self):
"""
左滑操作
:return:
"""
logger.info('====================swipe left============================')
left = self.get_screensize()
x1 = int(left[0]*0.9)
y1 = int(left[1]*0.5)
x2 = int(left[0]*0.2)
self.swipe(x1,y1,x2,y1,duration=1000)
def swipe_up(self):
"""
上滑操作
:return:
"""
logger.info('====================swipe up============================')
up = self.get_screensize()
x1 = int(up[0]*0.5)
y1 = int(up[1]*0.9)
y2 = int(up[1]*0.2)
self.swipe(x1,y1,x1,y2,duration=500)
def strftime(self):
"""
格式化时间
:return:
"""
self.time_strf = time.strftime('%Y-%m-%d %H_%M_%S',time.localtime(time.time()))
return self.time_strf
def get_screenshot_image(self,moudle_file):
"""
截图
:param moudle:
:return:
"""
image_fun = self.strftime()
image_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + '/screenshots_images/'
#判断保存截图的文件夹是否存在
isexists = os.path.exists(image_dir)
if not isexists:
try:
os.makedirs(image_dir)
except FileExistsError:
logger.info('============================NO file========================')
image_name = os.path.join(image_dir,'%s_%s.png' %(moudle_file,image_fun))
logger.info('=============================get %s screenshots===============================' %moudle_file)
self.driver.get_screenshot_as_file(image_name)
| [
"lccr777@163.com"
] | lccr777@163.com |
942e3c78465074f84fa30fae5282f58e4127a871 | 0d94cd7ceb1bd3539d5ac8322870d3583381f2fa | /board/migrations/0001_initial.py | 70cff57a083bc540a8e3406e954c7f93f8dbf29f | [] | no_license | testestzxcv/mysite | 52d89323c150a6df191e056cc85fbec05e69aed1 | 08097b57e7a226e4953758898365863cc16a8524 | refs/heads/master | 2020-03-22T03:37:23.352514 | 2018-07-07T19:58:51 | 2018-07-07T19:58:51 | 139,442,333 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # Generated by Django 2.0.6 on 2018-06-29 06:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.CharField(max_length=2000)),
('hit', models.IntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
]
| [
"illuillu55@hotmail.com"
] | illuillu55@hotmail.com |
dbd181920e86d9e4a2ed6b1d93d1b0e04582e7c3 | 69a327a2af65d7252b624fe7cadd537eb51ca6d6 | /BFS DFS/BOJ_7576.py | ea2a8033621b111e606b49523ea8b8130bc820b4 | [] | no_license | enriver/algorithm_python | 45b742bd17c6a2991ac8095d13272ec4f88d9bf5 | 77897f2bf0241756ba6fd07c052424a6f4991090 | refs/heads/master | 2023-09-03T23:28:23.975609 | 2021-10-29T09:25:32 | 2021-10-29T09:25:32 | 278,907,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #토마토 - S1
import sys
from collections import deque
M,N =map(int, sys.stdin.readline().split())
visit=[[False for _ in range(M)] for _ in range(N)]
mat=[[False for _ in range(M)] for _ in range(N)]
for i in range(N):
tomato=list(map(int,sys.stdin.readline().split()))
for j in range(M):
mat[i][j]=tomato[j]
dx=[1,-1,0,0]
dy=[0,0,-1,1]
def bfs():
que=deque()
day=-1
for i in range(N):
for j in range(M):
if mat[i][j]==-1:
visit[i][j]=True
if mat[i][j]==1 and not visit[i][j]:
visit[i][j]=True
que.append((i,j,0))
while que:
x,y,day=que.popleft()
for i in range(4):
nx,ny=x+dx[i],y+dy[i]
if 0<=nx<N and 0<=ny<M:
if mat[nx][ny]==0 and not visit[nx][ny]:
visit[nx][ny]=True
que.append((nx,ny,day+1))
return day
def solve(days):
for i in range(N):
for j in range(M):
if visit[i][j]==False:
return -1
return days
day=bfs()
print(solve(day)) | [
"riverkeh@naver.com"
] | riverkeh@naver.com |
a7dbfaf1a25511350c5610b3b3be9eaab5482fe6 | 3ea99519e25ec1bb605947a94b7a5ceb79b2870a | /modern_python/modernpython/lib/mypy/typeshed/stdlib/2and3/pprint.pyi | 70780862c8f3a3fa94310b8a03d3d85215da525a | [] | no_license | tech-cow/spazzatura | 437c7502a0654a3d3db2fd1e96ce2e3e506243c0 | 45fc0932186d2ef0c5044745a23507a692cfcc26 | refs/heads/master | 2022-09-01T12:01:11.309768 | 2018-11-15T04:32:03 | 2018-11-15T04:32:03 | 130,414,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | pyi | # Stubs for pprint
# Based on http://docs.python.org/2/library/pprint.html
# Based on http://docs.python.org/3/library/pprint.html
from typing import Any, Dict, Tuple, IO
def pformat(o: object, indent: int = ..., width: int = ...,
depth: int = ...) -> str: ...
def pprint(o: object, stream: IO[str] = ..., indent: int = ..., width: int = ...,
depth: int = ...) -> None: ...
def isreadable(o: object) -> bool: ...
def isrecursive(o: object) -> bool: ...
def saferepr(o: object) -> str: ...
class PrettyPrinter:
def __init__(self, indent: int = ..., width: int = ..., depth: int = ...,
stream: IO[str] = ...) -> None: ...
def pformat(self, o: object) -> str: ...
def pprint(self, o: object) -> None: ...
def isreadable(self, o: object) -> bool: ...
def isrecursive(self, o: object) -> bool: ...
def format(self, o: object, context: Dict[int, Any], maxlevels: int,
level: int) -> Tuple[str, bool, bool]: ...
| [
"yuzhoujr@yuzhou-7480.internal.synopsys.com"
] | yuzhoujr@yuzhou-7480.internal.synopsys.com |
3b3e2164786de5916c5320c67bbf2c79a0bbb935 | 0217e04227849e7dede34397358bab51d47a6b0d | /all python/lesson 9-1.py | f60840f02176d528ad13746ee2459c59d06acc59 | [] | no_license | ZohanHo/ClonePPZ | 4d370a7f924ad3835ac967487c4b91ef913e488d | 43c9be8390c3d714d333793425b838a2aca39a7e | refs/heads/master | 2020-03-21T07:27:49.987171 | 2018-10-16T14:29:44 | 2018-10-16T14:29:44 | 138,281,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | """"""
"""Функции"""
"""Переменное число параметров
Иногда бывает нужно определить функцию, способную принимать любое число параметров.
Этого можно достичь при помощи звёздочек
"""
def total(initial, *numbers, **keywords): #
count = initial
for number in numbers:
count += number # Так как ето кортеж, то при помощи цыкла добавляем 10, 1, 2, 3
for key in keywords:
count += keywords[key] #Так как ето словарь, по ключу получаем значение, при помощи цыкла добавляем 50, 100
return count # возвращаем полученую сумму
print(total(10, 1, 2, 3, vegetables=50, fruits=100))
"""Когда мы объявляем параметр со звёздочкой (например, *param), все позиционные
аргументы начиная с этой позиции и до конца будут собраны в кортеж
под именем param.
Аналогично, когда мы объявляем параметры с двумя звёздочками (**param),
все ключевые аргументы начиная с этой позиции и до конца будут собраны
в словарь под именем param """
def total(initial, *numbers, extra_number):
count = initial
for number in numbers: # добавит все значения с полученого кортежа
count += number
count += extra_number # просто добавит переменную
print(count)
total(10, 1, 2, 3, extra_number=50) | [
"serduk.evgeniy@gmail.com"
] | serduk.evgeniy@gmail.com |
4413523012dc010eee4ff8bf1e033cfb18043c03 | f41309da5e0d26b24d974a009fa309a02fcaa20c | /aws_cloudtrail_rules/aws_kms_cmk_loss.py | e43ebeeaa1f3480be7cc01ae37e5a38bebadd7ab | [
"Apache-2.0"
] | permissive | georgeSkoumas/panther-analysis | 2e1e87f83c6533cb6d62ecb62e3f61b2ff4b5ed4 | 30b21c270504bf7c84f99207c9c6c2f6110843ae | refs/heads/master | 2022-09-14T13:22:31.786275 | 2020-05-26T16:18:58 | 2020-05-26T16:18:58 | 267,569,230 | 1 | 0 | Apache-2.0 | 2020-05-28T11:15:05 | 2020-05-28T11:15:04 | null | UTF-8 | Python | false | false | 363 | py | # API calls that are indicative of KMS CMK Deletion
KMS_LOSS_EVENTS = {'DisableKey', 'ScheduleKeyDeletion'}
KMS_KEY_TYPE = 'AWS::KMS::Key'
def rule(event):
return event['eventName'] in KMS_LOSS_EVENTS
def dedup(event):
for resource in event['resources']:
if resource['type'] == KMS_KEY_TYPE:
return resource['ARN']
return None
| [
"noreply@github.com"
] | georgeSkoumas.noreply@github.com |
540e139a516c8112554fda4945422e3fef701106 | e2ea29096d3a394f8b013fd96f0ae6121d2529f3 | /config.py | 3dca9e9d47cd50b32b1a11b692d12ca536b68723 | [
"MIT"
] | permissive | gottuantuan/info_got | f9d75a0d6a1b67938213b3f1edd8b227192f5792 | 80ee1e7005a7ee9d9fea20f84b5636cc3cc7c527 | refs/heads/master | 2020-03-21T15:34:46.698263 | 2018-07-08T09:34:42 | 2018-07-08T09:34:42 | 138,720,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from redis import StrictRedis
class Config:
DEBUG = True
SECRET_KEY = 'TDxS5BoJlW6zUFhOZhTHwu3X2OpVFIHIbZdA+JZAgD1Uz92XMZFJNA=='
SQLALCHEMY_DATABASE_URI = 'mysql://root:mysql@localhost/info_got'
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
SESSION_TYPE = 'redis'
SESSION_REDIS = StrictRedis(host=REDIS_HOST,port=REDIS_PORT)
SESSION_USE_SIGNER = True
PERMANENT_SESSION_LIFETIME = 86400
class developmentConfig(Config):
DEBUG = True
class productionConfig(Config):
DEBUG = False
config = {
'develop': developmentConfig,
'production': productionConfig
} | [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
4d693cf36154579d9a72fef92e51d7a6dfe448f2 | 8409c8e7a0bde0a08b0da25de245b3f6cf7d98e0 | /test/test_data/get_test_data.py | ec29b15a7d999cceea9db51b1df079cc83947c90 | [
"Apache-2.0"
] | permissive | teogale/fairgraph | 816cd94b8496856832f6353f0670fbe58c2476a8 | 79c3f6860987e1987320b68c5c0073ff162649d7 | refs/heads/master | 2022-11-17T17:28:48.619559 | 2020-06-24T13:45:55 | 2020-06-24T13:45:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,010 | py | """
Script to retrieve/update test data
"""
import os.path
from itertools import cycle
import json
from fairgraph import KGClient, core, brainsimulation, electrophysiology, minds, uniminds, analysis
core.use_namespace("neuralactivity")
client = KGClient()
def save_nexus_query_result(module, cls):
path = "/data/{}/?fields=all&size=10&deprecated=False".format(cls.path)
response = client._nexus_client._http_client.get(path)
fix_fields(response)
filename = "{}_list_0_10.json".format(cls.__name__.lower())
local_path = os.path.join("nexus",
module.__name__.split(".")[1],
filename)
if "next" in response["links"]:
del response["links"]["next"]
with open(local_path, "w") as fp:
json.dump(response, fp, indent=4)
def save_kg_query_result(module, cls, query_label):
query_id = {"simple": cls.query_id,
"resolved": cls.query_id_resolved}[query_label]
path = "{}/{}/instances?start=0&size=10&databaseScope=INFERRED".format(
cls.path, query_id)
response = client._kg_query_client.get(path)
fix_fields(response)
filename = "{}_list_{}_0_10.json".format(cls.__name__.lower(), query_label)
local_path = os.path.join("kgquery",
module.__name__.split(".")[1],
filename)
with open(local_path, "w") as fp:
json.dump(response, fp, indent=4)
def save_kg_query_spec(module, cls, query_label):
query_id = {"simple": cls.query_id,
"resolved": cls.query_id_resolved}[query_label]
spec = cls.retrieve_query(query_id, client)
filename = "{}_{}_query.json".format(cls.__name__.lower(), query_label)
local_path = os.path.join("kgquery",
module.__name__.split(".")[1],
filename)
with open(local_path, "w") as fp:
json.dump(spec, fp, indent=4)
def fix_fields(data):
replacement_names = cycle([
"Neil;Armstrong;neil.armstrong@nasa.gov",
"Yvonne;Brill;unknown@example.com",
"Frank;Whittle;unknown@example.com",
"Johanna;Weber;unknown@example.com",
"Frank;Borman;unknown@example.com",
"Kalpana;Chawla;unknown@example.com",
"Igor;Sikorsky;unknown@example.com",
"Elsie;McGill;unknown@example.com",
"Leonardo;da Vinci;unknown@example.com",
"Katherine;Johnson;unknown@example.com"
])
def nested_dict_iter(obj):
if isinstance(obj, dict):
for key, value in obj.items():
#print(key)
if isinstance(value, (list, dict)):
nested_dict_iter(value)
else:
if key == "givenName" and "schema" not in value:
obj["givenName"], obj["familyName"], obj["email"] = next(replacement_names).split(";")
break
elif key == "http://schema.org/givenName":
obj["http://schema.org/givenName"], obj["http://schema.org/familyName"], obj["http://schema.org/email"] = next(replacement_names).split(";")
break
elif isinstance(value, str) and "object.cscs.ch" in value:
obj[key] = value[:31]
elif isinstance(obj, (list, dict)):
for item in obj:
nested_dict_iter(item)
else:
pass
data = nested_dict_iter(data)
return data
for module in (core, brainsimulation, electrophysiology, minds, uniminds, analysis:
for cls in module.list_kg_classes():
save_nexus_query_result(module, cls)
for label in ("simple", "resolved"):
save_kg_query_result(module, cls, label)
save_kg_query_spec(module, cls, label)
cls.store_queries(client)
#save_kg_query_result(brainsimulation, brainsimulation.MEModel, "resolved")
#save_nexus_query_result(uniminds, uniminds.Person)
| [
"andrew.davison@unic.cnrs-gif.fr"
] | andrew.davison@unic.cnrs-gif.fr |
6b89f173e48952fc9dccb0e50711c056b8d3333f | cd872ab5bce8dd111952af4721c5f270e7236344 | /edsys_transfer_certificate/wizard/reminder_tc_form.py | 20a5303504e0cf6188a664802b5d94f40be8d8f9 | [] | no_license | probytesodoo/edsys_school_erp | 26e2dd24d907bf76be00ac17872d84ae3a5fdf7e | 0e65e5d937b029beb69563772197b9b050748407 | refs/heads/master | 2020-04-15T22:21:02.623926 | 2019-03-12T13:47:03 | 2019-03-12T13:47:03 | 165,069,627 | 1 | 2 | null | 2019-01-16T09:41:01 | 2019-01-10T14:00:36 | Python | UTF-8 | Python | false | false | 1,525 | py | from odoo import models, fields, api, _
from datetime import datetime
from odoo.exceptions import except_orm
import base64
class SendReminderTransferCertificate(models.TransientModel):
_name = 'send.reminder.tc.form.wiz'
@api.multi
def resend_tc_form_link(self):
"""
this method is use to resend request for
TC form link.
:return:
"""
active_ids = self._context['active_ids']
tc_obj = self.env['trensfer.certificate']
for tc_student_rec in tc_obj.browse(active_ids):
if tc_student_rec.tc_form_filled == True:
raise except_orm(_("Warning!"), _(' Already Transfer Certificate form is filled by the student %s.')
%(tc_student_rec.name.name))
if tc_student_rec.tc_form_filled != True:
email_server=self.env['ir.mail_server']
email_sender=email_server.search([])
ir_model_data = self.env['ir.model.data']
template_id = ir_model_data.get_object_reference('edsys_transfer_certificate', 'email_template_resend_tc_form_email')[1]
template_rec = self.env['mail.template'].browse(template_id)
template_rec.write({'email_to' : tc_student_rec.name.parents1_id.parents_email,'email_from':email_sender.smtp_user, 'email_cc': ''})
template_rec.send_mail(tc_student_rec.id, force_send=True)
tc_student_rec.write({'last_date_of_tc_request_form':datetime.now(),})
| [
"redbytes.test@gmail.com"
] | redbytes.test@gmail.com |
5c45298213bb1becf844115c6950bc4bc2f3fbfb | d0ea6a5771613f4f768a50cef19f74d285a97b76 | /tests/test_motif_count.py | deee73af0fb6c874f8347597256822a02ef6d5a0 | [] | no_license | Botbb/MutationMotif | 241e1c366af0d88d413cc5e8c9b1d2b69f9ca2e8 | 923b645f00417a62120212d38b65ba0317273f09 | refs/heads/master | 2023-05-12T19:12:16.889129 | 2019-09-25T22:22:53 | 2019-09-25T22:22:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,252 | py | from cogent3 import DNA
from cogent3.core.alignment import ArrayAlignment
from cogent3.util.unit_test import TestCase, main
from mutation_motif.motif_count import profile_to_seq_counts, get_count_table,\
reduced_multiple_positions,\
reduced_one_position, get_combined_counts
obs = \
"""
TATGT
TATTT
CTTTA
TATCA
TATCA
CTTCT
""".splitlines()
obs = [("s%s" % i, s) for i, s in enumerate(obs) if s]
ctl = \
"""
TCTTC
TCTTC
GGTAA
AGTGT
GCTGG
GGTAG
""".splitlines()
ctl = [("s%s" % i, s) for i, s in enumerate(ctl) if s]
def _get_seq_array(data):
"""returns [(n, seq), ...] as DenseArray"""
return ArrayAlignment(data=data, moltype=DNA).array_seqs
class TestMotifCount(TestCase):
obs_count = profile_to_seq_counts(_get_seq_array(obs), 2)
ctl_count = profile_to_seq_counts(_get_seq_array(ctl), 2)
table = get_count_table(obs_count, ctl_count, 4)
def test_profile_to_counts(self):
'''correctly determine counts of motif flanking central base'''
seqs = _get_seq_array(obs)
result = profile_to_seq_counts(seqs, 2)
exp = dict(TAGT=1, TATT=1, CTTA=1, TACA=2, CTCT=1)
self.assertEqual(result, exp)
def test_counts_table(self):
"""construct counts table"""
obs_count = profile_to_seq_counts(_get_seq_array(obs), 2)
ctl_count = profile_to_seq_counts(_get_seq_array(ctl), 2)
r = get_count_table(obs_count, ctl_count, 4)
self.assertEqual(r.distinct_values("mut"), set("MR"))
# because the motifs are unique in ctl and obs
# total number should double
self.assertEqual(r.shape[0], 2 * (len(obs_count) + len(ctl_count)))
def test_reduced(self):
"""reduced across positions should produce correct counts"""
r = reduced_multiple_positions(self.table, 'pos0')
exp = {'R': {("A",): 1, ("C",): 0, ("G",): 3, ("T",): 2},
'M': {("A",): 0, ("C",): 2, ("G",): 0, ("T",): 4}
}
self.assertEqual(r, exp)
r = reduced_multiple_positions(self.table, 'pos2')
exp = {'R': {("A",): 2, ("C",): 0, ("G",): 2, ("T",): 2},
'M': {("A",): 0, ("C",): 3, ("G",): 1, ("T",): 2}
}
self.assertEqual(r, exp)
r = reduced_multiple_positions(self.table, 'pos0', 'pos2')
exp = {'R': {("A", "G"): 1, ("C", "C"): 0, ("C", "T"): 0,
("G", "G"): 1, ("G", "A"): 2, ("T", "C"): 0,
("T", "G"): 0, ("T", "T"): 2},
'M': {("A", "G"): 0, ("C", "C"): 1, ("C", "T"): 1,
("G", "G"): 0, ("G", "A"): 0, ("T", "C"): 2,
("T", "G"): 1, ("T", "T"): 1}
}
self.assertEqual(r, exp)
def test_reduced_one(self):
"""should give same results as more general"""
r = reduced_multiple_positions(self.table, 'pos0')
got = reduced_one_position(self.table, 'pos0')
exp = {}
for m, counts in list(r.items()):
counts = dict((k[0], c) for k, c in list(counts.items()))
exp[m] = counts
self.assertEqual(got, exp)
r = reduced_multiple_positions(self.table, 'pos2')
exp = {}
for m, counts in list(r.items()):
counts = dict((k[0], c) for k, c in list(counts.items()))
exp[m] = counts
got = reduced_one_position(self.table, 'pos2')
self.assertEqual(got, exp)
def test_combined_counts(self):
"""combining counts completes missing states"""
combined = get_combined_counts(self.table, 'pos0')
self.assertEqual(combined.shape[0], 8)
combined = get_combined_counts(self.table, ['pos0', 'pos1'])
self.assertEqual(combined.shape[0], 32)
# the following states are not present in either group for pos0/1
# and so should be missing
missing = [['A', 'A'], ['A', 'C'], ['A', 'T'],
['C', 'C'], ['C', 'G'], ['C', 'A'],
['G', 'A'], ['G', 'T'],
['T', 'G'], ['T', 'T']]
for b1, b2 in missing:
sub = combined.filtered("base1=='%s' and base2=='%s'" % (b1, b2))
self.assertEqual(sub.summed('count'), 0)
if __name__ == '__main__':
main()
| [
"Gavin.Huttley@anu.edu.au"
] | Gavin.Huttley@anu.edu.au |
bf0a12828134dad389d7537db4d97acc9e8aad7a | 00689951be97b3e9e3a036aca64efaa1ee59134a | /aula016 - TUPLAS/aula016.py | 49c4c564ab64b8e2900d01967a25f3a8a068afcf | [
"MIT"
] | permissive | miradouro/CursoEmVideo-Python | 4826cf387cc9424e675f2b115842a643f2d67c8d | cc7b05a9a4aad8e6ef3b29453d83370094d75e41 | refs/heads/main | 2023-03-24T08:51:34.183169 | 2021-03-20T22:15:02 | 2021-03-20T22:15:02 | 349,843,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | #TUPLAS
#TUPLAS SÃO IMUTAVEIS
lanche = ('hamburguer', 'suco', 'pizza', 'pudim')
print('-=-'*20)
print(len(lanche))
print('-=-'*20)
for comida in lanche:
print(f'Eu vou comer {comida}')
print('-=-'*20)
for cont in range(0, len(lanche)):
print(f'Eu vou comer {lanche[cont]}')
print('-=-'*20)
for pos, food in enumerate(lanche):
print(f'Eu vou comer {food} na posição {pos}')
print('-=-'*40)
print('Comi pra caramba!')
print('-=-'*40)
print(sorted(lanche))#SORTED COLOCA EM ORDEM ALFABETICA
print(lanche)
| [
"rafaelmiradouro@gmail.com"
] | rafaelmiradouro@gmail.com |
c589a1f68ea25cd0c36f6c8bd83ed1461b01f8aa | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/September/Robot Bounded In Circle.py | 79b9c86524d752918a535cbc48b1b8f389d37b19 | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | class Solution:
def isRobotBounded(self, instructions: str) -> bool:
x = 0
y = 0
direction = 0
for instruction in instructions:
if instruction == 'L':
direction -= 1
elif instruction == 'R':
direction += 1
else:
if direction < 0:
cardinality = (4 - (direction % 4)) % 4
else:
cardinality = direction % 4
if cardinality == 0:
y += 1
elif cardinality == 1:
x += 1
elif cardinality == 2:
y -= 1
else:
x -= 1
if direction < 0:
cardinality = (4 - (direction % 4)) % 4
else:
cardinality = direction % 4
if (x == 0 and y == 0) or cardinality != 0:
return True
return False
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
ff24a9b47c7c21cec5b972b59ef73f0b3058214b | e511cdd3114e0d3611422e5f82eef7fc0f839402 | /407.mask_moments.py | 262bef04e6eaa4baabd065203a31d76df9659403 | [] | no_license | GiantMolecularCloud/NGC253-outflow-at-2pc | 46898bb4d93ed1f30f5d24e6ee7d7efbe1d79a20 | b7ee55cc2f4bebf5af8912a662765d5ceed16977 | refs/heads/main | 2023-02-26T23:32:05.408835 | 2021-02-05T16:39:22 | 2021-02-05T16:39:22 | 336,310,328 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,695 | py | ########################
# CASA ANALYSIS SCRIPT #
########################
# Get estimates for various quantities for disk (inside disk mask) and the rest (mainly outflows).
###################################################################################################
# import required modules
execfile('scripts/casa_imports.py')
execfile('NGC253/project_info.py')
###################################################################################################
# load sub project info
execfile(os.path.join(projectdir, 'info.py'))
###################################################################################################
# get moments
#############
def masked_moments(inps):
dataset = inps[0]
SNR = inps[1] # 3.0 / 5.0 / 10.0 / 25.0 / 50.0 / 75.0 / 100.0
kin_type = inps[2] # disk / non-disk
ax_type = inps[3] # major / minor
masked_cube = os.path.join(dnddir, dataset['cube']+'.ppV_mask_'+ax_type+'.'+kin_type+'.'+str(SNR)+'s.image')
for mom in [0,1,2,4]:
mom_map = os.path.join(dnddir, dataset['cube']+'.ppV_mask_'+ax_type+'.'+kin_type+'.'+str(SNR)+'s.mom'+str(mom))
os.system('rm -rf '+mom_map)
immoments(imagename = masked_cube,
outfile = mom_map,
moments = [mom],
axis = 'spectral',
region = '',
chans = '',
includepix = [0,1000] # is needed for moment 4 despite the map being positive already
)
exportfits(imagename = mom_map,
fitsimage = mom_map+'.fits',
dropstokes = True,
dropdeg = True,
overwrite = True
)
###################################################################################################
# execute in parallel
#####################
inps = []
for dataset in datasets:
for SNR in SNRs:
for kin_type in ['disk','non-disk']:
for ax_type in ['major','minor']:
inps.append([dataset, SNR, kin_type, ax_type])
pool = Pool(30)
pool.map(masked_moments, inps)
###################################################################################################
# mask 0.0 values
#################
mom_files = [os.path.join(dnddir, dataset['cube']+'.ppV_mask_'+ax_type+'.'+kin_type+'.'+str(SNR)+'s.mom0.fits') for ax_type in ['major','minor'] for kin_type in ['disk','non-disk'] for dataset in datasets for SNR in SNRs]
for mom_file in tqdm(mom_files):
mom = fits.open(mom_file)[0]
mom.data[mom.data==0.0] = np.nan
fits.writeto(mom_file, data=mom.data, header=mom.header, overwrite=True)
####################################################################################################
# get log10
###########
mom_files = [os.path.join(dnddir, dataset['cube']+'.ppV_mask_'+ax_type+'.'+kin_type+'.'+str(SNR)+'s.mom0.fits') for ax_type in ['major','minor'] for kin_type in ['disk','non-disk'] for dataset in datasets for SNR in SNRs]
for mom_file in tqdm(mom_files):
mom = fits.open(mom_file)[0]
mom.data = np.log10(mom.data)
fits.writeto(mom_file.replace('.fits','.log.fits'), data=mom.data, header=mom.header, overwrite=True)
# also get log maps of non-separated moments
mom_files = [os.path.join(momdir, dataset['cube']+'.mask_'+str(SNR)+'s.mom0.fits') for dataset in datasets for SNR in SNRs]
for mom_file in tqdm(mom_files):
mom = fits.open(mom_file)[0]
mom.data = np.log10(mom.data)
fits.writeto(mom_file.replace('.fits','.log.fits'), data=mom.data, header=mom.header, overwrite=True)
###################################################################################################
| [
"26749617+GiantMolecularCloud@users.noreply.github.com"
] | 26749617+GiantMolecularCloud@users.noreply.github.com |
131b36051daac33a45673c17778698152c57961d | 7b9c01c9de12f7526110d769fcee97fd9195f7c0 | /quota_api.py | eeab2b56a0799d127fde37ff294d21a10168700e | [] | no_license | FengZiQ/webAPI | 0f26d60024ca75bc2d49f4dd63b699c8c07f010d | 48ad63e91f0151270a29ada4fd53cc50b0a8ead1 | refs/heads/master | 2021-01-18T12:01:17.748425 | 2018-03-02T02:28:20 | 2018-03-02T02:28:20 | 100,363,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | # coding = utf-8
# 2017.10.20
from remote import server
import json
from to_log import tolog
from result_assert import result_assert
from find_unconfigured_pd_id import find_pd_id
def precondition():
pdId = find_pd_id('4TB')
# create pool
server.webapi('post', 'pool', {"name": "test_quota_API", "pds": pdId, "raid_level": "raid5"})
# create nasShare
server.webapi('post', 'nasshare', {'pool_id': 0, 'name': 'test_protocol_api', 'capacity': '2GB'})
# create clone
server.webapi('post', 'snapshot', {"name": "test_quota_API_s", "type": 'nasshare', "source_id": 0})
server.webapi('post', 'clone', {"name": "test_quota_API_c", "source_id": 0})
def get_quota_by_default_path_parameter():
# precondition
# precondition()
# test data
id = ['nasshare_0', 'clone_0']
response = ['status', 'qt_list']
for i in range(len(id)):
tolog('Expect: \r\n' + str(response) + '\r\n')
result = server.webapi('get', 'quota/' + id[i])
if isinstance(result, str):
result_assert.FailFlag = True
tolog('Fail: ' + result + '\r\n')
else:
check = json.loads(result["text"])[0]
tolog('Actual: \r\n' + str(check.keys()).replace('u', '') + '\r\n')
if len(response) != len(check.keys()):
result_assert.FailFlag = True
tolog('Fail: please check out response parameters count\r\n')
else:
for key in check:
if key not in response:
result_assert.FailFlag = True
tolog('Fail: ' + key + ' is not in response\r\n')
result_assert.result_assert()
if __name__ == "__main__":
get_quota_by_default_path_parameter() | [
"feng1025352529@qq.com"
] | feng1025352529@qq.com |
18d0001a35b748ea0f59dfb829a9eb30ad139a2c | 7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0 | /src/igem/neutronics/water/bare/water-backfill/75wt/plot_all.in.one_cask.thickness_dose.rate_t4045_south.py | 1a88946674ce20e490f4b8b832590f2d3eb7cf32 | [] | no_license | TheDoctorRAB/plot | dd3b5134c91c8fa7032fcc077c5427b26a80e49d | ed6746d511222c03e79f93548fe3ecd4286bf7b1 | refs/heads/master | 2021-07-11T10:21:19.347531 | 2020-07-16T17:13:15 | 2020-07-16T17:13:15 | 20,462,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,114 | py | ########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine
# All in one file, with no separate control input, lib files
# Plot data is contained in a separate data file, read on command line
# Set up for a secondary y axis if needed
#
########################################################################
#
#
#
#######
#
# imports
#
# plot
#
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
#
#######
#
# command line
#
from sys import argv
script,plot_datafile=argv #column 0 is the x values then odd columns contain dose/flux
#
#######
#
# screen resolution
#
import Tkinter
root=Tkinter.Tk()
#
########################################################################
#
#
#
#######
#
# screen resolution
#
###
#
# pixels
#
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
#
###
#
# mm
#
width_mm=root.winfo_screenmmwidth()
height_mm=root.winfo_screenmmheight()
#
###
#
# in
#
width_in=width_mm/25.4
height_in=height_mm/25.4
#
###
#
# dpi
#
width_dpi=width/width_in
height_dpi=height/height_in
#
dpi_values=(96,120,144,168,192)
current_dpi=width_dpi
minimum=1000
#
for dval in dpi_values:
difference=abs(dval-width_dpi)
if difference<minimum:
minimum=difference
current_dpi=dval
#
#######
#
# output to screen
#
print('width: %i px, height: %i px'%(width,height))
print('width: %i mm, height: %i mm'%(width_mm,height_mm))
print('width: %0.f in, height: %0.f in'%(width_in,height_in))
print('width: %0.f dpi, height: %0.f dpi'%(width_dpi,height_dpi))
print('size is %0.f %0.f'%(width,height))
print('current DPI is %0.f' % (current_dpi))
#
#######
#
# open the plot data file(s)
# add plot_dataN for each plot_datafileN
#
plot_data=numpy.loadtxt(plot_datafile,dtype=float)
#
#######
#
# graph parameters
#
###
#
# font sizes
#
matplotlib.rcParams.update({'font.size': 48}) #axis numbers
#
title_fontsize=54 #plot title
axis_fontsize=48 #axis labels
annotate_fontsize=48 #annotation
#
###
#
# set up for two y axis
#
fig,left_axis=plot.subplots()
# right_axis=left_axis.twinx()
#
###
#
# plot text
#
title='Dose rate - South plate'
xtitle='Wall thickness [cm]'
ytitle='Dose rate [$\mu$Sv/h]'
#
###
#
# legend
# add linecolorN for each plot_dataN
# add curve_textN for each plot_dataN
#
line_color0='blue' #color
line_color1='orange' #color
line_color2='red' #color
line_color3='green' #color
line_color4='cyan' #color
#
curve_text0='10 wt% $B_4C$' #legend text
curve_text1='30 wt% $B_4C$' #legend text
curve_text2='50 wt% $B_4C$' #legend text
curve_text3='70 wt% $B_4C$' #legend text
curve_text4='90 wt% $B_4C$' #legend text
#
legend_location='lower left' #location of legend on grid
legend_font=42
#
###
#
# annotate
# position of the annotation dependent on axis domain and range
#
annotate_title='T-4045'
annotate_x=23
annotate_y=7000
#
annotate_title2='Water backfill'
annotate_x2=23
annotate_y2=4000
#
annotate_title3='75 wt% $^{10}B$'
annotate_x3=23
annotate_y3=1000
#
###
#
# axis domain and range
#
xmin=1
xmax=31
#
ymin=1
ymax=10000
#
###
#
# axis ticks
#
xmajortick=5
ymajortick=5000
#
xminortick=1
yminortick=1000
#
###
#
# grid linewidth
#
major_grid_linewidth=2.5
minor_grid_linewidth=2.1
#
major_grid_tick_length=7
minor_grid_tick_length=5
#
###
#
# curve linewidth
#
curve_linewidth=4.0
#
#######
#
# set plot diagnostics
#
###
#
# titles
#
plot.title(title,fontsize=title_fontsize)
left_axis.set_xlabel(xtitle,fontsize=axis_fontsize)
left_axis.set_ylabel(ytitle,fontsize=axis_fontsize)
# right_axis.set_ylabel()
#
###
#
# grid
#
left_axis.grid(which='major',axis='both',linewidth=major_grid_linewidth)
left_axis.grid(which='minor',axis='both',linewidth=minor_grid_linewidth)
#
left_axis.tick_params(axis='both',which='major',direction='inout',length=major_grid_tick_length)
left_axis.tick_params(axis='both',which='minor',direction='inout',length=minor_grid_tick_length)
#
###
#
# axis domain and range
#
plot.xlim(xmin,xmax)
left_axis.axis(ymin=ymin,ymax=ymax)
###
#
# axis ticks
#
left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
#
###
#
# log scale option
# xmin,ymin !=0 for log scale
#
#left_axis.set_xscale('log')
left_axis.set_yscale('log')
#
###
#
# annotation
# comment out if not needed
#
left_axis.annotate(annotate_title,xy=(annotate_x,annotate_y),xytext=(annotate_x,annotate_y),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title2,xy=(annotate_x2,annotate_y2),xytext=(annotate_x2,annotate_y2),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title3,xy=(annotate_x3,annotate_y3),xytext=(annotate_x3,annotate_y3),fontsize=annotate_fontsize)
#
#######
#
# plot data
#
left_axis.plot(plot_data[:,0],plot_data[:,1],marker='o',color=line_color0,label=curve_text0,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,3],marker='o',color=line_color1,label=curve_text1,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,5],marker='o',color=line_color2,label=curve_text2,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,7],marker='o',color=line_color3,label=curve_text3,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,9],marker='o',color=line_color4,label=curve_text4,linewidth=curve_linewidth,markersize=20)
left_axis.legend(loc=legend_location,fontsize=legend_font) #legend needs to be after all the plot data
plot.get_current_fig_manager().resize(width,height)
plot.gcf().set_size_inches((0.01*width),(0.01*height))
#
#######
#
# save
#
plot.savefig(title,dpi=current_dpi)
#
#######
#
# plot to screen
#
# plot.show()
#
########################################################################
#
# EOF
#
########################################################################
| [
"borrelli@localhost.localdomain"
] | borrelli@localhost.localdomain |
3dd727b47b988f682037366f33e36cac61869518 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/166/usersdata/272/70771/submittedfiles/maiormenor.py | 197104c23e9d671f7d05c733d1056549da7ce327 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # -*- coding: utf-8 -*-
import math
a = int(input('Digite o número 1: '))
b = int(input('Digite o número 2: '))
c = int(input('Digite o número 3: '))
d = int(input('Digite o número 4: '))
e = int(input('Digite o número 5: '))
#CONTINUE...
if (a<b<c<d<e):
print(a)
elif (b<a<c<d<e):
print(b)
elif (c<b<a<d<e):
print(c)
elif (d<c<b<a<e):
print(d)
elif (e<d<c<b<a):
print(e)
if (a>b>c>d>e):
print(a)
elif (b>a>c>d>e):
print(b)
elif (c>b>a>d>e):
print(c)
elif (d>c>b>a>e):
print(d)
elif (e>d>c>b>a):
print(e)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
507287d7a42d908659c94b30bd3e0d062f12aea0 | 45a0434de7cb5aaf51f372a9ea39c2e62528e8d7 | /main_train_hier_att_fsoftmax_v1.py | 4064ad4f9536505ed8836fe51d031fdf509a9ff9 | [] | no_license | hongtaowutj/Seq2Seq-Keyphrase-Generation | 44b5b24f3af7a85c24fc5ef231c53c1dac7e48ff | 6f2d08222b108b543b7628b32e98480f2e3a32b0 | refs/heads/master | 2020-03-27T10:43:09.941194 | 2018-07-23T07:21:35 | 2018-07-23T07:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | #!/usr/bin/anaconda3/bin/python
# -*- coding: utf-8 -*-
# author: @inimah
# date: 25.04.2018
import os
import sys
sys.path.append(os.getcwd())
config = {
'data_path': 'data/kp20k',
'model_path':'models',
'result_path': 'results/kp20k/v1/hier-att-full',
'decode_path': 'results/kp20k/v1/hier-att-full/decoding',
'preprocessed_data': 'results/kp20k/v1/data',
'preprocessed_v2': 'results/kp20k/v2/data',
'decoded_files': 'keyphrases-beam-sts-kp20k-hier-att-fsoftmax-v1',
'idx_words': 'all_indices_words_sent_fsoftmax.pkl',
'words_idx': 'all_words_indices_sent_fsoftmax.pkl',
'y_true': 'test_sent_output_tokens.npy',
'birnn_dim': 150,
'rnn_dim': 300,
'embedding_dim': 100,
'encoder_length': 20,
'decoder_length' : 8,
'max_sents' : 20,
'batch_size': 128,
'epoch': 100,
'vocab_size': 10004,
'file_name' : 'sts-kp20k-hier-att-fsoftmax-v1',
'weights' : 'sts-kp20k-hier-att-fsoftmax-v1.03-12.43.check'
}
if __name__ == '__main__':
'''
print("preprocessing data...")
import preprocessor
print("vectorizing and padding...")
preprocessor.transform_train_sent_fsoftmax_v1(config)
preprocessor.transform_valid_sent_fsoftmax_v1(config)
preprocessor.transform_test_sent_fsoftmax_v1(config)
print("pairing data...")
preprocessor.pair_train_sent_fsoftmax(config)
preprocessor.pair_valid_sent_fsoftmax(config)
preprocessor.pair_test_sent_fsoftmax(config)
print("training model...")
import trainer_hier_att_fsoftmax_v1
trainer_hier_att_fsoftmax_v1.trainer(config)
import decoder_hier_att_fsoftmax_v1
decoder_hier_att_fsoftmax_v1.decoder(config)
'''
import evaluator
evaluator.evaluator(config)
import read_kp_kp20k
read_kp_kp20k.reader(config)
| [
"i.nimah@tue.nl"
] | i.nimah@tue.nl |
689c2b95c3b65ae12d72ac710dd9a75fd18de29b | 515e7d6e5756e3922df0b874b241c8b0744b4570 | /udm.py | 311c2552446e51cc6061841601563dbef58c5ffb | [] | no_license | mjgpy3/udm_script | d77f4904df62e33c72f690cdf4049a1118be105b | d04802d21797fa6ed03cfc35c955bcc6d028f1c2 | refs/heads/master | 2021-01-23T11:40:25.415072 | 2013-07-30T16:53:31 | 2013-07-30T16:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | py | #!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 17:20:17 EST 2013
#
#
"""
The Ultimate Dev. Machine script. Run with sudo to turn a virtual
machine into a developer's heaven.
"""
import packs
from os import system
from datetime import datetime
def install_desired_packs(successful_packs):
"""
Loops through the desired containers and installs them
"""
failed_packs = []
for container in packs.desired_packages.package_containers:
failed_packs += container.install_me(successful_packs)
if failed_packs != []:
print "\nThe following packages failed to install correctly:"
print "\n".join(pack_name for pack_name in failed_packs)
else:
print "\nEverything installed correctly"
def get_string_name_and_pack(name, pack):
"""
Returns a string like "<Software Name> (<Package Name>)"
where (<Package Name>) can be empty if none exists.
"""
return name + (' (' + pack + ')' if pack != None else '') + '\n'
def get_string_of_packages():
"""
Returns a string of all packages that ought to be
installed
"""
packages = ''
for container in packs.desired_packages.package_containers:
packages += get_string_name_and_pack(container.name, container.package)
for name in container.packages:
packages += '-> ' + get_string_name_and_pack(name,
container.packages[name])
if container.special_instructions:
for name in container.special_instructions:
packages += '-> ' + get_string_name_and_pack(name, None)
return packages
def write_dict_as_html(dic, file_name):
"""
A quickly built function to make an html log
"""
now = str(datetime.now())
f = open(file_name, 'w')
f.write('<html>\n <head>\n </head>\n <body>\n')
f.write(' <h2>Ultimate Dev. Machine</h2>')
f.write(' <h3>Installation on: %s</h3>' % now)
for container in dic:
f.write(' <h5>%s<h5>\n <ol>\n' % container)
for package in dic[container]:
f.write(' <li>%s</li>\n' % package)
f.write(' </ol><br />\n\n')
f.write(' </body>\n</html>')
def prompter():
"""
Asks the user whether they want to install the software
or not
"""
system('echo "' + get_string_of_packages() + '" | less')
return raw_input(
"Install the listed software? (yes/no): "
).lower().startswith('y')
if __name__ == '__main__':
system('apt-get update')
log_name = "udm_log.html"
successful_packs = {}
if prompter():
install_desired_packs(successful_packs)
write_dict_as_html(successful_packs, log_name)
system('firefox ' + log_name)
| [
"mjg.py3@gmail.com"
] | mjg.py3@gmail.com |
2da3e5791a05c1a8e54d260984695e94bb6c0dea | 8c8168c0e0b66ee8c3e457057834a272f1263e57 | /histograma_gral_negv_posit.py | 471f0d332ef33404056e50de889b02250c1c16b6 | [] | no_license | juliettapc/Spreading_of_innovation | 5e3b08deea4c7a0dab605684fdb1a8a9911695dc | c4ec3397d217136374f17a30aa8244755486e619 | refs/heads/master | 2020-04-01T11:46:00.003479 | 2018-10-15T21:32:45 | 2018-10-15T21:32:45 | 153,176,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py |
##########################################
def histograma(lista, name_h,zeros=False):
# print lista
dict_value_prob={}
dict_value_cumulat_prob={}
maximo=int(max(lista) +2)
minimo=int(min(lista) -1 )
# print name_h,maximo, minimo
for i in range(minimo, maximo): # min included, max not included
#if i in lista:
dict_value_prob[i]=0.
dict_value_cumulat_prob[i]=0.
norm=0.
for item in lista: #Calculate Prob(i)
dict_value_prob[item]+=1.
norm+=1.
# print dict_value_prob
for item in lista: #Calculate Cumulat Prob(i)
for key in sorted(dict_value_prob.iterkeys()):
if key <= item:
dict_value_cumulat_prob[key]+=1.
file = open(name_h,'wt')
for key in sorted(dict_value_prob.iterkeys()):
if zeros==False: # i dont print out the null values of the distribution
if dict_value_prob[key]!=0.:
print >> file,key, dict_value_prob[key]/norm ,dict_value_prob[key]
if zeros==True:
print >> file,key, dict_value_prob[key]/norm ,dict_value_prob[key]
file.close()
file2 = open(name_h.split(".dat")[0]+"_cumulat.dat",'wt')
for key in sorted(dict_value_cumulat_prob.iterkeys()):
print >> file2,key, dict_value_cumulat_prob[key]/norm, dict_value_cumulat_prob[key]
file2.close()
print "written histogram:", name_h
| [
"julia@chem-eng.northwestern.edu"
] | julia@chem-eng.northwestern.edu |
44abec63fd059ae7bfb6a39411f3dbc9db8d6c1d | 0acc73f45ba0556a3cf92999070cc4dd4c2fb992 | /test/throughput.py | 0538c12cf97ba4ffce922ae82953c4006a953f91 | [] | no_license | unrealcv/playground | ec0aa06abb9681b3a21880121c13ed234816f5df | a814c323c5c0db02ca3f65df41c62c7d7b2e6c5a | refs/heads/master | 2021-09-27T13:03:38.568733 | 2017-06-28T02:56:46 | 2017-06-28T02:56:46 | 69,386,937 | 16 | 9 | null | 2017-06-06T02:28:25 | 2016-09-27T18:32:01 | C++ | UTF-8 | Python | false | false | 1,156 | py | # This script is used to test the throughput and frame rate of unrealcv.
import time, sys, argparse
from unrealcv import client
client.connect()
def cmd():
# sync command
# res = client.request('vget /unrealcv/echo hello')
# res = client.request('vget /unrealcv/async_echo hello')
# print res
res = client.request('vget /camera/0/location')
print res
res = client.request('vget /camera/0/rotation')
print res
# print res
# async command
# res = client.request('vget /camera/0/lit')
# res = client.request('vget /camera/0/depth depth.exr')
class FPSCounter:
def __init__(self):
self.start_index = 0
self.start_time = time.time()
def tick(self, current_index):
current_time = time.time()
if (current_time - self.start_time > 1):
print '%d fps' % (current_index - self.start_index)
self.start_index = current_index
self.start_time = current_time
def main():
counter = FPSCounter()
n_iter = 1000000
for i in range(n_iter):
counter.tick(i)
cmd()
time.sleep(5)
if __name__ == '__main__':
main()
| [
"qiuwch@gmail.com"
] | qiuwch@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.