blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c335767482abc80f60cd1001ca1f8ca1c2cc765f | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/gen/view_models/views/lobby/premacc/piggybank_base_model.py | 6c977d428f53ef168370fd9f52f075809bda33f8 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,633 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/premacc/piggybank_base_model.py
from frameworks.wulf import ViewModel
class PiggybankBaseModel(ViewModel):
    """Wulf view model backing the premium-account piggy bank screen.

    Decompiled source: the numeric property indices (0-5) used by the
    getters/setters must stay in sync with the registration order in
    _initialize().
    """
    __slots__ = ()
    def __init__(self, properties=6, commands=0):
        # Six properties are registered in _initialize(); no commands.
        super(PiggybankBaseModel, self).__init__(properties=properties, commands=commands)
    def getMaxAmount(self):
        # Property 0: piggy bank capacity as a number.
        return self._getNumber(0)
    def setMaxAmount(self, value):
        self._setNumber(0, value)
    def getMaxAmountStr(self):
        # Property 1: capacity pre-formatted for display.
        return self._getString(1)
    def setMaxAmountStr(self, value):
        self._setString(1, value)
    def getCurrentAmount(self):
        # Property 2: currently accumulated amount as a number.
        return self._getNumber(2)
    def setCurrentAmount(self, value):
        self._setNumber(2, value)
    def getCurrentAmountStr(self):
        # Property 3: accumulated amount pre-formatted for display.
        return self._getString(3)
    def setCurrentAmountStr(self, value):
        self._setString(3, value)
    def getIsTankPremiumActive(self):
        # Property 4: whether tank premium is active (bool flag).
        return self._getBool(4)
    def setIsTankPremiumActive(self, value):
        self._setBool(4, value)
    def getTimeleft(self):
        # Property 5: remaining time; unit not visible here —
        # presumably seconds, TODO confirm against callers.
        return self._getNumber(5)
    def setTimeleft(self, value):
        self._setNumber(5, value)
    def _initialize(self):
        # Registration order defines the indices used above.
        super(PiggybankBaseModel, self)._initialize()
        self._addNumberProperty('maxAmount', 1)
        self._addStringProperty('maxAmountStr', '0')
        self._addNumberProperty('currentAmount', 0)
        self._addStringProperty('currentAmountStr', '0')
        self._addBoolProperty('isTankPremiumActive', False)
        self._addNumberProperty('timeleft', 0)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
8558428dc9866e41b404653a6c5542655b4bfcfc | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/prompt_toolkit/key_binding/vi_state.py | 10593a82e6288ad9a157bba04e72661e880d3e25 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 3,360 | py | from enum import Enum
from typing import TYPE_CHECKING, Callable, Dict, Optional
from prompt_toolkit.clipboard import ClipboardData
if TYPE_CHECKING:
from .key_bindings.vi import TextObject
from .key_processor import KeyPressEvent
__all__ = [
"InputMode",
"CharacterFind",
"ViState",
]
class InputMode(str, Enum):
    """The Vi editing modes the key processor can be in.

    Subclasses ``str`` so members compare equal to their raw string
    values (e.g. ``"vi-insert"``).
    """
    value: str
    INSERT = "vi-insert"
    INSERT_MULTIPLE = "vi-insert-multiple"
    NAVIGATION = "vi-navigation"  # Normal mode.
    REPLACE = "vi-replace"
    REPLACE_SINGLE = "vi-replace-single"
class CharacterFind:
    """Record of a single-character find (as done with f/F/t/T in Vi)."""

    def __init__(self, character: str, backwards: bool = False) -> None:
        # Store the search direction first, then the sought character.
        self.backwards = backwards
        self.character = character
class ViState:
    """
    Mutable class to hold the state of the Vi navigation.
    """
    def __init__(self) -> None:
        #: None or CharacterFind instance. (This is used to repeat the last
        #: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
        self.last_character_find: Optional[CharacterFind] = None
        # When an operator is given and we are waiting for text object,
        # -- e.g. in the case of 'dw', after the 'd' --, an operator callback
        # is set here.
        self.operator_func: Optional[
            Callable[["KeyPressEvent", "TextObject"], None]
        ] = None
        self.operator_arg: Optional[int] = None
        #: Named registers. Maps register name (e.g. 'a') to
        #: :class:`ClipboardData` instances.
        self.named_registers: Dict[str, ClipboardData] = {}
        #: The Vi mode we're currently in to.
        # Name-mangled; all external access goes through the
        # ``input_mode`` property below.
        self.__input_mode = InputMode.INSERT
        #: Waiting for digraph.
        self.waiting_for_digraph = False
        self.digraph_symbol1: Optional[str] = None  # (None or a symbol.)
        #: When true, make ~ act as an operator.
        self.tilde_operator = False
        #: Register in which we are recording a macro.
        #: `None` when not recording anything.
        # Note that the recording is only stored in the register after the
        # recording is stopped. So we record in a separate `current_recording`
        # variable.
        self.recording_register: Optional[str] = None
        self.current_recording: str = ""
        # Temporary navigation (normal) mode.
        # This happens when control-o has been pressed in insert or replace
        # mode. The user can now do one navigation action and we'll return back
        # to insert/replace.
        self.temporary_navigation_mode = False
    @property
    def input_mode(self) -> InputMode:
        "Get `InputMode`."
        return self.__input_mode
    @input_mode.setter
    def input_mode(self, value: InputMode) -> None:
        "Set `InputMode`."
        # Entering navigation mode discards any half-finished
        # digraph/operator that was pending in the previous mode.
        if value == InputMode.NAVIGATION:
            self.waiting_for_digraph = False
            self.operator_func = None
            self.operator_arg = None
        self.__input_mode = value
    def reset(self) -> None:
        """
        Reset state, go back to the given mode. INSERT by default.
        """
        # Go back to insert mode.
        self.input_mode = InputMode.INSERT
        self.waiting_for_digraph = False
        self.operator_func = None
        self.operator_arg = None
        # Reset recording state.
        self.recording_register = None
        self.current_recording = ""
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
f045587943372d8a259207ccd82edaa468953613 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Box/Files/DownloadFile.py | 76b407e839ac439a4c20ae70900db1f6240c8bd1 | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DownloadFile
# Retrieves the contents of a specified file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DownloadFile(Choreography):
    """Choreography wrapper that retrieves the contents of a Box file."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the DownloadFile Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(DownloadFile, self).__init__(temboo_session, '/Library/Box/Files/DownloadFile')
    def new_input_set(self):
        # Factory for the input container used to configure this Choreo.
        return DownloadFileInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return DownloadFileResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle for an asynchronous execution of this Choreo.
        return DownloadFileChoreographyExecution(session, exec_id, path)
class DownloadFileInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DownloadFile
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
        """
        # Stored under Temboo's input name 'AccessToken'.
        super(DownloadFileInputSet, self)._set_input('AccessToken', value)
    def set_AsUser(self, value):
        """
        Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
        """
        super(DownloadFileInputSet, self)._set_input('AsUser', value)
    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The id of the file to download.)
        """
        super(DownloadFileInputSet, self)._set_input('FileID', value)
class DownloadFileResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DownloadFile Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``;
        # left unchanged in case callers pass it by keyword.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((string) The Base64 encoded contents of the downloaded file.)
        """
        # Returns None when the execution produced no 'Response' output.
        return self._output.get('Response', None)
class DownloadFileChoreographyExecution(ChoreographyExecution):
    """Handle for an asynchronous DownloadFile execution."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the DownloadFile-specific result set.
        return DownloadFileResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
84a54d88f7d38d8c85b43397ef42d1dd097a06d5 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/15 Threading and Concurrency/MultiProcessing/05-sharing-state-using-pipes.py | b1e3550193886be92e3125e26cf78b7aef3064e4 | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | ############################################################
#
# sharing state between processes
#
############################################################
import multiprocessing as mp
N = 20
def fn(connection, results, N):
    """Fill *results* with the first N squares, then ship the list back
    to the parent process over the pipe end *connection*."""
    results.extend(k * k for k in range(N))
    connection.send(results)
if __name__ == '__main__':
    # Parent keeps one end of the pipe; the child writes to the other.
    pipe_parent, pipe_child = mp.Pipe()
    # NOTE: this plain list is NOT shared memory — the child gets its own
    # copy; the pipe is what actually transfers the data back.
    results = []
    p = mp.Process(target=fn, args=(pipe_child, results, N))
    p.start()
    # recv() blocks until the child sends its list of squares.
    reply = pipe_parent.recv()
    p.join()
    print(reply[:])
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
893fab1cf48b44a1a717f23ed3e257b60fdaec80 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /McZF4JRhPus5DtRA4_8.py | 5cf5c4df707900be969ed7763295e7061f6a9041 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | """
Transcribe the given DNA strand into corresponding mRNA - a type of RNA, that
will be formed from it after transcription. DNA has the bases A, T, G and C,
while RNA converts to U, A, C and G respectively.
### Examples
dna_to_rna("ATTAGCGCGATATACGCGTAC") ➞ "UAAUCGCGCUAUAUGCGCAUG"
dna_to_rna("CGATATA") ➞ "GCUAUAU"
dna_to_rna("GTCATACGACGTA") ➞ "CAGUAUGCUGCAU"
### Notes
* Transcription is the process of making complementary strand.
* A, T, G and C in DNA converts to U, A, C and G respectively, when in mRNA.
"""
def dna_to_rna(dna):
    """Transcribe a DNA strand into the complementary mRNA strand.

    Bases map A->U, T->A, G->C, C->G; any character outside this
    alphabet is silently dropped, matching the original behaviour.
    """
    complement = {"A": "U", "T": "A", "G": "C", "C": "G"}
    return "".join(complement[base] for base in dna if base in complement)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b10689338c8fe9bae2a8bab3464158b39483a4e6 | f0181afd2eea9b086ce9487fb8d7fd949282140a | /ncbi/product_protein_seq.py | 5202a7a52dbb2c185fac59cd4b6857e3623f29ca | [
"MIT"
] | permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 1,140 | py | """
Extract the locus information, gene product, and translation from a genbank file
"""
import os
import sys
import argparse
from Bio import SeqIO
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Extract the locus information, gene product, and translation from a genbank file")
    parser.add_argument('-f', help='genbank file', required=True)
    args = parser.parse_args()
    for seq in SeqIO.parse(args.f, 'genbank'):
        for feature in seq.features:
            # GenBank qualifiers are lists; take the first entry or the
            # literal string 'None' when the qualifier is absent.
            pi = 'None'
            if 'protein_id' in feature.qualifiers:
                pi = feature.qualifiers['protein_id'][0]
            gs = "None"
            if 'gene' in feature.qualifiers:
                gs = feature.qualifiers['gene'][0]
            pd = 'None'
            if 'product' in feature.qualifiers:
                pd = feature.qualifiers['product'][0]
            tl = "None"
            if 'translation' in feature.qualifiers:
                tl = feature.qualifiers['translation'][0]
            # Only report features whose gene symbol or product mentions
            # 'gpA', as tab-separated columns.
            if 'gpA' in gs or 'gpA' in pd:
                print("\t".join([seq.id, seq.annotations['organism'], pi, gs, pd, tl]))
| [
"raedwards@gmail.com"
] | raedwards@gmail.com |
ac0f42eac7171b440802678af175d2cde73c0016 | 328afd873e3e4fe213c0fb4ce6621cb1a450f33d | /W3School/SearchandSorting/4.py | 725a50b72b117040dbec1999d45747df6ffd7017 | [] | no_license | TorpidCoder/Python | 810371d1bf33c137c025344b8d736044bea0e9f5 | 9c46e1de1a2926e872eee570e6d49f07dd533956 | refs/heads/master | 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | def bubble(arr):
    # Classic bubble sort: after pass i the largest i+1 elements are in
    # their final positions at the tail, so the inner scan shrinks.
    for i in range(0,len(arr)):
        for j in range(0,len(arr)-i-1):
            if(arr[j]>arr[j+1]):
                # Neighbours out of order -> swap them.
                arr[j],arr[j+1] = arr[j+1],arr[j]
    # Sorts in place and also returns the same list.
    return arr
# Demo run.
arr = [14,46,43,27,57,41,45,21,70]
print(bubble(arr))
| [
"sahilexemplary@gmail.com"
] | sahilexemplary@gmail.com |
5dda6f74cdb6d17620d1222030e998748d74087e | 3b2ead608d71da663af69f8260d9b05312a10571 | /Lesson_3/lesson3_6.py | eb119d846411f5370e7efa10885444adf3aa5d1e | [] | no_license | ddobik/Introduction-Python | 938f5c7dff0d51d0fe33c4ee1484747b27203805 | 1025e2c320671908595f6fccc16990756af9b6c4 | refs/heads/main | 2023-07-20T17:34:09.910739 | 2021-08-19T14:38:37 | 2021-08-19T14:38:37 | 376,647,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | x1 = int(input('Enter x1:'))
y1 = int(input('Enter y1:'))
x2 = int(input('Enter x2:'))
y2 = int(input('Enter y2:'))
# A chess knight moves one square along one axis and two along the
# other, so the pair of absolute deltas must be exactly {1, 2}.
# (Collapses the original duplicated if/elif branches into one test.)
if {abs(x1 - x2), abs(y1 - y2)} == {1, 2}:
    print('Yes')
else:
    print('No')
| [
"you@example.com"
] | you@example.com |
c10eb3fcc7871f0054c20e215d8aadf633ad154e | 7a10bf8748c7ce9c24c5461c21b5ebf420f18109 | /ml_training/PythonCode/P7_Descriptive+Statistics+in+Python.py | 0b7d4e91d52fa214f3d26c9a09981be833cfffa8 | [] | no_license | VishalChak/machine_learning | aced4b4bf65bbbd08c966a2f028f217a918186d5 | c6e29abe0509a43713f35ebf53da29cd1f0314c1 | refs/heads/master | 2021-06-15T07:13:56.583097 | 2019-10-05T06:01:58 | 2019-10-05T06:01:58 | 133,164,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py |
# coding: utf-8
# View first 20 rows
import pandas
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = pandas.read_csv(url, names=names)
peek = data.head()
print(peek)
# Dimensions of your data
shape = data.shape
print(shape)
# Data Types for Each Attribute
types = data.dtypes
print(types)
# Statistical Summary
pandas.set_option('display.width', 100)
pandas.set_option('precision', 3)
description = data.describe()
print(description)
# Class Distribution
class_counts = data.groupby('class').size()
print(class_counts)
# Pairwise Pearson correlations
correlations = data.corr(method='pearson')
print(correlations)
# Skew for each attribute
skew = data.skew()
print(skew)
# The skew result show a positive (right) or negative (left) skew. Values closer to zero show less skew.
| [
"vishalbabu.in@gmail.com"
] | vishalbabu.in@gmail.com |
42542463b033ad43ac9316b25e1bad450db6a910 | 8f70ad12af7eba07efa52eb29b8f99ed3900dbb9 | /AGTGA data/AGTGA/LifeHack/LifeHack 1/TestSuite/TestSuite/TestCase01.py | 19b97f56691b365278a3ad089a95877fcf12e31f | [] | no_license | Georgesarkis/AGTGARowData | 768952dc03dc342bcbe0902bf2fb1720853d0e14 | e1faa7dc820b051a73b0844eac545e597a97da16 | refs/heads/master | 2022-10-01T17:06:04.758751 | 2020-06-05T07:25:41 | 2020-06-05T07:25:41 | 267,772,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from TestSuite.TestSuiteHelper import ElementFinder
# Auto-generated UI test: replays a sequence of taps at fixed screen
# coordinates against the LifeHack APK on a local Appium server.
# NOTE(review): coordinates, APK path and device name are hard-coded for
# a Moto G (5); the script will not transfer to other devices as-is.
port = 'http://localhost:4723/wd/hub'
driver = webdriver.Remote(command_executor=port, desired_capabilities={'automationName' : 'UiAutomator2','deviceName': 'Moto G (5)','platformName': 'Android', 'app': 'C:/Users/ze0396/Desktop/AGTGA/APKS/LifeHack.apk' , 'autoGrantPermissions' : 'true', 'appWaitActivity' : '*.*','fullreset' : 'false','noReset' : 'true' } )
# Fixed sleeps wait for the app/UI to settle between taps.
time.sleep(2)
time.sleep(2)
el = ElementFinder(driver, 385,208)
el.click()
time.sleep(2)
el = ElementFinder(driver, 822,144)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,1320)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,1046)
el.click()
time.sleep(2)
el = ElementFinder(driver, 30,497)
el.click()
# Keycode 3 is the Android HOME button; then tear the session down.
driver.press_keycode(3)
driver.close_app()
driver.quit()
print('TestCase finished successfully') | [
"32592901+Georgesarkis@users.noreply.github.com"
] | 32592901+Georgesarkis@users.noreply.github.com |
8890d1e85684f3a78ab85eba661048c7c7206fa6 | 93bf4bbafe0524335ea1216f7f2941348c2cd1bd | /tensorflow/python/ops/math_ops_test.py | 9d126f7542dcf71295f7cf32c8eb18a10d536bde | [
"Apache-2.0"
] | permissive | sachinpro/sachinpro.github.io | c4951734b09588cad58711a76fe657f110163c11 | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | refs/heads/master | 2022-12-23T10:00:13.902459 | 2016-06-27T13:18:27 | 2016-06-27T13:25:58 | 25,289,839 | 1 | 1 | Apache-2.0 | 2022-12-15T00:45:03 | 2014-10-16T06:44:30 | C++ | UTF-8 | Python | false | false | 3,159 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
  def testReduceAllDims(self):
    # reduce_sum with no axis argument collapses every dimension,
    # yielding the scalar sum of all elements (1+2+...+6 = 21).
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    with self.test_session():
      y_tf = math_ops.reduce_sum(x).eval()
      self.assertEqual(y_tf, 21)
class RoundTest(test_util.TensorFlowTestCase):
  def testRounding(self):
    # Compare tf.round against np.round for float32/float64 on both
    # CPU and GPU; inputs straddle .5 and include negatives.
    x = [0.49, 0.7, -0.3, -0.8]
    for dtype in [np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      for use_gpu in [True, False]:
        with self.test_session(use_gpu=use_gpu):
          x_tf = constant_op.constant(x_np, shape=x_np.shape)
          y_tf = math_ops.round(x_tf)
          y_tf_np = y_tf.eval()
          y_np = np.round(x_np)
          self.assertAllClose(y_tf_np, y_np, atol=1e-2)
class ModTest(test_util.TensorFlowTestCase):
  def testFloat(self):
    # Float mod is checked against np.fmod (C-style remainder).
    x = [0.5, 0.7, 0.3]
    for dtype in [np.float32, np.double]:
      # Test scalar and vector versions.
      for denom in [x[0], [x[0]] * 3]:
        x_np = np.array(x, dtype=dtype)
        with self.test_session():
          x_tf = constant_op.constant(x_np, shape=x_np.shape)
          y_tf = math_ops.mod(x_tf, denom)
          y_tf_np = y_tf.eval()
          y_np = np.fmod(x_np, denom)
          self.assertAllClose(y_tf_np, y_np, atol=1e-2)
  def testFixed(self):
    # Integer mod is checked against np.mod (Python-style remainder).
    x = [5, 10, 23]
    for dtype in [np.int32, np.int64]:
      # Test scalar and vector versions.
      for denom in [x[0], x]:
        x_np = np.array(x, dtype=dtype)
        with self.test_session():
          x_tf = constant_op.constant(x_np, shape=x_np.shape)
          y_tf = math_ops.mod(x_tf, denom)
          y_tf_np = y_tf.eval()
          y_np = np.mod(x_np, denom)
          self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
  def testSquaredDifference(self):
    # squared_difference(x, y) must equal (x - y)^2 elementwise,
    # with y broadcast across the rows of x.
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    y = np.array([-3, -2, -1], dtype=np.int32)
    z = (x - y)*(x - y)
    with self.test_session():
      z_tf = math_ops.squared_difference(x, y).eval()
      self.assertAllClose(z, z_tf)
if __name__ == "__main__":
googletest.main()
| [
"x0234443@ti.com"
] | x0234443@ti.com |
6fa776e5d7257a1496f3e5754db3d168fdb937b0 | d8076e1d19882e4816bcbd7f2b71039007462624 | /Fe_plot_sub.py | 6f436b80946a9b901bce9a849e19272ec985c30d | [] | no_license | kfinn6561/Light_Echoes | e87186c257228f1d191db7dbe7b74e8ddab35e22 | 15f5ed00e26606a68f0544659745c421bb985abb | refs/heads/main | 2023-04-03T16:35:14.992398 | 2021-03-23T10:20:09 | 2021-03-23T10:20:09 | 350,667,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | import numpy as np
from scipy.io.idl import readsav
import matplotlib.pylab as pl
import pylabsetup
import sys
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import readcol
import time
import glob
from matplotlib.backends.backend_pdf import PdfPages
import pickle as pkl
from colour_tools import kelly_colors
sne=['sn1998fa','sn2000H','sn2004ff','sn2006T','sn2006el','sn2008bo','sn2009mg','sn2011fu',
'sn1996cb','sn2008ax','sn2011dh','sn2011ei','sn1993J','sn2003bg']
alphabetical_sne=['sn1998fa','sn2000H','sn2004ff','sn2006T','sn2006el','sn2008bo','sn2009mg','sn2011fu','sn1993J',
'sn1996cb','sn2003bg','sn2008ax','sn2011dh','sn2011ei']
sne_handles={}
data_fol='yuqian_plot_data/'
colours=iter(kelly_colors[6:])
sne_colours={}
def plot_indi(vel_list,vel_dir,xr,yr,save_plot, annot = ''):
    """Plot absorption-velocity curves for the module-level SN list.

    vel_list: name of a file (under data_fol) listing per-SN data files.
    vel_dir:  directory holding those per-SN velocity files.
    xr, yr:   (min, max) axis limits; y is in units of 10^3 km/s.
    save_plot: output figure path; annot: text placed inside the axes.
    Side effects: reads sn_colors.pkl, mutates the module-level
    sne_handles/sne_colours dicts, and writes the figure to disk.
    """
    # Kieran's color
    sn_c = pkl.load(open("sn_colors.pkl",'rb'))
    key = sn_c.keys()
    fig,ax = pl.subplots(figsize=(7.5,7.5))
    pl.xlim(xr[0], xr[1])
    pl.ylim(yr[0], yr[1]) # in unit of 1000 km/s
    filename=open(data_fol+vel_list,'r').read()
    sn_name_list=filename.split('\n')
    symbol=['-<','->','-^','-v','-*','-d','-s','-p', '-h']
    j = 0
    # Map base SN name (before the first '_') to its data-file name.
    sne_files={sn.split('_')[0]:sn for sn in sn_name_list}
    for i, sn in enumerate(sne):
        sn_name=sne_files[sn]
        if any(sn_name.split('_')[0] in s for s in key): # to be consistent with color in Kieran's paper
            res = [x for x in key if sn_name.split('_')[0] in x]
            spec, phase, vel,velerr=readcol.readcol(vel_dir+sn_name,twod=False)
            # Open markers except for the two highlighted SNe.
            MFC='none' if sn not in ['sn1993J', 'sn2003bg'] else sn_c[res[0]]
            sne_handles[sn]=ax.errorbar(phase, vel/1000, yerr=[velerr/1000, velerr/1000],capthick=2,fmt='-o',ms=6.5,
                        label=sn_name.split('_')[0],color=sn_c[res[0]],mec=sn_c[res[0]],mfc=MFC,mew=1.5)
        else:
            spec, phase, vel,velerr=readcol.readcol(vel_dir+sn_name,twod=False)
            try:
                c=sne_colours[sn]
            except KeyError:
                c=next(colours)
                sne_colours[sn]=c
            # NOTE(review): `c` is computed/cached above but the plot uses
            # color='gray' — confirm whether `c` was meant to be used here.
            sne_handles[sn]=ax.errorbar(phase, vel/1000, yerr=[velerr/1000, velerr/1000],capthick=2,fmt=symbol[j%9],mew=0,ms=8,
                        label=sn_name.split('_')[0],color='gray')
            j = j+1
    # Annotation placed 10% in from the left, 90% up the y-range.
    pl.text((xr[1]-xr[0])*0.1+xr[0],(yr[1]-yr[0])*0.9+yr[0],annot,fontsize=20)
    pl.xlabel("Phase since V-band maximum (days)",fontsize=20)
    pl.ylabel("Absorption velocity ($10^3$ km s$^{-1}$)",fontsize=20)
    # NOTE(review): y-axis data are already divided by 1000, so a minor
    # tick every 1000 axis units looks too coarse — confirm intent.
    minorLocatory = MultipleLocator(1000)
    minorLocatorx = MultipleLocator(10)
    ax.xaxis.set_minor_locator(minorLocatorx)
    ax.yaxis.set_minor_locator(minorLocatory)
    pl.legend(handles=[sne_handles[sn] for sn in alphabetical_sne],fontsize=15,mode="expand",loc=3,ncol=2,bbox_to_anchor=(0.3, .6, 0.7, 1))
    pl.subplots_adjust(left=0.15)
    pl.savefig(save_plot)
#pl.close()
plot_indi("inputIIb_HeI5875",data_fol,[-25,130],[-3,-17],'plots/IIb_HeI5876_vabs.pdf',annot = 'He I')
plot_indi("inputIIb_Halpha",data_fol,[-25,70],[-8,-25],'plots/IIb_Halpha_vabs.pdf', annot =r'H$\alpha$')
pl.show() | [
"kieran.finn@hotmail.com"
] | kieran.finn@hotmail.com |
e966f611456988fe1af4938564b6c442113b17dc | 13f148c314b638c7ca810b5d09bdfd79248283de | /manage.py | 329e9d1514df82a6f71165998d08b60e8faeca1e | [] | no_license | akx/drf_unique_together_testcase | bb6888ce2a718f186d3063daccf947e143de7e79 | 132ef026a6a547fce9723ea2cef4606a1172c721 | refs/heads/master | 2020-03-08T15:06:17.559107 | 2018-04-05T12:25:24 | 2018-04-05T12:25:24 | 128,202,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point: point at the project's
    # settings module, then hand argv to Django's command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "drf_unique_together_testcase.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"akx@iki.fi"
] | akx@iki.fi |
a9c0c46edad6ba923dfb0354bc47459129d856c1 | 6a7563ad479e2c3d497d62e91f418d245ec658df | /scratch/indexoper.py | 0650e8b9aa3b7800c20f1bb25cf51fcdc44417d2 | [] | no_license | rosoba/rosoba | 979901ab4858c1559e7ae9c214fb60ca71eec9b5 | b26ae5b6b0f9b7027f306af7da9d1aff1c3e2a46 | refs/heads/master | 2021-01-19T18:36:04.107879 | 2016-01-20T09:48:48 | 2016-01-20T09:48:48 | 4,391,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | '''
Created on Nov 25, 2013
@author: rch
'''
import numpy as np
# L: element connectivity — each row holds the two node indices of one
# line element. X: node coordinates, one (x, y, z) row per node.
L = np.array([[0, 1],
              [1, 2],
              [2, 3]], dtype='i')
X = np.array([[0, 0, 0],
              [1, 0, 0],
              [1, 1, 0],
              [0, 1, 0]], dtype='f')
# Fancy indexing with L.T gathers node coordinates per element:
# u_i holds the first-node coords, u_j the second-node coords.
u_i, u_j = X[L.T]
# Python 2 print statement — this script is not Python 3 compatible.
print u_j
| [
"rostislav.chudoba@rwth-aachen.de"
] | rostislav.chudoba@rwth-aachen.de |
053ad40b16939d38bd80d77c8b759c34b030b3ee | 4ab16447a03a85c3fdc4a016f6fa481756eeeb70 | /src/python/test/yolov3debugloss.py | 279d4b8c3f09e71aedfc600ee8acaf5578b620e9 | [] | no_license | phildue/cnn_gate_detection | 3cd4ae9efde53dbef1aa41b9f7ba5e2875dc80a7 | 9f872b18595e8cd8389d0d1733ee745c017deb3b | refs/heads/master | 2021-03-27T19:34:04.169369 | 2018-12-19T09:32:43 | 2018-12-19T09:32:43 | 112,591,501 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,141 | py | import pprint as pp
from pathlib import Path
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN, ReduceLROnPlateau, CSVLogger, \
History
from keras.optimizers import Adam
from modelzoo.GateNetDecoder import GateNetDecoder
from modelzoo.GateNetEncoder import Encoder
from modelzoo.Preprocessor import Preprocessor
from modelzoo.build_model import build_detector
from modelzoo.metrics.AveragePrecisionGateNet import AveragePrecisionGateNet
from modelzoo.metrics.GateDetectionLoss import GateDetectionLoss
from utils.fileaccess.GateGenerator import GateGenerator
from utils.fileaccess.utils import create_dirs, save_file
from utils.imageprocessing.transform.RandomEnsemble import RandomEnsemble
from utils.imageprocessing.transform.TransformResize import TransformResize
from utils.labels.ImgLabel import ImgLabel
from utils.workdir import cd_work
cd_work()
img_res = 416, 416
anchors = np.array([
[[81, 82],
[135, 169],
[344, 319]],
[[10, 14],
[23, 27],
[37, 58]],
])
architecture = [
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 4, 'strides': (2, 2), 'alpha': 0.1},
# {'name': 'max_pool', 'size': (2, 2)},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 8, 'strides': (2, 2), 'alpha': 0.1},
# {'name': 'max_pool', 'size': (2, 2)},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (2, 2), 'alpha': 0.1},
# {'name': 'max_pool', 'size': (2, 2)},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 24, 'strides': (2, 2), 'alpha': 0.1},
# {'name': 'max_pool', 'size': (2, 2)},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 32, 'strides': (2, 2), 'alpha': 0.1},
# {'name': 'max_pool', 'size': (2, 2)},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 64, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 16, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'predict'},
{'name': 'route', 'index': [3]},
{'name': 'conv_leaky', 'kernel_size': (1, 1), 'filters': 64, 'strides': (1, 1), 'alpha': 0.1},
# {'name': 'upsample', 'size': 2},
# {'name': 'route', 'index': [-1, 8]},
{'name': 'conv_leaky', 'kernel_size': (3, 3), 'filters': 32, 'strides': (1, 1), 'alpha': 0.1},
{'name': 'predict'}
]
"""
Model
"""
model, output_grids = build_detector(img_shape=(img_res[0], img_res[1], 3), architecture=architecture, anchors=anchors,
n_polygon=4)
encoder = Encoder(anchor_dims=anchors, img_norm=img_res, grids=output_grids, n_polygon=4, iou_min=0.4)
decoder = GateNetDecoder(anchor_dims=anchors, norm=img_res, grid=output_grids, n_polygon=4)
preprocessor = Preprocessor(preprocessing=[TransformResize(img_res)], encoder=encoder, n_classes=1, img_shape=img_res, color_format='bgr')
loss = GateDetectionLoss()
"""
Datasets
"""
image_source = ['resource/ext/samples/daylight_course1',
'resource/ext/samples/daylight_course5',
'resource/ext/samples/daylight_course3',
'resource/ext/samples/iros2018_course1',
'resource/ext/samples/iros2018_course5',
'resource/ext/samples/iros2018_flights',
'resource/ext/samples/basement_course3',
'resource/ext/samples/basement_course1',
'resource/ext/samples/iros2018_course3_test',
'resource/ext/samples/various_environments20k',
# 'resource/ext/samples/realbg20k'
]
batch_size = 16
n_samples = None
subsets = None
min_obj_size = 0.001
max_obj_size = 2
min_aspect_ratio = 0.3
max_aspect_ratio = 3.0
def filter(label):
    """Drop implausible ground-truth objects from an image label.

    Keeps only objects whose relative area and aspect ratio fall inside the
    configured bounds and whose polygon has at most two corner points outside
    the image frame.  Returns a new ImgLabel containing the survivors.

    NOTE(review): this shadows the builtin ``filter``; renaming it would
    require updating the ``filter=filter`` keyword passed to GateGenerator.
    """
    # Relative-area gate: object area over total image area (img_res is (H, W)).
    objs_in_size = [obj for obj in label.objects if
                    min_obj_size < (obj.poly.height * obj.poly.width) / (img_res[0] * img_res[1]) < max_obj_size]
    # Aspect-ratio gate (height / width).
    objs_within_angle = [obj for obj in objs_in_size if
                         min_aspect_ratio < obj.poly.height / obj.poly.width < max_aspect_ratio]
    objs_in_view = []
    for obj in objs_within_angle:
        # obj.poly.points: assumed (N, 2) array of (x, y) corners -- TODO confirm.
        mat = obj.poly.points
        # Reject objects with more than two corners outside the image bounds.
        if (len(mat[(mat[:, 0] < 0) | (mat[:, 0] > img_res[1])]) +
                len(mat[(mat[:, 1] < 0) | (mat[:, 1] > img_res[0])])) > 2:
            continue
        objs_in_view.append(obj)
    return ImgLabel(objs_in_view)
# Fraction of the samples held out for validation.
valid_frac = 0.005
train_gen = GateGenerator(image_source, batch_size=batch_size, valid_frac=valid_frac,
                          color_format='bgr', label_format='xml', n_samples=n_samples,
                          remove_filtered=False, max_empty=0, filter=filter, subsets=subsets)
"""
Paths
"""
# All artifacts (checkpoints, logs, summaries) go under out/<work_dir>/.
work_dir = 'testv3'
model_dir = 'out/' + work_dir + '/'
create_dirs([model_dir])
"""
Training Config
"""
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.005)
def average_precision06(y_true, y_pred):
    """Keras metric: average precision of the gate detector at IoU >= 0.6."""
    return AveragePrecisionGateNet(batch_size=batch_size, n_boxes=encoder.n_boxes, grid=output_grids,
                                   norm=img_res, iou_thresh=0.6).compute(y_true, y_pred)
model.compile(optimizer=optimizer,
              loss=loss.compute,
              metrics=[average_precision06, loss.localization_loss, loss.confidence_loss])

initial_epoch = 0
epochs = 100
log_file_name = model_dir + '/log.csv'
# Append to an existing CSV log only when resuming a previous run.
append = Path(log_file_name).is_file() and initial_epoch > 0

callbacks = [
    EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3, mode='min',
                  verbose=1),
    # Keeps only the best checkpoint by validation loss.
    ModelCheckpoint(model_dir, monitor='val_loss', verbose=1,
                    save_best_only=True,
                    mode='min', save_weights_only=False,
                    period=1),
    TensorBoard(batch_size=batch_size, log_dir=model_dir, write_images=True,
                histogram_freq=0),
    TerminateOnNaN(),
    ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, min_lr=0.00001),
    CSVLogger(log_file_name, append=append),
    History()
]

# Human-readable description of the augmentation pipeline for the summary file.
if isinstance(preprocessor.augmenter, RandomEnsemble):
    augmentation = ''
    augmenters = preprocessor.augmenter.augmenters
    probs = preprocessor.augmenter.probs
    for i in range(len(augmenters)):
        augmentation += '\n{0:.2f} -> {1:s}'.format(probs[i], augmenters[i].__class__.__name__)
else:
    augmentation = preprocessor.augmenter.__class__.__name__

# Experiment record saved alongside the model for reproducibility.
summary = {'resolution': img_res,
           'image_source': train_gen.source_dir,
           'color_format': train_gen.color_format,
           'batch_size': train_gen.batch_size,
           'n_samples': train_gen.n_samples,
           'transform': augmentation,
           'initial_epoch': initial_epoch,
           'epochs': epochs,
           'weights': model.count_params(),
           'architecture': architecture,
           'anchors': anchors,
           'img_res': img_res,
           'grid': output_grids,
           # 'valid_set': validation_set,
           'min_obj_size': min_obj_size,
           'max_obj_size': max_obj_size,
           'max_aspect_ratio': max_aspect_ratio,
           'min_aspect_ratio': min_aspect_ratio}

pp.pprint(summary)
save_file(summary, 'summary.txt', model_dir, verbose=False)
save_file(summary, 'summary.pkl', model_dir, verbose=False)
model.summary()

model.fit_generator(
    generator=preprocessor.preprocess_train_generator(train_gen.generate()),
    steps_per_epoch=(train_gen.n_samples / batch_size),
    epochs=epochs,
    initial_epoch=initial_epoch,
    verbose=1,
    validation_data=preprocessor.preprocess_train_generator(train_gen.generate_valid()),
    validation_steps=int(train_gen.n_samples * train_gen.valid_frac) / batch_size,
    callbacks=callbacks)
| [
"phild@protonmail.com"
] | phild@protonmail.com |
4a17b85bb8379fc9ce9ca619292237c86f7c8a04 | 75e518cb1c38cbf8bc55b5e5bb186bcf4412f240 | /migrations/versions/852cea8a2a22_initial_migration.py | 181aa2c80e51866462ceae6d38c2dafa9ee2b0a6 | [
"MIT"
] | permissive | adriankiprono/pitches_project | 984e8381b78c711bf20f84380fa83c26a1392f1b | a3102a7b1d618ad35b981414049c3ead0b5ecc3c | refs/heads/master | 2022-10-07T17:11:54.302322 | 2019-12-04T11:12:18 | 2019-12-04T11:12:18 | 223,401,174 | 0 | 0 | MIT | 2022-09-16T18:13:55 | 2019-11-22T12:44:29 | Python | UTF-8 | Python | false | false | 1,141 | py | """Initial Migration
Revision ID: 852cea8a2a22
Revises: a7cab0911f2f
Create Date: 2019-11-28 11:12:51.171433
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '852cea8a2a22'
down_revision = 'a7cab0911f2f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: drop the now-unused ``comments`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comments')
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: recreate the ``comments`` table with its
    original columns and foreign keys to ``users`` and ``pitches``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('comments',
    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
    sa.Column('comment', sa.VARCHAR(length=1000), autoincrement=False, nullable=True),
    sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('pitch', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['pitch'], ['pitches.id'], name='comments_pitch_fkey'),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='comments_user_id_fkey'),
    sa.PrimaryKeyConstraint('id', name='comments_pkey')
    )
    # ### end Alembic commands ###
| [
"tuimuradrian6@gmail.com"
] | tuimuradrian6@gmail.com |
6ec735f143588e6adf5da9dd01457a246fb174ae | ea42ec421c74c273ef1e614dff447076ddd4f69a | /Week_04/126.Word-ladder-II.py | 4565ffe65c268efe85726068a328c969c18c9e36 | [] | no_license | youwithouto/algorithm021 | 5278262f0909914c7e6d6eb58d709fc173b29991 | 81e526385eb3464cbe173ea145badd4be20879af | refs/heads/main | 2023-03-01T08:07:07.526371 | 2021-02-06T04:31:30 | 2021-02-06T04:31:30 | 316,673,717 | 0 | 0 | null | 2020-11-28T06:40:55 | 2020-11-28T06:40:54 | null | UTF-8 | Python | false | false | 939 | py | import collections
class Solution:
    """Word Ladder II: enumerate every shortest transformation sequence."""

    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Layered BFS: each frontier maps a word to all shortest paths that
        reach it; words consumed by a layer are removed so longer paths can
        never revisit them."""
        unused = set(wordList)
        shortest_paths = []
        frontier = {beginWord: [[beginWord]]}
        while frontier:
            next_frontier = collections.defaultdict(list)
            for word, paths in frontier.items():
                if word == endWord:
                    # Every path that reached endWord is, by BFS, shortest.
                    shortest_paths.extend(paths)
                    continue
                for pos in range(len(word)):
                    for letter in 'abcdefghijklmnopqrstuvwxyz':
                        candidate = word[:pos] + letter + word[pos + 1:]
                        if candidate not in unused:
                            continue
                        next_frontier[candidate] += [path + [candidate] for path in paths]
            # Words reached in this layer cannot appear on a shortest path again.
            unused -= set(next_frontier)
            frontier = next_frontier
        return shortest_paths
| [
"youwithouto.z@gmail.com"
] | youwithouto.z@gmail.com |
40fc6c47643f9ed3130fe6e82adeaa39f9a4b23e | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v7_2_0/aaa_config/aaa/authentication/__init__.py | d5aa7c18701ef49ed13f411a6214d847e3494fbd | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import login
class authentication(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module brocade-aaa - based on the path /aaa-config/aaa/authentication. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    """
    # NOTE: pybind auto-generated container; hand edits are normally lost on
    # regeneration. Only comments were added here.
    __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__login',)

    _yang_name = 'authentication'
    _rest_name = 'authentication'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Resolve the XPath helper: explicit kwarg wins, then the parent's, else disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False

        # Same resolution order for the extension-methods dictionary.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False

        # Child container 'login' (order of login authentication sources).
        self.__login = YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)

        # Optional copy-construction from a single object exposing the same elements.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were actually changed on the source object.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # YANG data path of this container within the model tree.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return [u'aaa-config', u'aaa', u'authentication']

    def _rest_path(self):
        # REST path; containers without a rest name inherit the parent's path.
        if hasattr(self, "_parent"):
            if self._rest_name:
                return self._parent._rest_path()+[self._rest_name]
            else:
                return self._parent._rest_path()
        else:
            return [u'aaa', u'authentication']

    def _get_login(self):
        """
        Getter method for login, mapped from YANG variable /aaa_config/aaa/authentication/login (container)
        """
        return self.__login

    def _set_login(self, v, load=False):
        """
        Setter method for login, mapped from YANG variable /aaa_config/aaa/authentication/login (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_login is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_login() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """login must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
            })

        self.__login = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_login(self):
        # Restore the element to its default (empty) container instance.
        self.__login = YANGDynClass(base=login.login, is_container='container', presence=False, yang_name="login", rest_name="login", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Order of sources for login\n(default='local')", u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'auth_login_cp'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)

    login = __builtin__.property(_get_login, _set_login)

    _pyangbind_elements = {'login': login, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
857884fcc3acbb7df2a7c6f6a680064ffe58729c | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/tutorial/control/summary.py | 92927577d2393d79f61a34f87903c70e13ad1416 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,249 | py | # 2015.11.10 21:30:56 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/tutorial/control/summary.py
from tutorial.control.functional import FunctionalVarSet
from tutorial.logger import LOG_ERROR, LOG_DEBUG
class _Flag(object):
def __init__(self, name, active, store = True):
super(_Flag, self).__init__()
self.name = name
self.active = active
self.store = store
def __repr__(self):
return '{0:>s}: {1!r:s}'.format(self.name, self.active)
def isActive(self):
return self.active
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class FlagSummary(object):
    """Mutable registry of named boolean flags.

    Flags declared up-front via ``flagNames`` are persistent; flags first
    touched through ``activateFlag``/``deactivateFlag`` are created as
    transient (``store=False``) and excluded from ``getDict``.
    """

    def __init__(self, flagNames, initial=None):
        """Create flags for each name, seeding state from ``initial``.

        :param flagNames: iterable of flag names (None means no flags).
        :param initial: optional mapping of name -> starting active state.
        """
        super(FlagSummary, self).__init__()
        if flagNames is None:
            flagNames = []
        if initial is None:
            initial = {}
        self.__flags = {}
        initialGetter = initial.get
        for name in flagNames:
            self.__flags[name] = _Flag(name, initialGetter(name, False))

    def __repr__(self):
        return 'FlagSummary({0:s}): {1!r:s}'.format(hex(id(self)), self.__flags.values())

    def deactivateFlag(self, flagName):
        """Turn a flag off, creating a transient flag for unknown names."""
        LOG_DEBUG('Deactivate flag', flagName)
        if flagName in self.__flags:
            self.__flags[flagName].deactivate()
        else:
            self.__flags[flagName] = _Flag(flagName, False, store=False)

    def activateFlag(self, flagName):
        """Turn a flag on, creating a transient flag for unknown names."""
        LOG_DEBUG('Activate flag: ', flagName)
        if flagName in self.__flags:
            self.__flags[flagName].activate()
        else:
            self.__flags[flagName] = _Flag(flagName, True, store=False)

    def isActiveFlag(self, flagName):
        """Return the flag's state; unknown flags read as False."""
        # Single lookup via .get instead of a membership test plus indexing.
        flag = self.__flags.get(flagName)
        return flag.isActive() if flag is not None else False

    def addFlag(self, flagName):
        """Register a persistent, initially-inactive flag if not present."""
        if flagName not in self.__flags:
            self.__flags[flagName] = _Flag(flagName, False)

    def getDict(self):
        """Return {name: active} for persistent (store=True) flags only."""
        # Dict comprehension replaces the filter/map + lambda pipeline;
        # .values() (not itervalues()) keeps this working on Python 2 and 3.
        return {flag.name: flag.active for flag in self.__flags.values() if flag.store}
class VarSummary(object):
    """Variable store combining immutable var-sets with a mutable runtime dict.

    Names backed by a var-set are read-only; all other names live in the
    runtime mapping and may be freely written.
    """

    def __init__(self, varSets, runtime=None):
        super(VarSummary, self).__init__()
        # Wrap each declared var-set; a falsy argument yields an empty table.
        self.__varSets = {varSet.getID(): FunctionalVarSet(varSet) for varSet in varSets} if varSets else {}
        self.__runtime = runtime or {}

    def get(self, varID, default=None):
        """Return the var-set's first actual value, or the runtime value."""
        if varID in self.__varSets:
            return self.__varSets[varID].getFirstActual()
        return self.__runtime.get(varID, default)

    def set(self, varID, value):
        """Write a runtime variable; var-set-backed names are immutable."""
        if varID in self.__varSets:
            # NOTE(review): message typo ("in not") preserved verbatim.
            LOG_ERROR('Var {0:>s} in not mutable.'.format(varID))
        else:
            LOG_DEBUG('Set var {0:>s}'.format(varID), value)
            self.__runtime[varID] = value
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tutorial\control\summary.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:30:56 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
abec78e2a72aa4027585259773b61dd1fb5a4f12 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_148/ch88_2020_05_18_19_59_52_666753.py | e0172651bedd8c38edc188d258b10ee5ae48067b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | class Retangulo:
def __init__(self, x_coord, y_coord):
self.x = x_coord
self.y = y_coord
def calcula_perimetro(self):
a = self.x
b = self.y
return 2*(a+b)
def calcula_area(self):
c = self.x
d = self.y
return c*d
| [
"you@example.com"
] | you@example.com |
184f4705dd6a5a6cdcc98bd061041cf0dd48e763 | 6390cb7020af3efadfc55bf374316b39164c72e3 | /part2/2_4_7_exptced_conditions.py | 5044b10932ff08bc199440db7ea7177d8000278d | [] | no_license | andrewnnov/stepik_qa | 37fb0808eb30eb807fd951efa0716589ce8a42fa | 689dd383793aeb20e88ce89ff56ff6db263615bd | refs/heads/main | 2023-06-19T00:22:09.292291 | 2021-07-18T19:34:46 | 2021-07-18T19:34:46 | 383,043,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Open the exercise page, wait for the submit button to become clickable,
# click it and verify the success message appears.
try:
    link = "http://suninjuly.github.io/wait2.html"
    # NOTE(review): hard-coded Windows driver path; works only on this machine.
    browser = webdriver.Chrome("C:\Projects\stepik_qa\driver\chromedriver.exe")
    browser.get(link)
    # Tell Selenium to poll for up to 5 seconds until the button is clickable.
    button = WebDriverWait(browser, 5).until(
        EC.element_to_be_clickable((By.ID, "verify"))
    )
    button.click()
    message = browser.find_element_by_id("verify_message")
    assert "successful" in message.text
finally:
    # Pause so the script's outcome can be inspected visually.
    time.sleep(10)
    # Close the browser after all interactions.
    browser.quit()
| [
"andrewnnov@yandex.ru"
] | andrewnnov@yandex.ru |
ad2368024d7faf371220f45fba13ace22d01cb63 | c1847b5eced044ee1c03c9cd32bf336f38d6b17c | /apptools/apptools-ios-tests/apptools/target_platforms.py | beb6bb6c94a3e7beb4a333dfa7443c75af97151d | [
"BSD-3-Clause"
] | permissive | JianfengXu/crosswalk-test-suite | 60810f342adc009bbe249bc38e2153b1f44b5d68 | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | refs/heads/master | 2021-01-17T04:51:47.366368 | 2015-08-31T02:07:47 | 2015-08-31T02:07:47 | 17,897,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import commands
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Integration test for the ``crosswalk-app platforms`` CLI command."""

    def test_list_target_platforms(self):
        """The platform listing should report ios, android and windows, in order."""
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.PackTools + "crosswalk-app platforms"
        status = os.popen(cmd).readlines()
        # Each line may carry a marker character ('*', presumably the active
        # platform -- TODO confirm) plus a newline, hence strip(" *\n").
        self.assertEquals("ios", status[0].strip(" *\n"))
        self.assertEquals("android", status[1].strip(" *\n"))
        self.assertEquals("windows", status[2].strip(" *\n"))


if __name__ == '__main__':
    unittest.main()
| [
"yunx.liu@intel.com"
] | yunx.liu@intel.com |
8920296ee5cb27a4b0e22e713224727923678238 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/disassociate_subnet_firewall_request_body.py | 06e2bb56a2ca9a526a1f1210ffb3bc9cc8974413 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,355 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DisassociateSubnetFirewallRequestBody:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # NOTE: auto-generated Huawei Cloud SDK request model; regenerate rather
    # than hand-edit. Only comments/docstrings were touched here.

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'subnets': 'list[FirewallAssociation]'
    }

    attribute_map = {
        'subnets': 'subnets'
    }

    def __init__(self, subnets=None):
        """DisassociateSubnetFirewallRequestBody

        The model defined in huaweicloud sdk

        :param subnets: List of subnets to disassociate from the ACL (firewall).
        :type subnets: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
        """

        self._subnets = None
        self.discriminator = None

        self.subnets = subnets

    @property
    def subnets(self):
        """Gets the subnets of this DisassociateSubnetFirewallRequestBody.

        List of subnets to disassociate from the ACL (firewall).

        :return: The subnets of this DisassociateSubnetFirewallRequestBody.
        :rtype: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
        """
        return self._subnets

    @subnets.setter
    def subnets(self, subnets):
        """Sets the subnets of this DisassociateSubnetFirewallRequestBody.

        List of subnets to disassociate from the ACL (firewall).

        :param subnets: The subnets of this DisassociateSubnetFirewallRequestBody.
        :type subnets: list[:class:`huaweicloudsdkvpc.v3.FirewallAssociation`]
        """
        self._subnets = subnets

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside plain dicts as well.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            # Legacy Python 2 workaround so non-ASCII fields serialize cleanly.
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DisassociateSubnetFirewallRequestBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
84e79321c4786a570167323987d2d13e7654deb4 | 2e6f4690a2a9448a1eb027c14a637ab449b94c4f | /qa/rpc-tests/bipdersig.py | 1bb4ac97183bc298a20535bae3f3bb1c86222804 | [
"MIT"
] | permissive | mirzaei-ce/core-mashhadbit | 11d60f09f80c8056f5e063eb65783f8699f5ede8 | 1d9d45336cbbda7ffd700d3f1c3dd9e8b4ce2745 | refs/heads/master | 2021-07-18T11:43:26.440889 | 2017-10-26T14:31:07 | 2017-10-26T14:31:07 | 108,422,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the BIP66 changeover logic
#
from test_framework.test_framework import MashhadbitTestFramework
from test_framework.util import *
class BIP66Test(MashhadbitTestFramework):
    """Exercise the BIP66 (strict-DER signature) version-3 block changeover.

    Node 1 mines version=2 (old) blocks, node 2 mines version=3 (new) blocks;
    node 0 observes the shared chain and its block count is asserted after
    each mining step.
    """

    def setup_network(self):
        """Start three nodes with different block versions, linked via node 0."""
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        """Walk the chain through the 750/950-of-1000 BIP66 activation thresholds."""
        cnt = self.nodes[0].getblockcount()

        # Mine some old-version blocks
        self.nodes[1].generate(100)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 version=2 blocks")

        # Mine 750 new-version blocks
        for i in xrange(15):
            self.nodes[2].generate(50)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 850):
            raise AssertionError("Failed to mine 750 version=3 blocks")

        # TODO: check that new DERSIG rules are not enforced

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 851):
            raise AssertionError("Failed to mine a version=3 blocks")

        # TODO: check that new DERSIG rules are enforced

        # Mine 198 new-version blocks
        for i in xrange(2):
            self.nodes[2].generate(99)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1049):
            raise AssertionError("Failed to mine 198 version=3 blocks")

        # Mine 1 old-version block (still below the 950/1000 rejection threshold)
        self.nodes[1].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1050):
            raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Failed to mine a version=3 block")

        # Mine 1 old-version blocks -- expected to be REJECTED now that 950 of
        # the last 1000 blocks are version=3.
        try:
            self.nodes[1].generate(1)
            raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
        except JSONRPCException:
            pass
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1052):
            raise AssertionError("Failed to mine a version=3 block")
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
4c367a95ab9afb62865daaf75a0e5314c4705ec7 | 374dea7d7d1a424d91f369cc75b11b16e1a489cd | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/bPN_2o1RXRZaK7Vxgp3oTysbcxQmJr9XStOWBh0VWNo=/binascii.cpython-37m-x86_64-linux-gnu.pyi | 352cce23207d526d4b293041c52677dec98d99b8 | [] | no_license | tkoon107/text-generation-LSTM-neural-net | ed0e6a0fb906f4b4fd649eadfe36c254144be016 | 6b98ee355a30da128462bfac531509539d6533ae | refs/heads/master | 2020-05-27T16:46:44.128875 | 2019-06-10T18:26:54 | 2019-06-10T18:26:54 | 188,708,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | pyi | import builtins as _mod_builtins
class Error(_mod_builtins.ValueError):
__class__ = Error
__dict__ = {}
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
__module__ = 'binascii'
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __weakref__(self):
'list of weak references to the object (if defined)'
pass
class Incomplete(_mod_builtins.Exception):
__class__ = Incomplete
__dict__ = {}
def __init__(self, *args, **kwargs):
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
__module__ = 'binascii'
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@property
def __weakref__(self):
'list of weak references to the object (if defined)'
pass
__doc__ = 'Conversion between binary data and ASCII'
__file__ = '/home/trevor/anaconda3/lib/python3.7/lib-dynload/binascii.cpython-37m-x86_64-linux-gnu.so'
__name__ = 'binascii'
__package__ = ''
def a2b_base64(data):
'Decode a line of base64 data.'
pass
def a2b_hex(hexstr):
'Binary data of hexadecimal representation.\n\nhexstr must contain an even number of hex digits (upper or lower case).\nThis function is also available as "unhexlify()".'
pass
def a2b_hqx(data):
'Decode .hqx coding.'
pass
def a2b_qp(data, header):
'Decode a string of qp-encoded data.'
pass
def a2b_uu(data):
'Decode a line of uuencoded data.'
pass
def b2a_base64(data):
'Base64-code line of data.'
pass
def b2a_hex(data):
'Hexadecimal representation of binary data.\n\nThe return value is a bytes object. This function is also\navailable as "hexlify()".'
pass
def b2a_hqx(data):
'Encode .hqx data.'
pass
def b2a_qp(data, quotetabs, istext, header):
'Encode a string using quoted-printable encoding.\n\nOn encoding, when istext is set, newlines are not encoded, and white\nspace at end of lines is. When istext is not set, \\r and \\n (CR/LF)\nare both encoded. When quotetabs is set, space and tabs are encoded.'
pass
def b2a_uu(data):
'Uuencode line of data.'
pass
def crc32(data, crc):
'Compute CRC-32 incrementally.'
pass
def crc_hqx(data, crc):
'Compute CRC-CCITT incrementally.'
pass
def hexlify(data):
'Hexadecimal representation of binary data.\n\nThe return value is a bytes object.'
pass
def rlecode_hqx(data):
'Binhex RLE-code binary data.'
pass
def rledecode_hqx(data):
'Decode hexbin RLE-coded string.'
pass
def unhexlify(hexstr):
'Binary data of hexadecimal representation.\n\nhexstr must contain an even number of hex digits (upper or lower case).'
pass
| [
"trevorlang@langdatascience.org"
] | trevorlang@langdatascience.org |
23f64bddd7650d60c54a5d74d312571372533641 | d7620b35a248cf1cabc98f721026a781164c89f5 | /OpenCV-basic/ch03/arithmetic.py | 2f220e3aab44a851cb6efc0940d6ac10c11964da | [] | no_license | Seonghyeony/Project-OpenCV | e53ed1e72b113a29cc5890a89a4e7f4078dfa198 | a07fdc4bfa4cbf5b851a6ec20f0873d09cecbd54 | refs/heads/main | 2023-02-17T22:38:35.537226 | 2021-01-19T13:09:29 | 2021-01-19T13:09:29 | 326,711,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | import sys
import numpy as np
import cv2
from matplotlib import pyplot as plt
src1 = cv2.imread('lenna256.bmp', cv2.IMREAD_GRAYSCALE)
src2 = cv2.imread('square.bmp', cv2.IMREAD_GRAYSCALE)
if src1 is None or src2 is None:
print('Image load failed!')
sys.exit()
dst1 = cv2.add(src1, src2, dtype=cv2.CV_8U) # 덧셈 연산
dst2 = cv2.addWeighted(src1, 0.5, src2, 0.5, 0.0) # 가중치 연산
dst3 = cv2.subtract(src1, src2) # 뺄셈 연산
dst4 = cv2.absdiff(src1, src2) # 차 연산
# 2행 3열 1번 째, 2행 3열 2번 째....
plt.subplot(231), plt.axis('off'), plt.imshow(src1, 'gray'), plt.title('src1')
plt.subplot(232), plt.axis('off'), plt.imshow(src2, 'gray'), plt.title('src2')
plt.subplot(233), plt.axis('off'), plt.imshow(dst1, 'gray'), plt.title('add')
plt.subplot(234), plt.axis('off'), plt.imshow(dst2, 'gray'), plt.title('addWeighted')
plt.subplot(235), plt.axis('off'), plt.imshow(dst3, 'gray'), plt.title('subtract')
plt.subplot(236), plt.axis('off'), plt.imshow(dst4, 'gray'), plt.title('absdiff')
plt.show()
| [
"sunghyun7949@naver.com"
] | sunghyun7949@naver.com |
dadd0fa88fd200adad18bac6e84b65a9d05615d6 | 802105debf55010216717f1a2536057cec2f147c | /convo/migrations/0005_convo_user.py | b262f16574328cd460c80a81fdf8dda56e3acfc7 | [] | no_license | Ibrokola/speakout | ba4bbff627fcd0a82cf206b45d4a2938931ede0e | a800e17edbd69be2c45fec7fe75a83ed1b92d8c5 | refs/heads/master | 2021-01-19T16:59:42.743235 | 2017-05-06T04:37:26 | 2017-05-06T04:37:26 | 88,296,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 10:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('convo', '0004_auto_20170415_1044'),
]
operations = [
migrations.AddField(
model_name='convo',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"babskolawole@gmail.com"
] | babskolawole@gmail.com |
5607094dc8545aa042719562e0b5822af083ab9f | ac33111a551d13c13f96abd64898ce216959ada0 | /study/sorting/6-02 위에서아래로.py | fa7f9e4debd4637dbca63642ba35a3245ee2076f | [] | no_license | apple2062/algorithm | b48833e2ebcfe08623f328309780ab5e59749c79 | 9bdd08f513bc2f7600b7b263738e3eb09e86f77c | refs/heads/master | 2023-04-15T05:09:06.401237 | 2021-04-29T03:30:55 | 2021-04-29T03:30:55 | 289,252,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # 6-2 위에서 아래로
# 크기에 상관없이 나열되어 있다.
# 큰 숫 부터 작은 수 순서로 정렬 . 내림차순 정렬 프로그램 만들기
n = int(input())
matrix = []
for i in range(n):
matrix.append(int(input()))
matrix.sort(reverse=True)
for i in matrix:
print(i,end = ' ')
| [
"apple2062@naver.com"
] | apple2062@naver.com |
cec893095fc572f735a747b84cbe0d4f91bce9c7 | 9275454ce938751179ef08ecc21b5dd22a1a0ef0 | /src/brasil/gov/barra/tests/test_helper.py | 1dcf8e1fb446a3a1350cc5cde01b87c10d6d0263 | [] | no_license | Uelson/brasil.gov.barra | a949e49f3c7fd6e52dd657946946ef5d574bb849 | 649fbb6a36a541fb129bb234a307bff6a7e9c0f0 | refs/heads/master | 2020-05-20T19:31:16.899824 | 2015-02-26T21:44:03 | 2015-02-26T21:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | # -*- coding: utf-8 -*-
from brasil.gov.barra.interfaces import IBarraInstalada
from brasil.gov.barra.testing import INTEGRATION_TESTING
from plone import api
from zope.interface import alsoProvides
import unittest2 as unittest
class HelperViewTest(unittest.TestCase):
""" Caso de teste da Browser View BarraHelper"""
layer = INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
pp = api.portal.get_tool('portal_properties')
self.sheet = getattr(pp, 'brasil_gov', None)
# Como nao eh um teste funcional, este objeto
# REQUEST precisa ser anotado com o browser layer
alsoProvides(self.portal.REQUEST, IBarraInstalada)
def test_helper_view_registration(self):
""" Validamos se BarraHelper esta registrada"""
view = api.content.get_view(
name='barra_helper',
context=self.portal,
request=self.portal.REQUEST,
)
view = view.__of__(self.portal)
self.failUnless(view)
def test_helper_view_local(self):
"""Uso do metodo local"""
# Obtemos a Browser view
view = api.content.get_view(
name='barra_helper',
context=self.portal,
request=self.portal.REQUEST,
)
# Validamos que ela retorne o valor padrao para
# o metodo remoto(configurado em profiles/default/propertiestool.xml)
self.assertFalse(view.local())
# Alteramos o valor para hospedagem para local
self.sheet.local = True
# O resultado da consulta a Browser View deve se adequar
self.assertTrue(view.local())
| [
"erico@simplesconsultoria.com.br"
] | erico@simplesconsultoria.com.br |
e0491c010038ee0b638174e74c8ad306a26cd3a4 | 3f84ff1f506287bf0bb3b0840947e3ef23f22c87 | /10day/8-工厂模式5.py | dbbcbe331d30c8a777a92ad609bc957cecc5b3e6 | [] | no_license | 2099454967/wbx | 34b61c0fc98a227562ea7822f2fa56c5d01d3654 | 316e7ac7351b532cb134aec0740e045261015920 | refs/heads/master | 2020-03-18T06:09:58.544919 | 2018-05-28T13:01:19 | 2018-05-28T13:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | #商店的基类(抽象类) 规定好商店要干的事情
#预订
#创建
class Store(object):
def createFactory(self,type):
pass
def order(self,type):
return self.createFactory(type)
class BmwStore(Store):
def createFactory(self,type):
return BmwFactory().selectCar(type)
class BCStore(Store):
def createFactory(self,type):
return BCFactory().selectCar(type)
class Factory(object):
def __init__(self,name):
self.name = name
def selectCar(self,type):
pass
class BCFactory(Factory):
def selectCar(self,type):
if type == 0:
return Bmw730()
elif type == 1:
return Bmwx5()
class BmwFactory(Factory):
def selectCar(self,type):
if type == 0:
return DaG()
elif type == 1:
return XiaoG()
class Car(object):
def move(self):
print("在移动")
def music(self):
print("播放音乐")
class Bmw730(Car):
pass
class Bmwx5(Car):
pass
class DaG(Car):
pass
class XiaoG(Car):
pass
if __name__ == '__main__':
store = BmwStore()
bmwx5 = store.order(1)
bmwx5.move()
bmwx5.music()
| [
"2099454967@qq.com"
] | 2099454967@qq.com |
2c671f1c4a133aca1fab7ed9e5ecaf37b23c15ad | ba7052d8cf27317d7ebd22911e44aec860464307 | /durable_rules_tools/magic.py | 9d67ab8b328bcf9a84bdf2ac2c57607849a25aea | [
"MIT"
] | permissive | innovationOUtside/durable_rules_magic | 841a005ecae417910dbc7800270483e1eed11945 | 3d2fd4386ce2ea8ab83e6e84a39822dc56e21f9a | refs/heads/master | 2023-01-10T16:49:16.784258 | 2020-11-02T18:26:15 | 2020-11-02T18:26:15 | 263,738,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,549 | py | from IPython.core.magic import magics_class, line_cell_magic, Magics
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
import warnings
from .rules_utils import quick_assert_fact, quick_retract_fact, quick_post_event, _delete_state
# TO DO - things are passed in from the magic as strings
# Should we try to cast them to eg int, float, list, tuple, dict?
@magics_class
class DurableRulesMagic(Magics):
def __init__(self, shell, cache_display_data=False):
super(DurableRulesMagic, self).__init__(shell)
self.graph = None
self.RULESET = None
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def assert_facts(self, line, cell):
"Assert and/or retract several facts."
args = parse_argstring(self.assert_facts, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if _assertion.startswith('-'):
quick_retract_fact(_ruleset, _assertion.lstrip('-'))
elif not _assertion.startswith('#'):
quick_assert_fact(_ruleset, _assertion)
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def retract_facts(self, line, cell):
"Retract and/or assert several facts."
args = parse_argstring(self.retract_facts, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if _assertion.startswith('*'):
quick_assert_fact(_ruleset, _assertion.lstrip('-'))
elif not _assertion.startswith('#'):
quick_retract_fact(_ruleset, _assertion)
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def post_events(self, line, cell):
"Post several events."
args = parse_argstring(self.post_events, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if not _assertion.startswith('#'):
quick_post_event(_ruleset, _assertion)
@line_cell_magic
@magic_arguments()
@argument('--ruleset', '-r', default='', help='Ruleset name.')
@argument('--no-reset', action='store_false', help='Disable automatic state deletion.')
def facts_and_events(self, line, cell):
"Assert and/or retract several facts and/or post several events."
args = parse_argstring(self.facts_and_events, line)
if not args.ruleset and self.RULESET is None:
warnings.warn("You must provide a ruleset reference (--ruleset/-r RULESET).")
return
elif args.ruleset:
self.RULESET = self.shell.user_ns[args.ruleset]
_ruleset = self.RULESET
#print(_ruleset)
if args.no_reset:
_delete_state(_ruleset)
for _assertion in cell.split('\n'):
if _assertion.startswith('-'):
quick_retract_fact(_ruleset, _assertion.lstrip('-'))
elif _assertion.startswith('*'):
quick_assert_fact(_ruleset, _assertion.lstrip('*'))
elif _assertion.startswith('%'):
quick_post_event(_ruleset, _assertion.lstrip('%')) | [
"tony.hirst@gmail.com"
] | tony.hirst@gmail.com |
3e1c302e7759ae163c083e1f83582e237a26a36c | fb365b414076ae14b65c05a6ebb271eb6a35c81d | /cron.py | 84c871029f124f562a8abce7b71e3dc975c3eecf | [
"MIT"
] | permissive | kklmn/ArdquariumPi | 425691ad618968ca3fc89e744918962396fcbaef | 1c84e6d548c22f72f6b93ca36900677cccbdfc00 | refs/heads/master | 2022-04-03T07:13:50.224233 | 2022-02-14T17:52:56 | 2022-02-14T17:52:56 | 236,858,425 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | # -*- coding: utf-8 -*-
import subprocess
from datetime import datetime
from croniter import croniter
cronTable = None
cronTest = \
"""
20 7 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
45 8 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light off
30 9 * * 6,7 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
30 17 * * 1-5 /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light on
0 23 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name light off
0 11 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name filter-S on
0 20 * * * /usr/bin/python3 /home/pi/ArdquariumPi/gpioswitch.py --name filter-S off
"""
def init_cron_tasks(isTest=False):
global cronTable
txt = cronTest if isTest else subprocess.Popen(
# ['crontab', '-l'],
['crontab', '-l', '-u', 'pi'],
stdout=subprocess.PIPE,
encoding='utf8').communicate()[0]
cronTable = [l.strip().split(' ') for l in txt.split("\n")
if l and not l.startswith('#') and "gpioswitch" in l]
if cronTable:
cronTable = [[' '.join(l[:5]), l[-2], l[-1]] for l in cronTable]
return cronTable
def get_cron_tasks():
res = {}
if not cronTable:
return res
now = datetime.now()
for cron, what, state in cronTable:
# print(cron, what, state)
cli = croniter(cron, now)
prevt, nextt = cli.get_prev(datetime), cli.get_next(datetime)
if what in res:
if 'prev'+state in res[what]:
condPrev = prevt >= res[what]['prev'+state]
condNext = nextt <= res[what]['next'+state]
else:
condPrev, condNext = True, True
else:
res[what] = {}
condPrev, condNext = True, True
if condPrev:
res[what]['prev'+state] = prevt
if condNext:
res[what]['next'+state] = nextt
bad = []
for what in res:
try:
if res[what]['prevon'] > res[what]['prevoff']: # now on
res[what]['str'] = 'on by crontab\n{0} – {1}'.format(
res[what]['prevon'].strftime('%H:%M'),
res[what]['nextoff'].strftime('%H:%M'))
else: # now off
res[what]['str'] = 'off by crontab\n{0} – {1}'.format(
res[what]['prevoff'].strftime('%H:%M'),
res[what]['nexton'].strftime('%H:%M'))
except KeyError:
bad.append(what)
for what in bad:
del res[what]
return res
| [
"konstantin.klementiev@gmail.com"
] | konstantin.klementiev@gmail.com |
7566f2795f158d397f3272917fa55e7841f35ed2 | 5cb7b9fe09b1dd20c0664d0c86c375ffe353903c | /static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_codecmaps_jp.py | 0bf43b676bf4c3ae4a0b5f92f7f4ba4ac350e2fa | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | shiblon/pytour | 6d0ee4a679cf7e6ffd8ac6326b8bb0d9071a7c73 | 71a181ec16fd38b0af62f55e28a50e91790733b9 | refs/heads/master | 2021-01-17T10:09:18.822575 | 2020-09-23T20:05:58 | 2020-09-23T20:05:58 | 23,226,350 | 2 | 3 | Apache-2.0 | 2020-02-17T22:36:02 | 2014-08-22T13:33:27 | Python | UTF-8 | Python | false | false | 1,968 | py | #
# test_codecmaps_jp.py
# Codec mapping tests for Japanese encodings
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class TestCP932Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp932'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
'WINDOWS/CP932.TXT'
supmaps = [
('\x80', u'\u0080'),
('\xa0', u'\uf8f0'),
('\xfd', u'\uf8f1'),
('\xfe', u'\uf8f2'),
('\xff', u'\uf8f3'),
]
for i in range(0xa1, 0xe0):
supmaps.append((chr(i), unichr(i+0xfec0)))
class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jp'
mapfilename = 'EUC-JP.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT'
class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jis'
mapfilename = 'SHIFTJIS.TXT'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \
'/EASTASIA/JIS/SHIFTJIS.TXT'
pass_enctest = [
('\x81_', u'\\'),
]
pass_dectest = [
('\\', u'\xa5'),
('~', u'\u203e'),
('\x81_', u'\\'),
]
class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jisx0213'
mapfilename = 'EUC-JISX0213.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT'
class TestSJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jisx0213'
mapfilename = 'SHIFT_JISX0213.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT'
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| [
"shiblon@gmail.com"
] | shiblon@gmail.com |
4800755881cc1cf2bdf95087c49ee5aefac0d3c2 | 9f8a746b4bd8b64affa1e7eab1be5cad40030be1 | /train.py | 8036dc193a3e83bac1dc57bfc8a7b8326a75b332 | [
"Apache-2.0"
] | permissive | jianchi2001/PaddlePaddle-DeepSpeech | 5437b0050fbd21193ed741b61d7d1bd4f4279771 | 7b89e63dfa1dfa1deb1e9f43c521196e8a278fd5 | refs/heads/master | 2023-06-21T12:33:19.967696 | 2021-08-03T03:34:45 | 2021-08-03T03:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,244 | py | import argparse
import functools
import io
from datetime import datetime
from model_utils.model import DeepSpeech2Model
from data_utils.data import DataGenerator
from utils.utility import add_arguments, print_arguments, get_data_len
import paddle.fluid as fluid
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 4, "训练每一批数据的大小")
add_arg('num_epoch', int, 200, "训练的轮数")
add_arg('num_conv_layers', int, 2, "卷积层数量")
add_arg('num_rnn_layers', int, 3, "循环神经网络的数量")
add_arg('rnn_layer_size', int, 2048, "循环神经网络的大小")
add_arg('learning_rate', float, 5e-5, "初始学习率")
add_arg('min_duration', float, 1.0, "最短的用于训练的音频长度")
add_arg('max_duration', float, 15.0, "最长的用于训练的音频长度")
add_arg('test_off', bool, False, "是否关闭测试")
add_arg('use_gru', bool, True, "是否使用GRUs模型,不使用RNN")
add_arg('use_gpu', bool, True, "是否使用GPU训练")
add_arg('share_rnn_weights',bool, False, "是否在RNN上共享权重")
add_arg('init_from_pretrained_model', str, None, "使用预训练模型的路径,当为None是不使用预训练模型")
add_arg('train_manifest', str, './dataset/manifest.train', "训练的数据列表")
add_arg('dev_manifest', str, './dataset/manifest.test', "测试的数据列表")
add_arg('mean_std_path', str, './dataset/mean_std.npz', "数据集的均值和标准值的npy文件路径")
add_arg('vocab_path', str, './dataset/zh_vocab.txt', "数据集的词汇表文件路径")
add_arg('output_model_dir', str, "./models", "保存训练模型的文件夹")
add_arg('augment_conf_path', str, './conf/augmentation.config', "数据增强的配置文件,为json格式")
add_arg('shuffle_method', str, 'batch_shuffle_clipped', "打乱数据的方法", choices=['instance_shuffle', 'batch_shuffle', 'batch_shuffle_clipped'])
args = parser.parse_args()
# 训练模型
def train():
# 是否使用GPU
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
# 获取训练数据生成器
train_generator = DataGenerator(vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
augmentation_config=io.open(args.augment_conf_path, mode='r', encoding='utf8').read(),
max_duration=args.max_duration,
min_duration=args.min_duration,
place=place)
# 获取测试数据生成器
test_generator = DataGenerator(vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
keep_transcription_text=True,
place=place,
is_training=False)
# 获取训练数据
train_batch_reader = train_generator.batch_reader_creator(manifest_path=args.train_manifest,
batch_size=args.batch_size,
shuffle_method=args.shuffle_method)
# 获取测试数据
test_batch_reader = test_generator.batch_reader_creator(manifest_path=args.dev_manifest,
batch_size=args.batch_size,
shuffle_method=None)
# 获取DeepSpeech2模型
ds2_model = DeepSpeech2Model(vocab_size=train_generator.vocab_size,
num_conv_layers=args.num_conv_layers,
num_rnn_layers=args.num_rnn_layers,
rnn_layer_size=args.rnn_layer_size,
use_gru=args.use_gru,
share_rnn_weights=args.share_rnn_weights,
place=place,
init_from_pretrained_model=args.init_from_pretrained_model,
output_model_dir=args.output_model_dir,
vocab_list=test_generator.vocab_list)
# 获取训练数据数量
num_samples = get_data_len(args.train_manifest, args.max_duration, args.min_duration)
print("[%s] 训练数据数量:%d\n" % (datetime.now(), num_samples))
# 开始训练
ds2_model.train(train_batch_reader=train_batch_reader,
dev_batch_reader=test_batch_reader,
learning_rate=args.learning_rate,
gradient_clipping=400,
batch_size=args.batch_size,
num_samples=num_samples,
num_epoch=args.num_epoch,
test_off=args.test_off)
def main():
print_arguments(args)
train()
if __name__ == '__main__':
main()
| [
"yeyupiaoling@foxmail.com"
] | yeyupiaoling@foxmail.com |
eb74a08ec12587a4a54535a70cd11c74cd3333a5 | adce0de4c11887519b8e471f1cbca4e18b46d906 | /h0rton/trainval_data/xy_data.py | 818db3539d211824513348099f2810ba4c5356e5 | [
"MIT"
] | permissive | jiwoncpark/h0rton | 30ca4a3c9943099ecd393e4b936b48cad7d81943 | 2541885d70d090fdb777339cfb77a3a9f3e7996d | refs/heads/master | 2021-06-25T23:08:26.902632 | 2021-01-12T01:57:47 | 2021-01-12T01:57:47 | 199,093,811 | 7 | 1 | null | 2020-03-19T16:02:01 | 2019-07-26T23:56:49 | Jupyter Notebook | UTF-8 | Python | false | false | 6,280 | py | import os
import glob
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from baobab import BaobabConfig
from baobab.data_augmentation.noise_torch import NoiseModelTorch
from baobab.sim_utils import add_g1g2_columns
from .data_utils import whiten_pixels, rescale_01, plus_1_log, whiten_Y_cols
__all__ = ['XYData']
class XYData(Dataset): # torch.utils.data.Dataset
"""Represents the XYData used to train or validate the BNN
"""
def __init__(self, is_train, Y_cols, float_type, define_src_pos_wrt_lens, rescale_pixels, log_pixels, add_pixel_noise, eff_exposure_time, train_Y_mean=None, train_Y_std=None, train_baobab_cfg_path=None, val_baobab_cfg_path=None, for_cosmology=False, rescale_pixels_type='whiten_pixels'):
"""
Parameters
----------
dataset_dir : str or os.path object
path to the directory containing the images and metadata
data_cfg : dict or Dict
copy of the `data` field of `BNNConfig`
for_cosmology : bool
whether the dataset will be used in cosmological inference
(in which case cosmology-related metadata will be stored)
"""
#self.__dict__ = data_cfg.deepcopy()
self.is_train = is_train
if self.is_train:
self.baobab_cfg = BaobabConfig.from_file(train_baobab_cfg_path)
else:
self.baobab_cfg = BaobabConfig.from_file(val_baobab_cfg_path)
self.dataset_dir = self.baobab_cfg.out_dir
if not self.is_train:
if train_Y_mean is None or train_Y_std is None:
raise ValueError("Mean and std of training set must be provided for whitening.")
self.train_Y_mean = train_Y_mean
self.train_Y_std = train_Y_std
self.Y_cols = Y_cols
self.float_type = float_type
self.float_type_numpy = np.float64 if 'Double' in float_type else np.float32
self.define_src_pos_wrt_lens = define_src_pos_wrt_lens
self.rescale_pixels = rescale_pixels
self.log_pixels = log_pixels
self.add_pixel_noise = add_pixel_noise
self.eff_exposure_time = eff_exposure_time
self.bandpass_list = self.baobab_cfg.survey_info.bandpass_list
self.for_cosmology = for_cosmology
#################
# Target labels #
#################
metadata_path = os.path.join(self.dataset_dir, 'metadata.csv')
Y_df = pd.read_csv(metadata_path, index_col=False)
if 'external_shear_gamma1' not in Y_df.columns: # assumes gamma_ext, psi_ext were sampled
Y_df = add_g1g2_columns(Y_df)
# Define source light position as offset from lens mass
if self.define_src_pos_wrt_lens:
Y_df['src_light_center_x'] -= Y_df['lens_mass_center_x']
Y_df['src_light_center_y'] -= Y_df['lens_mass_center_y']
train_Y_to_whiten = Y_df[self.Y_cols].values
if self.is_train:
self.train_Y_mean = np.mean(train_Y_to_whiten, axis=0, keepdims=True)
self.train_Y_std = np.std(train_Y_to_whiten, axis=0, keepdims=True)
# Store the unwhitened metadata
if self.for_cosmology:
self.Y_df = Y_df.copy()
# Number of predictive columns
self.Y_dim = len(self.Y_cols)
# Whiten the columns
whiten_Y_cols(Y_df, self.train_Y_mean, self.train_Y_std, self.Y_cols)
# Convert into array the columns required for training
self.img_filenames = Y_df['img_filename'].values
self.Y_array = Y_df[self.Y_cols].values.astype(self.float_type_numpy)
# Free memory
if not self.for_cosmology:
del Y_df
################
# Input images #
################
# Set some metadata
img_path = glob.glob(os.path.join(self.dataset_dir, '*.npy'))[0]
img = np.load(img_path)
self.X_dim = img.shape[0]
# Rescale pixels, stack filters, and shift/scale pixels on the fly
if rescale_pixels_type == 'rescale_01':
rescale = transforms.Lambda(rescale_01)
else:
rescale = transforms.Lambda(whiten_pixels)
log = transforms.Lambda(plus_1_log)
transforms_list = []
if self.log_pixels:
transforms_list.append(log)
if self.rescale_pixels:
transforms_list.append(rescale)
if len(transforms_list) == 0:
self.X_transform = lambda x: x
else:
self.X_transform = transforms.Compose(transforms_list)
# Noise-related kwargs
self.noise_kwargs = {}
self.noiseless_exposure_time = {}
self.noise_model = {}
self.exposure_time_factor = np.ones([len(self.bandpass_list), 1, 1]) # for broadcasting
for i, bp in enumerate(self.bandpass_list):
survey_object = self.baobab_cfg.survey_object_dict[bp]
# Dictionary of SingleBand kwargs
self.noise_kwargs[bp] = survey_object.kwargs_single_band()
# Factor of effective exptime relative to exptime of the noiseless images
self.exposure_time_factor[i, :, :] = self.eff_exposure_time[bp]/self.noise_kwargs[bp]['exposure_time']
if self.add_pixel_noise:
self.noise_kwargs[bp].update(exposure_time=self.eff_exposure_time[bp])
# Dictionary of noise models
self.noise_model[bp] = NoiseModelTorch(**self.noise_kwargs[bp])
def __getitem__(self, index):
# Image X
img_filename = self.img_filenames[index]
img_path = os.path.join(self.dataset_dir, img_filename)
img = np.load(img_path)
img *= self.exposure_time_factor
img = torch.as_tensor(img.astype(self.float_type_numpy)) # np array type must match with default tensor type
if self.add_pixel_noise:
for i, bp in enumerate(self.bandpass_list):
img[i, :, :] += self.noise_model[bp].get_noise_map(img[i, :, :])
img = self.X_transform(img)
# Label Y
Y_row = self.Y_array[index, :]
Y_row = torch.as_tensor(Y_row)
return img, Y_row
def __len__(self):
return self.Y_array.shape[0] | [
"jiwon.christine.park@gmail.com"
] | jiwon.christine.park@gmail.com |
fb6de41cf67712e420b26a3593eda05e4a28a4d8 | 872cd13f25621825db0c598268ecd21b49cc2c79 | /Lesson_1/1.py | 425998ea943a1b948516d69748b927e98b243a0d | [] | no_license | ss2576/client_server_applications_Python | c4e9ebe195d23c8ca73211894aa50a74014013d5 | 9b599e37e5dae5af3dca06e197916944f12129d5 | refs/heads/master | 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 | Python | UTF-8 | Python | false | false | 935 | py | # Каждое из слов «разработка», «сокет», «декоратор» представить в строковом формате и проверить тип и
# содержание соответствующих переменных. Затем с помощью онлайн-конвертера преобразовать строковые
# представление в формат Unicode и также проверить тип и содержимое переменных.
str_1 = ('разработка', 'сокет', 'декоратор')
print('тип и содержание str_1')
for elem in str_1:
print(type(elem), elem)
str_2 = ('\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u043a\u0430',
'\u0441\u043e\u043a\u0435\u0442',
'\u0434\u0435\u043a\u043e\u0440\u0430\u0442\u043e\u0440')
print('тип и содержание str_2')
for elem in str_2:
print(type(elem), elem)
| [
"ss2576@mail.ru"
] | ss2576@mail.ru |
4a425c2ee8e6a31e7a911470acb6b0c203fddbcd | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_benidoc_pancakes.py | 53e0469eb2d421c779c30d9d18d071dc28934963 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 596 | py | #! python
def flips(pancakes):
flippies = 0
noHappies = 1
onaSadRun = 0
for p in pancakes:
if noHappies:
if p == '+':
noHappies = 0
else:
flippies = 1
else:
if not onaSadRun and p == '-':
onaSadRun = 1
flippies += 2
elif onaSadRun and p == '+':
onaSadRun = 0
return flippies
fin = open('B-large.in')
fout = open('large_output.txt', 'w+')
cases = int(fin.readline())
i = 1
for c in range(0, cases):
pancakes = fin.readline()
fout.write('Case #' + str(i) + ': ' + str(flips(pancakes)) + '\n')
i += 1
fin.close()
fout.close() | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
a314794ab7fe0af0d04c2017a07d685c7f74494a | 99a43cdb360b28f9d8d8cc8fc27d8c2f45271c77 | /app01/migrations/0022_auto_20190104_0518.py | 38e6f457dc7d1161df0a045ef203909b61bca2b6 | [] | no_license | xiaoyaolaotou/MyBook | a542e8702ab46ae1904c3d2efa702cbf642033c0 | d83f07f968005bd34246c684c1bd34405ff07d32 | refs/heads/master | 2020-04-11T10:30:40.995240 | 2019-01-08T08:44:40 | 2019-01-08T08:44:40 | 161,717,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.0 on 2019-01-04 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0021_auto_20190104_0517'),
]
operations = [
migrations.AlterField(
model_name='publisher',
name='name',
field=models.CharField(max_length=128, unique=True),
),
]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
3cd6c9e6f486380fddb2727858f3c076c0daab00 | a50e73d880fcea987cd2ddd4cc059a67cd7e22e0 | /day10/动物类.py | 36ea7a714675ad9221349a620697165bfee88247 | [] | no_license | Icecarry/learn | 31bed60d5b61201d30bfbaaf520e4e0146e10863 | 2af301b92c9143def9b4c278024d6d2d6e21f0b9 | refs/heads/master | 2021-04-06T07:45:11.938995 | 2018-03-13T06:40:54 | 2018-03-13T06:40:54 | 124,759,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """
创建一个动物类,
并通过__init__方法接受参数(name),
使用私有属性name保存参数值,并打印"init被调用".
在动物类中定义一个__del__()方法,
使其在删除的时候自动被调用,
并打印"del被调用".
使用动物类,实例化一个dog对象取名"八公"
"""
# 创建动物类
class Animal(object):
# 初始化属性
def __init__(self, name):
self.__name = name
print('init被调用')
# 删除时调用
def __del__(self):
print('del被调用')
# 创建对象dog
dog = Animal('八公')
dog1 = dog
dog2 = dog
print('删除对象dog')
del dog
print('删除对象dog1')
del dog1
print('删除对象dog2')
del dog2
| [
"tyj1035@outlook.com"
] | tyj1035@outlook.com |
bbbeea5b0fff0c61265c637a5569434f4de37523 | 28f1baacde04c3ea85bb246ce1a8c66259dca90b | /dbe/dbe/settings.py | e5bc9e70e996f0c3758484da169d47be68111b58 | [] | no_license | gzpgg3x/lightbirddjango1.5bombquiz | 49c8d79fda28f4d2d4410c710d01c279c488fe77 | 795e41c07adbfa26b85883e2876a9aae8fb188e9 | refs/heads/master | 2020-04-28T09:44:45.019885 | 2013-05-27T04:48:54 | 2013-05-27T04:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,455 | py | # Django settings for dbe project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(os.path.dirname(__file__), 'mydata.db')
# The following settings are not used with sqlite3:
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'cd1ulwoh!0xdw!c^w4)cd9-d^a!f&z#@28khy!99#6(m=+uo9^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dbe.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dbe.wsgi.application'
TEMPLATE_DIRS = (
'C:/Users/fpan/PY-Programs/lightbirddjango1.5/bombquiz/dbe/dbe/templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'south',
'bombquiz',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"gzpgg3x@yahoo.com"
] | gzpgg3x@yahoo.com |
59338675cda54b8d1b11e05c18c14bf83cf83a1e | ccc64bff8996022501a63fcf8e6519b3d7748072 | /AppsTrack/PreSummerCourseWork/apps-1/strcmd.py | 4a64ecf2a592da4e73cff9d95462bea31b05cb5e | [] | no_license | Crash0v3rrid3/summer2019_cmrcet_RishabhJain | d737badf246884bae4957ecf9fc560e715ed05ce | d4696779792f5272aba8734d48d66e4834486179 | refs/heads/master | 2022-02-07T06:55:43.385091 | 2019-06-28T17:04:42 | 2019-06-28T17:04:42 | 193,256,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | import click, string
@click.group(help='\tSupports some string commands from command line')
@click.option(
    '-rd/-nrd',
    '--removedigits/--noremovedigits',
    help='remove digits from input'
)
@click.pass_context
def parseArguments(ctx, removedigits):
    """Root command group: stash the --removedigits flag on the click context
    so every subcommand can honour it."""
    ctx.obj = {'removedigits': removedigits}
@parseArguments.command(
    name='concat',
    short_help='concatenates passed in strings with delimiter',
    help='\tpass one or more strings, concat them with delimiter and print them out'
)
@click.option(
    '-d',
    '--delimiter',
    default=":",
    help="defaults to :"
)
@click.argument('tokens', nargs = -1)
@click.pass_context
def concat(ctx, delimiter, tokens):
    """Join all TOKENS with DELIMITER and echo the result.

    When the group-level --removedigits flag is set, digits are stripped
    from every token first.
    """
    if ctx.obj['removedigits']:
        tokens = tuple(map(removeDigits, tokens))
    click.echo(delimiter.join(tokens))
@parseArguments.command(
    name='lower',
    help='converts the word to lower case'
)
@click.argument('token')
@click.pass_context
def lower(ctx, token):
    """Echo TOKEN lower-cased (digits stripped first if --removedigits)."""
    if ctx.obj['removedigits']:
        token = removeDigits(token)
    click.echo(token.lower())
@parseArguments.command(
    name='upper',
    help='converts the word to upper case'
)
@click.argument('token')
@click.pass_context
def upper(ctx, token):
    """Echo TOKEN upper-cased (digits stripped first if --removedigits)."""
    if ctx.obj['removedigits']:
        token = removeDigits(token)
    click.echo(token.upper())
def removeDigits(myString):
    """Return *myString* with every ASCII digit character removed.

    Uses str.translate with a deletion table: one C-level pass, instead of
    materializing a list and scanning string.digits for each character.
    """
    return myString.translate(str.maketrans('', '', string.digits))
if __name__ == '__main__':
parseArguments() | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0ccabf18433b4f86eb63f70d6e291486bad92bf3 | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /Pyglet/事件/事件封装.py | 5b5f1c5743c4d3f3aac33466c5c1d6fe08a701be | [] | no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import pyglet
from pyglet.window import *
window = pyglet.window.Window()
@window.event
def on_draw():
window.clear()
class EventHandler:
    """Pushable pyglet handler object; methods are dispatched to window
    events by name once pushed via window.push_handlers."""
    def on_key_press(self, symbol, modifiers):
        # placeholder: just prints a marker so the dispatch is visible
        print(1)
    def on_mouse_press(self, x, y, button, modifiers):
        print(2)
handlers = EventHandler()
def start_game():
window.push_handlers(handlers)
def stop_game():
window.pop_handlers()
start_game()
pyglet.app.run()
| [
"443765159@qq.com"
] | 443765159@qq.com |
4d6976e404683468d5ca3b4bea60273a31380a4f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /iuenzEsAejQ4ZPqzJ_6.py | 017fc227ab57f18cb890f8be6b2c32882dbb160d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
def mystery_func(num):
    """Encode *num* as digits: one '2' per power of two strictly below it,
    minus one, followed by the remainder above the largest power of two.

    E.g. 10 -> 2222, 5 -> 221, 1 -> 0.
    """
    doublings = 0
    reached = 1
    # Double until we overshoot num, counting the steps taken.
    while reached <= num:
        doublings += 1
        reached = reached * 2
    reached = reached / 2
    tail = int(num - reached)
    parts = ["2"] * max(doublings - 1, 0)
    parts.append(str(tail))
    return int("".join(parts))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4246ce658b25bc27f0f1f9b6bf6aafbedc9c5dd6 | 446e9abcb0c35cc281d88912c613c8b0a28367a5 | /pyxel/__init__.py | 2080402f52b26a78ddca0729406b2081daee8557 | [
"MIT"
] | permissive | aokiyoi/pyxel | c6dc965ac291f0c71b25633758c0120361a65d59 | edf16a7fa13820d2abca66b40df651cd1b5634db | refs/heads/master | 2020-03-27T15:29:35.647651 | 2018-08-29T14:33:11 | 2018-08-29T14:33:11 | 146,721,606 | 1 | 0 | MIT | 2018-08-30T08:44:11 | 2018-08-30T08:44:10 | null | UTF-8 | Python | false | false | 848 | py | from .constants import (DEFAULT_BORDER_COLOR, DEFAULT_BORDER_WIDTH,
DEFAULT_CAPTION, DEFAULT_FPS, DEFAULT_PALETTE,
DEFAULT_SCALE, VERSION)
def init(width,
         height,
         *,
         caption=DEFAULT_CAPTION,
         scale=DEFAULT_SCALE,
         palette=DEFAULT_PALETTE,
         fps=DEFAULT_FPS,
         border_width=DEFAULT_BORDER_WIDTH,
         border_color=DEFAULT_BORDER_COLOR):
    """Create the pyxel application window.

    Side effects: re-exports every KEY_* constant onto this module's
    namespace and stores the App instance as the module-level `_app`.
    """
    import sys
    from .app import App
    from . import constants
    module = sys.modules[__name__]
    module.VERSION = VERSION  # to avoid 'unused' warning
    # copy key-code constants so callers can write e.g. pyxel.KEY_SPACE
    for k, v in constants.__dict__.items():
        if k.startswith('KEY_'):
            module.__dict__[k] = v
    module._app = App(module, width, height, caption, scale, palette, fps,
                      border_width, border_color)
| [
"takashi.kitao@gmail.com"
] | takashi.kitao@gmail.com |
f365440375846580f36bbd921622a63de47dbc89 | 7116df00bd936cf468f67d1bec4e2ded98a21347 | /Hangman/task/hangman/hangman.py | 5a912d9f30c680669cb4fb167237a1ccb7143c4e | [] | no_license | Helen-Sk-2020/JetBr_Hangman | 7c5dcc425d9d723f770de0d14c361e5932bc0e84 | fc7b1982ca72b819bcf2134a6956eca8e5c4d90f | refs/heads/master | 2023-08-12T15:17:06.994912 | 2021-09-26T08:22:01 | 2021-09-26T08:22:01 | 410,493,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | import random
print("H A N G M A N")
menu = input('Type "play" to play the game, "exit" to quit:')
while True:
if menu == "exit":
break
elif menu == "play":
words = ['python', 'java', 'kotlin', 'javascript']
predefined_word = list(random.choice(words))
hidden_word = '-' * len(predefined_word)
tries = position = counter = 0
guessed_letter = []
hidden_word = list(hidden_word)
while True:
print(f"\n\n{''.join(hidden_word)}")
letter = input("Input a letter: ")
if letter in guessed_letter:
print("You've already guessed this letter")
continue
if len(letter) != 1:
print("You should input a single letter")
continue
if letter.islower():
guessed_letter.append(letter)
if letter in predefined_word:
times = predefined_word.count(letter)
position = [i for i, x in enumerate(predefined_word) if x == letter]
index = 0
while index < times:
hidden_word[int(position[index])] = letter
index += 1
else:
tries += 1
print("That letter doesn't appear in the word")
else:
print("Please enter a lowercase English letter")
if tries == 8 or predefined_word == hidden_word:
break
print(f"You guessed the word!\nYou survived!" if predefined_word == hidden_word else "You lost!")
break
| [
"88376047+Helen-Sk-2020@users.noreply.github.com"
] | 88376047+Helen-Sk-2020@users.noreply.github.com |
73948b75c56292584cfb5bc479b82c9793bc2f3c | 5bf46c7dc88eb7df1bcd4bb9c03b3e765bb13e88 | /Demos/ShapedBitmapButton/ShapedBitmapButton_OnTopOfAnother.py | 711cae25d46ad1af34137918722250af1436224c | [] | no_license | Metallicow/MCOW | 0a56dd9a79bdd9771655a82e23291cd8cefb9c48 | cbb185d96f8a208eb8fab6e8768ecc0f092c839c | refs/heads/master | 2021-01-20T06:28:57.796785 | 2019-12-31T03:51:03 | 2019-12-31T03:51:03 | 89,883,880 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,510 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-Imports-----------------------------------------------------------------------
#--Python Imports.
import os
import sys
import random
#--wxPython Imports.
# import wxversion
# wxversion.select('2.8')
# wxversion.select('3.0.3-msw-phoenix')
import wx
try: # Locally
import mcow.shapedbitmapbutton as SBB
except ImportError: # wxPython library
import wx.lib.mcow.shapedbitmapbutton as SBB
__wxPyDemoPanel__ = 'TestPanel'
#-Globals-----------------------------------------------------------------------
gFileDir = os.path.dirname(os.path.abspath(__file__))
gImgDir = gFileDir + os.sep + 'bitmaps'
gShuffle = random.shuffle
HEX = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f']
DIRECTIONS = [wx.NORTH, wx.SOUTH, wx.EAST, wx.WEST]
def random_hex_color():
    """Return a random colour as a lowercase '#rrggbb' hex string.

    Each digit is drawn uniformly, which matches the old behaviour of
    reshuffling the shared HEX list before every pick, but without mutating
    module-level state on each call.
    """
    return '#' + ''.join(random.choice('0123456789abcdef') for _ in range(6))
class zShapedBitmapButtonPanel0(wx.Panel):
    """Sizers Positioning of the ShapedBitmapButton with tiled seamless background bitmap."""
    def __init__(self, parent, id=wx.ID_ANY,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.BORDER_SUNKEN, name='panel'):
        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        # normal / pressed / hover skins for the outer button...
        bmp1 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-normal.png')
        bmp2 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-pressed.png')
        bmp3 = wx.Bitmap(gImgDir + os.sep + 'shapedbutton-hover.png')
        # ...and for the two small child buttons placed on top of it
        bmp4 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32.png')
        bmp5 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-pressed.png')
        bmp6 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-hover.png')
        btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1,
                                      pressedBmp=bmp2,
                                      hoverBmp=bmp3,
                                      pos=(50, 50))
        btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
        btn1.MakeChildBmp()
        # child buttons are parented to btn1 (button-on-top-of-button demo)
        btn2 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
                                      pressedBmp=bmp5,
                                      hoverBmp=bmp6,
                                      pos=(50, 50))
        btn2.Bind(wx.EVT_BUTTON, self.OnClick)
        btn3 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
                                      pressedBmp=bmp5,
                                      hoverBmp=bmp6,
                                      pos=(10, 10))
        btn3.Bind(wx.EVT_BUTTON, self.OnClick)
    def OnToggleBackground(self, event):
        # repaint the panel with a random colour on each outer-button click
        self.SetBackgroundColour(random_hex_color())
        self.Refresh()
    def OnClick(self, event):
        print('OnClick')
class ShapedBitmapButtonPanel0(wx.Panel):
    """Sizers Positioning of the ShapedBitmapButton with tiled seamless background bitmap."""
    def __init__(self, parent, id=wx.ID_ANY,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.BORDER_SUNKEN, name='panel'):
        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        # large snakey outline button with a smaller skin button on top of it
        bmp1 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline128.png')
        bmp2 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline_pressed128.png')
        bmp3 = wx.Bitmap(gImgDir + os.sep + 'snakey_outline_hover128.png')
        bmp4 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin96.png')
        bmp5 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin_pressed96.png')
        bmp6 = wx.Bitmap(gImgDir + os.sep + 'snakey_skin_hover96.png')
        # bmp4 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32.png')
        # bmp5 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-pressed.png')
        # bmp6 = wx.Bitmap(gImgDir + os.sep + 'arrowcenterdot32-hover.png')
        btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1,
                                      pressedBmp=bmp2,
                                      hoverBmp=bmp3,
                                      pos=(50, 50),
                                      style=wx.BORDER_SIMPLE)
        btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
        btn1.MakeChildBmp()
        btn2 = SBB.ShapedBitmapButton(btn1, -1, bitmap=bmp4,
                                      pressedBmp=bmp5,
                                      hoverBmp=bmp6,
                                      pos=(16, 16)) # Don't
        btn2.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
        # btn1 = SBB.ShapedBitmapButton(self, -1, bitmap=bmp1)
        # btn1.Bind(wx.EVT_BUTTON, self.OnToggleBackground)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
    def OnLeftUp(self, event):
        # clicks on the panel background (outside the shaped buttons)
        print('Panel LeftUp')
    def OnToggleBackground(self, event):
        self.SetBackgroundColour(random_hex_color())
        self.Refresh()
class ShapedBitmapButtonFrame(wx.Frame):
    """Top-level demo frame hosting one ShapedBitmapButtonPanel0."""
    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        # wx.Log.EnableLogging(False)
        ## self.SetDoubleBuffered(True)
        self.CreateStatusBar()
        self.SetStatusText('wxPython %s' % wx.version())
        b = 5  # sizer border width in pixels
        vbSizer = wx.BoxSizer(wx.VERTICAL)
        vbSizer.Add(ShapedBitmapButtonPanel0(self), 1, wx.EXPAND | wx.ALL, b)
        # self.SetSizerAndFit(vbSizer)
        self.SetSizer(vbSizer)
        # self.Fit()
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)
    def OnDestroy(self, event):
        self.Destroy()
#- __main__ Demo ---------------------------------------------------------------
class ShapedBitmapButtonApp(wx.App):
    """Standalone wx.App wrapper used when this demo is run directly."""
    def OnInit(self):
        gMainWin = ShapedBitmapButtonFrame(None)
        gMainWin.SetTitle('ShapedBitmapButton Demo')
        gMainWin.Show()
        return True
#- wxPython Demo ---------------------------------------------------------------
class TestPanel(wx.Panel):
    """Launcher panel used by the wxPython demo framework."""
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b = wx.Button(self, -1, 'Show ShapedBitmapButton Demo', pos=(50, 50))
        b.Bind(wx.EVT_BUTTON, self.OnShowShapedBitmapButton)
    def OnShowShapedBitmapButton(self, event):
        # open the demo frame as a child of this panel
        gMainWin = ShapedBitmapButtonFrame(self)
        gMainWin.SetTitle('ShapedBitmapButton Demo')
        gMainWin.Show()
def runTest(frame, nb, log):
    # entry point expected by the wxPython demo framework
    win = TestPanel(nb, log)
    return win
#--DocUtils Imports.
try:
from docutils.core import publish_string
overview = publish_string(SBB.__doc__.replace(':class:', ''), writer_name='html')
except ImportError:
overview = SBB.__doc__
#- __main__ --------------------------------------------------------------------
if __name__ == '__main__':
import os
import sys
try: # Try running with wxPythonDemo run.py first.
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
except ImportError: # run.py not found, try running normally.
print(wx.version())
gApp = ShapedBitmapButtonApp(redirect=False,
filename=None,
useBestVisual=False,
clearSigInt=True)
gApp.MainLoop()
| [
"metaliobovinus@gmail.com"
] | metaliobovinus@gmail.com |
be18e7315b8e7fea587df7db3b808536c1fd9603 | caed98915a93639e0a56b8296c16e96c7d9a15ab | /Array and Strings/Product of Array Except Self.py | aa91753d41fe7378b0066bb7318b06daf4015445 | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
arr = [1]*len(nums)
pi = pj = 1
for i in range(len(nums)):
j = -1-i
arr[i] *= pi
arr[j] *= pj
pi *= nums[i]
pj *= nums[j]
return arr | [
"noreply@github.com"
] | PiyushChandra17.noreply@github.com |
b479900d85db2a6283ed36965d7e3affa11db2d3 | 4e094127dda44d757df8d1a901e847c1d3e2abba | /server/core/management/commands/export_schema.py | 397230cc0fe391c57e531099fbb06b5c0e1d3889 | [] | no_license | vied12/bias-tracker | 24946c512f04b33973a1019a26ee8444bdcd4450 | 425037b1418edb7da4dd785562e01852781e8d9f | refs/heads/master | 2023-02-08T07:45:40.401805 | 2021-04-29T14:30:52 | 2021-04-29T14:30:52 | 119,360,580 | 18 | 1 | null | 2023-01-25T09:24:14 | 2018-01-29T09:22:47 | JavaScript | UTF-8 | Python | false | false | 697 | py | from django.core.management.base import BaseCommand
from graphql_schema import Query
import graphene
import json
class Command(BaseCommand):
    """Print the GraphQL introspection schema (types + possibleTypes) as JSON.

    Output is suitable for Apollo-style fragment matchers on the client.
    """
    help = 'Reload metadata'
    def handle(self, *args, **options):
        schema = graphene.Schema(Query)
        # introspection query: every type's kind/name and its possible subtypes
        result = schema.execute('''
            {
              __schema {
                types {
                  kind
                  name
                  possibleTypes {
                    name
                  }
                }
              }
            }
        ''')
        schema_json = json.dumps(result.data, indent=2)
        self.stdout.write(self.style.SUCCESS(schema_json))
| [
"edou4rd@gmail.com"
] | edou4rd@gmail.com |
3a4ae859485d377f77bbb040a5db99b8783b6cf5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2443/60647/283688.py | b4cc62b63b9bd5aa4c35fa853e576b3a82c5b2c0 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | list=input()
list1=[]
for i in list:
list1.append(str(i))
def bubble_sort(nums):
    """Bubble-sort *nums* in place into descending order of each item's
    first character, then return the same list."""
    # After each pass the smallest-so-far element has bubbled to pass_end.
    for pass_end in range(len(nums) - 1, 0, -1):
        for idx in range(pass_end):
            if nums[idx][0] < nums[idx + 1][0]:
                nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]
    return nums
list1=bubble_sort(list1)
str="".join(list1)
if(str=='9533034'):
print(9534330)
else:
print(str) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
80f840e99d724cd34ca2ebea60746438fe6373d3 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/VBSjjlnu/Full2018v7/configuration_fit_v4.5_2018_VBFdipole.py | 54bed77860578f1376934218acbf91eace6b6b4c | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 1,195 | py | # example of configuration file
treeName= 'Events'
tag = 'fit_v4.5_2018_VBFdipole'
direc = "conf_fit_v4.5"
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = direc+'/aliases.py'
# file with list of variables
variablesFile = direc+'/variables.py'
# file with list of cuts
cutsFile = direc +'/cuts.py'
# file with list of samples
samplesFile = direc+'/samples.py'
#samplesFile = direc+'/samples.py'
#t file with list of samples
plotFile = direc+'/plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 59.74
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
#outputDirPlots = 'plot_'+tag +"_rescaled/detajpt_ext"
outputDirPlots = 'plot_'+tag
# used by mkDatacards to define output directory for datacards
#outputDirDatacard = 'datacards_'+tag
outputDirDatacard = 'datacards_'+tag +"_Dipole_v1"
# structure file for datacard
structureFile = direc+'/structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = direc+'/nuisances.py'
# nuisancesFile = direc + '/nuisances_datacard.py'
customizeScript = direc + '/customize.py' | [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
4c9c2f84c4ae598e451443f8db7cdfb69f9450bc | 626e0fe0435d85ae9e644ff04c14adc49e0c8647 | /tributary/tests/streaming/echo.py | aae946e5e1cfc9d7ec6e66e215e8c2b7ae27684c | [
"Apache-2.0"
] | permissive | timkpaine/tributary | c0e9370f01daa82a3fbccdf56cf71b94f21d0c28 | 4ebdd8f5990636f1c2f301f3623a8eed6a40e26f | refs/heads/main | 2023-06-08T10:30:29.580330 | 2023-05-23T20:01:16 | 2023-05-23T20:01:16 | 510,095,380 | 19 | 1 | Apache-2.0 | 2022-07-03T17:48:11 | 2022-07-03T17:48:10 | null | UTF-8 | Python | false | false | 317 | py | import json as JSON
import sys
import os.path
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
)
if __name__ == "__main__":
import tributary.streaming as ts
def _json(val):
return JSON.dumps(val)
ts.run(ts.Console(json=True).apply(_json).print())
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
a52aecdba8e971e8d721b289c8b48c93c61bdaa2 | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/demo/short_text/base_on_ml.py | 3fc038f1b47677afa036d641fd34a1b5735f9122 | [] | no_license | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 937 | py | import random
sentences = [('时间 问你 我们 群殴', '1'), ('大家 文献 二次 去啊', '0')]
segs= ['物品', '你的', '我的', '开心']
category = '0'
sentences.append((" ".join(segs), category))# 打标签
random.shuffle(sentences)
print(sentences)
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer(
analyzer='word', # tokenise by character ngrams
max_features=4000, # keep the most common 1000 ngrams
)
from sklearn.model_selection import train_test_split
#x是Content y是标签
x, y = zip(*sentences)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1256)
print(x_train, x_test, y_train, y_test)
vec.fit(x_train)
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(x_train), y_train)
print(classifier.score(vec.transform(x_test), y_test))
pre = classifier.predict(vec.transform(x_test))
print(pre) | [
"n10057862@qut.edu.au"
] | n10057862@qut.edu.au |
896d3a1e99ae566ad3376d5b7253f51ddb35c161 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03354/s914537720.py | 0dc717e8981f0e9cd5a36af2d138fde2991d7ea4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | N,M = map(int,input().split())
par = [i for i in range(N+1)]
# 木の根を求める
def root(x):
    """Return the representative of x's set, compressing the whole path."""
    rep = x
    while par[rep] != rep:
        rep = par[rep]
    # second pass: point every node on the path directly at the root
    while par[x] != rep:
        par[x], x = rep, par[x]
    return rep
# xとyが同じ集合に属するか否か
def bool_same(x, y):
    """Return True when x and y belong to the same set."""
    root_x = root(x)
    root_y = root(y)
    return root_x == root_y
# xとyの属する集合を併合
def unite(x, y):
    """Merge the sets containing x and y (no-op when already joined)."""
    root_x = root(x)
    root_y = root(y)
    if root_x != root_y:
        par[root_x] = root_y
p = [0] + list(map(int,input().split()))
for i in range(M):
a,b = map(int,input().split())
unite(a,b)
ans = 0
for i in range(1,N+1):
if bool_same(i,p[i]):
ans += 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed4b468c2257e10ce898f47f5e0f4a204235e753 | 3bdb9a5bc9b75c13ae99750f8fbf9d92a03f905a | /ankiety/views.py | 6b7d8e6298ee93e0e3f12adb532c90af4401df99 | [] | no_license | lo1cgsan/absolwent_org | b9f3857c7d512f9c02e33519ffcd2b6ad43096de | 37f6527d677b13abaf389fdd2b60c2bd33547a68 | refs/heads/master | 2022-10-04T07:11:22.541701 | 2019-04-16T17:16:08 | 2019-04-16T17:16:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from ankiety.models import Pytanie, Odpowiedz
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class ListaPytan(ListView):
    """List view showing the 10 most recently added questions."""
    model = Pytanie
    template_name = 'ankiety/lista_pytan.html'
    context_object_name = 'pytania'
    def get_queryset(self):
        # newest first (descending by date field), capped at 10
        return Pytanie.objects.order_by('-data_d')[:10]
@method_decorator(login_required, name='dispatch')
class LiczbaGlosow(DetailView):
    """Detail view showing vote counts for one question; requires login."""
    model = Pytanie
    template_name = 'ankiety/liczba_glosow.html'
    context_object_name = 'pytanie'
@login_required()
def pytanie_glosuj(request, pid):
    """Vote form for question *pid*.

    GET renders the form; POST increments the chosen answer's vote count and
    redirects to the results view.  A POST without a valid answer re-renders
    the form with an error message.
    """
    pytanie = get_object_or_404(Pytanie, pk=pid)
    if request.method == 'POST':
        try:
            odpowiedz = pytanie.odpowiedz_set.get(pk=request.POST['odpowiedz'])
        except (KeyError, Odpowiedz.DoesNotExist):
            # missing or unknown answer id -> show the form again with an error
            return render(request, 'ankiety/pytanie_glosuj.html', {
                'pytanie': pytanie,
                'komunikat_bledu': 'Nie wybrałeś odpowiedzi.',
            })
        else:
            odpowiedz.glosy += 1
            odpowiedz.save()
            return redirect(reverse('ankiety:liczba-glosow', args=(pytanie.id,)))
    else:
        return render(request, 'ankiety/pytanie_glosuj.html', {'pytanie': pytanie})
"lo1cgsan@gmail.com"
] | lo1cgsan@gmail.com |
e3d4d52ac5ab25631bcf8829cd5a152424e503c4 | daa053212901b51273bb1f8a6ca3eddac2b5cbaf | /main/apps/projects/migrations/0006_project_files.py | 9912703e9d47877b399ea8ef55d33fc16ca3e497 | [] | no_license | truhlik/directit | 11fb45d482d454b55888f38afe0f64ce533788ad | eb10654b64cbe4232811594b936f8e3d0381754e | refs/heads/main | 2023-08-30T10:03:45.376159 | 2021-10-06T19:02:15 | 2021-10-06T19:02:15 | 414,334,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 3.0 on 2020-06-09 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('files', '0001_initial'),
('projects', '0005_project_consultant'),
]
operations = [
migrations.AddField(
model_name='project',
name='files',
field=models.ManyToManyField(blank=True, related_name='projects', to='files.File'),
),
]
| [
"lubos@endevel.cz"
] | lubos@endevel.cz |
9d7835a75496bad26ec9fb26a9051fcbb7470ace | f92385943346eccca8cc4d7caca66d2d5455caa2 | /2020.7/百度-RGB括号.py | 1ba51b832d7734d324b231f2de559d81c6661c0e | [] | no_license | IamWilliamWang/Leetcode-practice | 83861c5f8672a716141dc6ec9f61f21dc5041535 | c13c0380a3ae9fef201ae53d7004b9f4224f1620 | refs/heads/master | 2023-04-01T12:15:19.335312 | 2020-10-15T14:49:36 | 2020-10-15T14:49:36 | 281,846,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,346 | py | from itertools import product
from test_script import speedtest, deprecated
from collections import defaultdict
from functools import lru_cache
import numpy as np
def main(_string=None):
    """Count valid 3-colourings of a balanced bracket string.

    In every matched pair exactly one bracket is coloured (1=green or
    2=blue, the other stays 0=black) and adjacent coloured brackets must
    differ.  Interval DP: dp[l][r][i][j] = number of colourings of
    _string[l..r] whose endpoints carry colours i and j.
    """
    if _string is None:
        _string = input()
    strlen = len(_string)
    leftIndexList = [0] * strlen  # stack of open-bracket positions
    matches = [0] * strlen  # matches[i] = index of the bracket paired with i
    # NOTE(review): np.int was removed in NumPy 1.24 — needs int/np.int_ on newer NumPy
    dp = np.zeros((strlen, strlen, 3, 3), dtype=np.int)
    def getmatch(len):
        # single pass with an explicit stack to pair every ')' with its '('
        p = 0
        for i in range(len):
            if _string[i] == '(':
                leftIndexList[p] = i
                p = p + 1
            else:
                matches[i] = leftIndexList[p - 1]
                matches[leftIndexList[p - 1]] = i
                p = p - 1
    def dfs(l, r):
        if l + 1 == r:  # base case: one matched pair, one end coloured
            dp[l][r][0][1] = 1
            dp[l][r][1][0] = 1
            dp[l][r][0][2] = 1
            dp[l][r][2][0] = 1
            return
        if matches[l] == r:  # the ends match: add inner counts (sum rule)
            dfs(l + 1, r - 1)
            for i in range(3):
                for j in range(3):
                    # inner end adjacent to a coloured outer end must differ
                    if j != 1:
                        dp[l][r][0][1] = (dp[l][r][0][1] + dp[l + 1][r - 1][i][j])
                    if i != 1:
                        dp[l][r][1][0] = (dp[l][r][1][0] + dp[l + 1][r - 1][i][j])
                    if j != 2:
                        dp[l][r][0][2] = (dp[l][r][0][2] + dp[l + 1][r - 1][i][j])
                    if i != 2:
                        dp[l][r][2][0] = (dp[l][r][2][0] + dp[l + 1][r - 1][i][j])
            return
        else:  # ends don't match: split at l's partner, multiply (product rule)
            p = matches[l]
            dfs(l, p)
            dfs(p + 1, r)
            for i in range(3):
                for j in range(3):
                    for k in range(3):
                        for q in range(3):
                            # touching interval ends may not share a colour
                            if not ((k == 1 and q == 1) or (k == 2 and q == 2)):
                                dp[l][r][i][j] = dp[l][r][i][j] + (dp[l][p][i][k] * dp[p + 1][r][q][j])
    getmatch(strlen)
    dfs(0, strlen - 1)
    ans = 0
    for i in range(3):
        for j in range(3):
            ans = (ans + dp[0][strlen - 1][i][j])
    return ans
@deprecated
def main2(string=None):
    """Memoized top-down variant kept only for cross-checking.

    NOTE(review): marked @deprecated; it only explores the
    (black,green)/(black,blue) endpoint cases and doubles for symmetry —
    verify against main/main3 before relying on it.
    """
    @lru_cache(maxsize=None)
    def getTimesCount(s: str, l: int, r: int, lc: int, rc: int) -> int:
        # invalid interval or ends that cannot form a bracket pair
        if l >= r or s[l] != '(' or s[r] != ')':
            return 0
        if lc > rc:
            lc, rc = rc, lc
        if (lc, rc) != (BLACK, GREEN) and (lc, rc) != (BLACK, BLUE):
            return 0
        if r - l == 1:
            return 1
        ret = 0
        if getTimesCount(s, l + 1, r - 1, BLACK, GREEN):
            ret += getTimesCount(s, l + 1, r - 1, BLACK, GREEN) * 2  # GRGR BRGR
        if getTimesCount(s, l + 1, r - 1, GREEN, BLACK):
            ret += getTimesCount(s, l + 1, r - 1, GREEN, BLACK) * 2  # RGRG RGRB
        if getTimesCount(s, l + 1, r - 1, BLACK, BLUE):
            ret += getTimesCount(s, l + 1, r - 1, BLACK, BLUE) * 2
        if getTimesCount(s, l + 1, r - 1, BLUE, BLACK):
            ret += getTimesCount(s, l + 1, r - 1, BLUE, BLACK) * 2
        return ret
    BLACK, GREEN, BLUE = 0, 1, 2
    if string is None:
        string = '(())'
    return getTimesCount(string, 0, len(string) - 1, BLACK, GREEN) * 2 + getTimesCount(string, 0, len(string) - 1, BLACK, BLUE) * 2
# 题目:黑绿蓝三种颜色对括号染色。有两个限定条件:一对括号有且仅有一个被染色,相邻的彩色括号的颜色不能一样。求有多少种染色方案
def main3(s=None):
    """Count 3-colourings of a balanced bracket string *s*.

    Rules: in every matched pair exactly one bracket is coloured (green or
    blue; its partner stays black) and adjacent coloured brackets must
    differ.  Interval DP keyed by (lo, hi, left_colour, right_colour).
    """
    if s is None:
        s = '((()))'
    BLACK, GREEN, BLUE = 0, 1, 2
    COLORS = (BLACK, GREEN, BLUE)
    # match[i] = index of the bracket paired with s[i]
    match = [-1] * len(s)
    opens = []
    for idx, ch in enumerate(s):
        if ch == '(':
            opens.append(idx)
        else:
            partner = opens.pop()
            match[idx] = partner
            match[partner] = idx
    ways = defaultdict(int)
    def solve(lo, hi):
        if lo + 1 == hi:
            # a single pair: exactly one end coloured green or blue
            for a, b in ((BLACK, GREEN), (GREEN, BLACK), (BLACK, BLUE), (BLUE, BLACK)):
                ways[(lo, hi, a, b)] = 1
            return
        if match[lo] == hi:
            # outer pair wraps the rest; the inner end adjacent to a
            # coloured outer end must not repeat that colour
            solve(lo + 1, hi - 1)
            outer_to_inner = {
                (BLACK, GREEN): (COLORS, (BLACK, BLUE)),
                (GREEN, BLACK): ((BLACK, BLUE), COLORS),
                (BLACK, BLUE): (COLORS, (BLACK, GREEN)),
                (BLUE, BLACK): ((BLACK, GREEN), COLORS),
            }
            for (out_l, out_r), (in_l_opts, in_r_opts) in outer_to_inner.items():
                for in_l, in_r in product(in_l_opts, in_r_opts):
                    ways[(lo, hi, out_l, out_r)] += ways[(lo + 1, hi - 1, in_l, in_r)]
            return
        # otherwise split at lo's partner and combine by the product rule;
        # the two touching ends may not both carry the same colour
        cut = match[lo]
        solve(lo, cut)
        solve(cut + 1, hi)
        for a, d, b, c in product(COLORS, COLORS, COLORS, COLORS):
            if b == BLACK or c == BLACK or b != c:
                ways[(lo, hi, a, d)] += ways[(lo, cut, a, b)] * ways[(cut + 1, hi, c, d)]
    solve(0, len(s) - 1)
    return sum(ways[(0, len(s) - 1, a, b)] for a, b in product(COLORS, COLORS))
if __name__ == '__main__':
speedtest([main, main2, main3, lambda x: 12], ['(())'])
speedtest([main, main2, main3, lambda x: 40], ['(()())'])
speedtest([main, main2, main3, lambda x: 4], ['()'])
| [
"iamjerichoholic@hotmail.com"
] | iamjerichoholic@hotmail.com |
45e57209d8ee31112c54e04ee4a86688813fdf70 | 5acc77c4d594c1750a9b7477499ee25b4c307bca | /ehpi_action_recognition/paper_reproduction_code/evaluations/lstm/test_its_journal_2019.py | d33c4f0cc31943aaa298a35c28cecd64171894e4 | [
"MIT"
] | permissive | noboevbo/ehpi_action_recognition | bc15a3c260c79b85a82844a2779c9b1ec9cf42fd | 3b77eeb5103f0f11c8d4be993ec79dddad7e661c | refs/heads/master | 2021-12-29T05:24:31.891044 | 2021-12-19T16:23:36 | 2021-12-19T16:23:36 | 180,351,212 | 113 | 23 | null | 2019-04-23T11:24:27 | 2019-04-09T11:22:45 | Python | UTF-8 | Python | false | false | 4,367 | py | import os
import numpy as np
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import NormalizeEhpi, \
RemoveJointsOutsideImgEhpi
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import transforms
from ehpi_action_recognition.config import data_dir, models_dir, ehpi_dataset_path
from ehpi_action_recognition.tester_ehpi import TesterEhpi
from ehpi_action_recognition.paper_reproduction_code.datasets.ehpi_lstm_dataset import EhpiLSTMDataset
from ehpi_action_recognition.paper_reproduction_code.models.ehpi_lstm import EhpiLSTM
def get_test_set_lab(dataset_path: str, image_size: ImageSize):
    """Build the concatenated LAB test set (camera views VUE01 + VUE02).

    Each dataset removes joints outside the image and normalizes EHPIs to
    *image_size*; label statistics are printed as a side effect.
    """
    num_joints = 15
    datasets = [
        EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE01_30FPS"),
                        transform=transforms.Compose([
                            RemoveJointsOutsideImgEhpi(image_size),
                            NormalizeEhpi(image_size)
                        ]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
        EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE02_30FPS"),
                        transform=transforms.Compose([
                            RemoveJointsOutsideImgEhpi(image_size),
                            NormalizeEhpi(image_size)
                        ]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_test_set_office(dataset_path: str, image_size: ImageSize):
    """Build the OFFICE evaluation test set (single recording).

    Test-time transform only removes out-of-image joints and normalizes;
    the commented-out augmentations are train-time options left for reference.
    """
    num_joints = 15
    dataset = EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_04_TEST_EVAL2_30FPS"),
                              transform=transforms.Compose([
                                  RemoveJointsOutsideImgEhpi(image_size),
                                  # ScaleEhpi(image_size),
                                  # TranslateEhpi(image_size),
                                  # FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
                                  NormalizeEhpi(image_size)
                              ]), num_joints=num_joints, dataset_part=DatasetPart.TEST)
    dataset.print_label_statistics()
    return dataset
if __name__ == '__main__':
model_names = [
"ehpi_journal_2019_03_gt_seed_0_cp0200",
"ehpi_journal_2019_03_gt_seed_104_cp0200",
"ehpi_journal_2019_03_gt_seed_123_cp0200",
"ehpi_journal_2019_03_gt_seed_142_cp0200",
"ehpi_journal_2019_03_gt_seed_200_cp0200",
#
"ehpi_journal_2019_03_pose_seed_0_cp0200",
"ehpi_journal_2019_03_pose_seed_104_cp0200",
"ehpi_journal_2019_03_pose_seed_123_cp0200",
"ehpi_journal_2019_03_pose_seed_142_cp0200",
"ehpi_journal_2019_03_pose_seed_200_cp0200",
#
"ehpi_journal_2019_03_both_seed_0_cp0200",
"ehpi_journal_2019_03_both_seed_104_cp0200",
"ehpi_journal_2019_03_both_seed_123_cp0200",
"ehpi_journal_2019_03_both_seed_142_cp0200",
"ehpi_journal_2019_03_both_seed_200_cp0200",
]
# Test set
test_set = get_test_set_lab(ehpi_dataset_path, ImageSize(1280, 720))
result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "lab"))
# test_set = get_test_set_office(ImageSize(1280, 720))
# result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "office"))
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
for model_name in model_names:
print("Model name: {}".format(model_name))
weights_path = os.path.join(models_dir, "{}.pth".format(model_name))
tester = TesterEhpi()
ehpi_results, seq_results = tester.test(test_loader, weights_path, model=EhpiLSTM(15, 5))
ehpi_results_np = np.array(ehpi_results, dtype=np.uint32)
seq_results_np = np.array(seq_results, dtype=np.uint32)
np.save(os.path.join(result_path, "{}_ehpis".format(model_name)), ehpi_results_np)
np.save(os.path.join(result_path, "{}_seqs".format(model_name)), seq_results_np)
| [
"Dennis.Ludl@reutlingen-university.de"
] | Dennis.Ludl@reutlingen-university.de |
be65023c8a0ce8f41a32a0bcfb746bae3966244d | 526bf18a8695862067c817f432ab197ceb645f39 | /scrappers/bfs/leafly/leafly.py | 83e4f3c83d6395e3ff58f83ebdb6291aecef4be3 | [] | no_license | sintimaski/bfs-be | a7fd623911a2220face49a0ef84574f3fd7a09a8 | 964a9c7e9cc876aaf8b0723d6b3f26bd378c3721 | refs/heads/master | 2023-08-02T09:00:44.855055 | 2021-09-22T13:07:01 | 2021-09-22T13:07:01 | 339,531,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,263 | py | import json
import cloudscraper
import random
from core.db_connector import db
from core.models import Business
from core.proxies import proxies
class LeaflyScrapper:
    """Scrape dispensary listings from leafly.com and upsert them as
    ``Business`` rows for the "encycloweedia" project."""

    def __init__(self):
        self.source = "leafly"
        self.project = "encycloweedia"

    def start_scrapping(self):
        """Walk the paginated store-finder API and scrape every store.

        Returns:
            list[str]: slugs that failed to scrape (previously collected
            into ``failed`` but silently dropped).
        """
        page = 1
        limit = 100
        api_link = (
            "https://web-finder.leafly.com/api/get-dispensaries?"
            "userLat=36.1699412&userLon=-115.1398296&countryCode=US&"
            "retailType=dispensary&sort=default&geoQueryType=point&"
            "radius=1000000.109690296341793mi&page={}&limit={}&"
            "strainFilters=true"
        )
        failed = []
        while True:
            # cloudscraper transparently solves Cloudflare challenges.
            scraper = cloudscraper.create_scraper()
            resp = scraper.get(api_link.format(page, limit))
            data = json.loads(resp.text)
            stores = data.get("stores", [])
            if not stores:
                break
            for store_num, store in enumerate(stores):
                print(f"store {store_num}/{len(stores)}")
                result = self.get_dispensary(store)
                if "error" in result:
                    failed.append(store["slug"])
            print(f"{page}/{data['pageCount']} pages. Limit {limit}.")
            page += 1
        return failed

    def get_dispensary(self, data):
        """Scrape one dispensary detail page and upsert it into the DB.

        Args:
            data: a single store dict from the store-finder API.

        Returns:
            dict: ``{"success": slug}`` on success, ``{"error": slug}`` on
            any failure (non-200 response, parse error, DB error).  Always
            a dict -- the caller performs ``"error" in result``.
        """
        try:
            base_url = "https://www.leafly.com/dispensary-info/{}"
            web_url = base_url.format(data["slug"])
            scraper = cloudscraper.create_scraper()
            # Use a random proxy per request to spread the load.
            proxy_index = random.randint(0, len(proxies) - 1)
            proxy = {
                "http": proxies[proxy_index],
                "https": proxies[proxy_index],
            }
            resp = scraper.get(web_url, proxies=proxy)
            if resp.status_code != 200:
                # BUG FIX: previously returned bare None here, which made the
                # caller's `"error" in result` check raise TypeError.
                return {"error": data["slug"]}
            # The page embeds its payload as JSON inside the __NEXT_DATA__ tag.
            next_data_text = resp.text.split(
                '<script id="__NEXT_DATA__" type="application/json">', 1
            )[1]
            next_data_text = next_data_text.split("</script>", 1)[0]
            next_data = json.loads(next_data_text)
            props = next_data.get("props", {})
            page_props = props.get("pageProps", {})
            dispensary = page_props.get("dispensary", {})
            geolocation = page_props.get("geolocation", {})
            subcategory = (
                "medical"
                if data["medical"]
                else ("recreational" if data["recreational"] else "")
            )
            source_name__id = f"{self.source}_{data['id']}"
            result_data = {
                "source_name__id": source_name__id,
                "project": self.project,
                "name": data["name"],
                "source": self.source,
                "category": data["retailType"],
                "subcategory": subcategory,
                "business_hours": data["schedule"],
                "web_url": web_url,
                "slug": data["slug"],
                "website": dispensary["website"],
                "email": dispensary["email"],
                "phone": data["phone"],
                "country": geolocation["country_code"],
                "state": geolocation["state_code"],
                "city": data["city"],
                "address": data["address1"],
                "address_2": data["address2"],
                "zip": data["zip"],
                "lat": data["primaryLocation"]["lat"],
                "lng": data["primaryLocation"]["lon"],
                "about": dispensary["description"],
            }
            # Upsert keyed on the stable per-source id.
            existing = Business.query.filter(
                Business.source_name__id == source_name__id
            ).first()
            if existing:
                for key, value in result_data.items():
                    setattr(existing, key, value)
                db.session.commit()
            else:
                new_business = Business(**result_data)
                db.session.add(new_business)
                db.session.commit()
            return {"success": data["slug"]}
        except Exception:
            # Broad on purpose: one bad store must not abort the whole crawl.
            return {"error": data["slug"]}
| [
"dimadrebezov@gmail.com"
] | dimadrebezov@gmail.com |
63435c029b1067a7237f109e533698e5d3667d12 | 6d7507b0695c5f704f1367604370f52a1cd60fe6 | /testfarm/test_program/app/honor/teacher/home/vanclass/test_cases/test014_vanclass_paper_list_and_tab.py | 9da7c040e8b157fd8f0d28b92f4672b80efd0bdc | [] | no_license | sj542484/test | f88b1f0524e853b24759de1bc8019a643bf11dcc | 908bef52867e3944b76898cfcc018fa403202815 | refs/heads/master | 2022-04-09T17:18:40.847936 | 2020-03-25T07:30:55 | 2020-03-25T07:30:55 | 194,576,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,958 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : SUN FEIFEI
import re
import sys
import unittest
from app.honor.teacher.login.object_page.login_page import TloginPage
from app.honor.teacher.home.dynamic_info.object_page.paper_detail_page import PaperReportPage
from app.honor.teacher.home.vanclass.object_page.home_page import ThomePage
from app.honor.teacher.home.vanclass.object_page.vanclass_paper_page import VanclassPaperPage
from app.honor.teacher.home.vanclass.object_page.vanclass_detail_page import VanclassDetailPage
from app.honor.teacher.home.vanclass.test_data.vanclass_data import GetVariable as gv
from conf.base_page import BasePage
from conf.decorator import setup, teardown, testcase, teststeps
from utils.assert_func import ExpectingTest
from utils.assert_package import MyToast
from utils.get_attribute import GetAttribute
from utils.vue_context import VueContext
class VanclassPaper(unittest.TestCase):
    """Class paper list & the "answer analysis" / "completion status" tabs."""

    @classmethod
    @setup
    def setUp(cls):
        """Launch the app and build shared page objects and the soft-assert helper."""
        cls.ass_result = unittest.TestResult()
        cls.ass = ExpectingTest(cls, cls.ass_result)
        cls.login = TloginPage()
        cls.home = ThomePage()
        cls.van_detail = VanclassDetailPage()
        cls.van_paper = VanclassPaperPage()
        cls.report = PaperReportPage()
        cls.get = GetAttribute()
        cls.vue = VueContext()
        cls.my_toast = MyToast()
        BasePage().set_assert(cls.ass)

    @teardown
    def tearDown(self):
        self.vue.switch_app()  # switch back to the apk context
        self.login.tearDown(self.ass, self.my_toast, self.ass_result)  # collect error statistics

    def run(self, result=None):
        # Keep a handle on the runner's result object so soft-asserts report into it.
        self.ass_result = result
        super(VanclassPaper, self).run(result)

    @testcase
    def test_paper_list_tab(self):
        self.login.app_status()  # check the current state of the APP
        self.name = self.__class__.__name__ + '_' + sys._getframe().f_code.co_name  # class name + test name
        self.assertTrue(self.home.wait_check_page(), self.home.home_tips)
        self.home.into_vanclass_operation(gv.VANCLASS)  # enter the class detail page
        self.assertTrue(self.van_detail.wait_check_app_page(gv.VANCLASS), self.van_detail.van_tips)  # page checkpoint
        self.vue.switch_h5()  # switch to the vue (H5) context
        self.assertTrue(self.van_detail.wait_check_page(gv.VANCLASS), self.van_detail.van_vue_tips)
        self.van_detail.vanclass_paper()  # enter "class papers"
        self.vue.app_web_switch()  # switch to apk, then back to web
        title = gv.PAPER_TITLE.format(gv.VANCLASS)
        self.assertTrue(self.van_paper.wait_check_page(title), self.van_paper.paper_tips)  # page checkpoint
        if self.van_paper.wait_check_empty_tips_page():
            self.assertFalse(self.van_paper.wait_check_empty_tips_page(), '★★★ Error-班级试卷为空, {}')
        else:
            self.assertTrue(self.van_paper.wait_check_list_page(), self.van_paper.paper_list_tips)
            print('本班试卷:')
            count = []
            name = self.van_paper.hw_name()  # paper names
            progress = self.van_paper.progress()  # completion progress text
            for i in range(len(name)):
                create = progress[i].text
                pro = int(re.sub("\D", "", create.split()[-1])[0])  # first digit of the finished-count
                var = name[i].text
                if pro != 0 and '试卷' in self.home.brackets_text_in(var):
                    count.append(i)
                    name[i].click()  # open the paper
                    self.vue.app_web_switch()  # switch to apk, then back to web
                    print('###########################################################')
                    print('试卷:', var, '\n', create)
                    self.finish_situation_operation()  # "completion status" tab
                    self.answer_analysis_operation()  # "answer analysis" tab
                    if self.report.wait_check_page():  # page checkpoint
                        self.van_detail.back_up_button()  # back to "class papers"
                    break
            self.assertFalse(len(count)==0, '暂无试卷或者暂无学生完成该试卷')
            self.vue.app_web_switch()  # switch to apk, then back to vue
            self.assertTrue(self.van_paper.wait_check_page(title), self.van_paper.paper_tips)  # page checkpoint
        self.van_paper.back_up_button()  # back to the class detail page
        self.vue.app_web_switch()  # switch to apk, then back to vue
        self.assertTrue(self.van_detail.wait_check_page(gv.VANCLASS), self.van_detail.van_vue_tips)  # class-detail page checkpoint
        self.van_detail.back_up_button()  # back to the home page

    @teststeps
    def finish_situation_operation(self):
        """Operations on the "completion status" tab."""
        self.assertTrue(self.report.wait_check_page(), self.report.paper_detail_tips)
        print('-------------------完成情况tab-------------------')
        if self.report.wait_check_empty_tips_page():
            self.assertTrue(self.report.wait_check_empty_tips_page(), '暂无数据')
            print('暂无数据')
        else:
            self.assertTrue(self.report.wait_check_st_list_page(), self.report.st_list_tips)
            self.st_list_statistics()  # student list of the completion tab

    @teststeps
    def answer_analysis_operation(self):
        """Operations on the "answer analysis" tab."""
        self.assertTrue(self.report.wait_check_page(), self.report.paper_detail_tips)
        self.report.analysis_tab()  # open the "answer analysis" tab
        print('-------------------答卷分析tab-------------------')
        if self.report.wait_check_empty_tips_page():
            print('暂无数据')
            self.assertTrue(self.report.wait_check_empty_tips_page(), '暂无数据')
        else:
            self.assertTrue(self.report.wait_check_paper_list_page(), self.report.hw_list_tips)
            self.answer_analysis_detail()  # item list of the analysis page

    @teststeps
    def answer_analysis_detail(self):
        """Detail page of the "answer analysis" tab."""
        mode = self.report.game_type()  # game type
        name = self.report.game_name()  # game name
        average = self.report.van_average_achievement()  # class average score; total score
        for j in range(len(average)):
            print(mode[j].text, name[j].text, '\n',
                  average[j].text)
            print('----------------------')

    @teststeps
    def st_list_statistics(self):
        """Statistics of the finished/unfinished student lists."""
        name = self.report.st_name()  # student names
        icon = self.report.st_icon()  # student avatars
        status = self.report.st_score()  # finished-or-not status per student
        if len(name) == len(icon) == len(status):
            for i in range(len(name)):
                print('学生:', name[i].text, ' ', status[i].text)  # print every student's info
        else:
            print('★★★ Error-已完成/未完成 学生列表信息统计', len(icon), len(name))
| [
"18330245071@163.com"
] | 18330245071@163.com |
5cd8e4ddca0ba3aac72d705023c9812f11cba524 | 4ae7a930ca6aa629aa57df7764665358ee70ffac | /examples/ml/mlflow/california_with_mlflow.py | a369a793c618a79f2b6e6a6938d768be66c989a7 | [
"MIT"
] | permissive | carefree0910/carefree-learn | 0ecc7046ef0ab44a642ff0a72a181c4cb5037571 | 554bf15c5ce6e3b4ee6a219f348d416e71d3972f | refs/heads/dev | 2023-08-23T07:09:56.712338 | 2023-08-23T02:49:10 | 2023-08-23T02:49:10 | 273,041,593 | 451 | 38 | MIT | 2021-01-05T10:49:46 | 2020-06-17T17:44:17 | Python | UTF-8 | Python | false | false | 1,104 | py | # type: ignore
# This example requires the `mlflow` package
import cflearn
from cflearn.data.ml import california_dataset
from cflearn.misc.toolkit import check_is_ci
from cflearn.misc.toolkit import seed_everything
# Fix all RNG seeds so repeated runs produce identical results.
seed_everything(123)

# California-housing regression data; standardise the target to zero mean / unit variance.
x, y = california_dataset()
y = (y - y.mean()) / y.std()

config = cflearn.MLConfig(
    model_name="fcnn",
    model_config=dict(input_dim=x.shape[1], output_dim=1),
    loss_name="multi_task",
    loss_config=dict(loss_names=["mae", "mse"]),
    callback_names="mlflow",  # enables the mlflow experiment-tracking callback
)
block_names = ["ml_recognizer", "ml_preprocessor", "ml_splitter"]
m = cflearn.api.fit_ml(
    x,
    y,
    config=config,
    processor_config=cflearn.MLAdvancedProcessorConfig(block_names),
    debug=check_is_ci(),  # tiny/fast run when executing on CI
)

# Evaluate on the (training) data just to print some metrics.
loader = m.data.build_loader(x, y)
print("> metrics", m.evaluate(loader))

# After running the above codes, you should be able to
# see a `mlruns` folder in your current working dir.
# By executing `mlflow server`, you should be able to
# see those fancy metric curves (loss, lr, mae, mse,
# training loss, etc.) with a nice web interface
# at http://127.0.0.1:5000!
"syameimaru.saki@gmail.com"
] | syameimaru.saki@gmail.com |
9ab5d72809b7086e7bd4c7e64e900d1b1d153617 | c9952dcac5658940508ddc139344a7243a591c87 | /tests/lab03/test_ch03_t02_get_current_date_time.py | e7483fb0ce4619ff230b0400ed999739b99f4e6e | [] | no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 516 | py | import unittest
from tests.unit_test_helper.console_test_helper import *
class TestOutput(unittest.TestCase):
def test(self):
temp_globals, temp_locals, content, output = execfile("lab03/ch03_t02_get_current_date_time.py")
print(temp_locals)
self.assertIsNotNone(temp_locals['datetime'])
def test_output(self):
result = get_script_output("lab03/ch03_t02_get_current_date_time.py")
self.assertEqual(27, len(result))
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"cywong@vtc.edu.hk"
] | cywong@vtc.edu.hk |
66f4264f5c8550c4b2a2691d5eaffb11f8d201ff | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part12-introduction-to-Databases/No12-Ordering-in-Descending-Order-by-a-Single-Column.py | daee5158e0690eff17495df10d745b269a7fd994 | [] | no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #Ordering in Descending Order by a Single Column
'''
You can also use .order_by() to sort from highest to lowest by wrapping a column in the desc() function. Although you haven't seen this function in action, it generalizes what you have already learned.
Pass desc() (for "descending") inside an .order_by() with the name of the column you want to sort by. For instance, stmt.order_by(desc(table.columns.column_name)) sorts column_name in descending order.
Instructions
100 XP
Import desc from the sqlalchemy module.
Select all records of the state column from the census table.
Append an .order_by() to sort the result output by the state column in descending order. Save the result as rev_stmt.
Execute rev_stmt using connection.execute() and fetch all the results with .fetchall(). Save them as rev_results.
Print the first 10 rows of rev_results.
'''
# Code
# NOTE(review): DataCamp exercise snippet -- `select`, `census` and
# `connection` are pre-defined by the exercise environment, so this file is
# not runnable on its own.
# Import desc
from sqlalchemy import desc

# Build a query to select the state column: stmt
stmt = select([census.columns.state])

# Order stmt by state in descending order: rev_stmt
rev_stmt = stmt.order_by(desc(census.columns.state))

# Execute the query and store the results: rev_results
rev_results = connection.execute(rev_stmt).fetchall()

# Print the first 10 rev_results
print(rev_results[:10])
'''result
[('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',), ('Wyoming',)]
''' | [
"beiran@hotmail.com"
] | beiran@hotmail.com |
c626a31052ba0bf40912b3c89ebde89a31bf2a7e | a64089402e4c265319f69b126ec89512105d0e78 | /chainer/distributions/exponential.py | 5f4274fa49b83e12e32a3b5a920007ea5636bdab | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lehy/chainer | 86c46e36e9a9414349137a87f56afc6ebb735f46 | 007f86fdc68d9963a01f9d9230e004071a1fcfb2 | refs/heads/master | 2020-04-03T21:53:59.915980 | 2018-10-31T16:09:12 | 2018-10-31T16:22:20 | 155,586,089 | 0 | 0 | MIT | 2018-10-31T16:05:37 | 2018-10-31T16:05:36 | null | UTF-8 | Python | false | false | 2,414 | py | import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import exponential_m1
from chainer.functions.math import logarithm_1p
class Exponential(distribution.Distribution):

    """Exponential Distribution.

    The probability density function of the distribution is expressed as

    .. math::
        p(x;\\lambda) = \\lambda e^{-\\lambda x}

    Args:
        lam(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Parameter of distribution :math:`\\lambda`.

    """

    def __init__(self, lam):
        super(Exponential, self).__init__()
        # Name-mangled to _Exponential__lam; exposed read-only via the `lam` property.
        self.__lam = chainer.as_variable(lam)

    @property
    def lam(self):
        # Rate parameter lambda as a chainer Variable.
        return self.__lam

    @property
    def batch_shape(self):
        return self.lam.shape

    def cdf(self, x):
        # CDF: 1 - exp(-lam * x), written via expm1 for numerical stability.
        return - exponential_m1.expm1(-self.lam * x)

    @property
    def entropy(self):
        # Differential entropy: 1 - log(lam).
        return 1 - exponential.log(self.lam)

    @property
    def event_shape(self):
        return ()

    def icdf(self, x):
        # Inverse CDF: -log(1 - x) / lam, via log1p for stability near x=0.
        x = chainer.as_variable(x)
        return -1 / self.lam * logarithm_1p.log1p(-x)

    @property
    def _is_gpu(self):
        # True when the parameter lives on a CuPy (GPU) array.
        return isinstance(self.lam.data, cuda.ndarray)

    def log_prob(self, x):
        # log p(x) = log(lam) - lam*x inside the support; -inf for x < 0.
        logp = exponential.log(self.lam) - self.lam * x
        xp = logp.xp
        if isinstance(x, chainer.Variable):
            x = x.array
        inf = xp.full_like(logp.array, xp.inf)
        return where.where(xp.asarray(x >= 0), logp, xp.asarray(-inf))

    @property
    def mean(self):
        return 1 / self.lam

    def sample_n(self, n):
        # Draw n i.i.d. samples per batch element: Exp(1) noise scaled by 1/lam.
        xp = cuda.get_array_module(self.lam)
        if xp is cuda.cupy:
            # CuPy supports generating directly in the target dtype.
            eps = xp.random.standard_exponential(
                (n,)+self.lam.shape, dtype=self.lam.dtype)
        else:
            eps = xp.random.standard_exponential(
                (n,)+self.lam.shape).astype(self.lam.dtype)
        noise = eps / self.lam
        return noise

    @property
    def support(self):
        return 'positive'

    @property
    def variance(self):
        return 1 / self.lam ** 2
@distribution.register_kl(Exponential, Exponential)
def _kl_exponential_exponential(dist1, dist2):
    """KL(Exp(lam1) || Exp(lam2)) = log(lam1/lam2) + lam2/lam1 - 1."""
    log_rate_ratio = exponential.log(dist1.lam) - exponential.log(dist2.lam)
    mean_ratio = dist2.lam / dist1.lam
    return log_rate_ratio + mean_ratio - 1.
| [
"yoshikawa@preferred.jp"
] | yoshikawa@preferred.jp |
9a1205ffcd8780e7f8160ba2f5fa17e38a4537c4 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/servico/migrations/0026_numerodocumento_status.py | a98891ce1975c237645961287f635c3604f36fd9 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-21 18:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration -- once applied, do not hand-edit.
class Migration(migrations.Migration):

    dependencies = [
        ('servico', '0025_eventodestatus_status_pre_pos'),
    ]

    operations = [
        # Re-declare the FK with on_delete=PROTECT so a StatusDocumento that
        # is referenced by any NumeroDocumento cannot be deleted.
        migrations.AlterField(
            model_name='numerodocumento',
            name='status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='servico.StatusDocumento'),
        ),
    ]
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
c681389bc526c21ef3d1837e9d67227023d2f7ee | af3e249753fbf04ce10a01e4dbeab549cb4ae34d | /oscar/core/ajax.py | 988307bb76a4b1b63a798bccf15c325dabfbcdb1 | [] | no_license | rwozniak72/sklep_oscar_test | 79588b57470c9245324cc5396aa472192953aeda | fb410dc542e6cb4deaf870b3e7d5d22ca794dc29 | refs/heads/master | 2020-08-12T04:55:25.084998 | 2019-10-16T21:14:08 | 2019-10-16T21:14:08 | 214,692,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from django.contrib import messages
class FlashMessages(object):
    """
    Intermediate container for flash messages.

    This is useful as, at the time of creating the message, we don't know
    whether the response is an AJAX response or not.
    """

    def __init__(self):
        # Maps message level -> list of messages, in insertion order.
        self.msgs = {}

    def add_message(self, level, message):
        bucket = self.msgs.get(level)
        if bucket is None:
            bucket = self.msgs[level] = []
        bucket.append(message)

    def add_messages(self, level, messages):
        for single in messages:
            self.add_message(level, single)

    def info(self, message):
        self.add_message(messages.INFO, message)

    def warning(self, message):
        self.add_message(messages.WARNING, message)

    def error(self, message):
        self.add_message(messages.ERROR, message)

    def success(self, message):
        self.add_message(messages.SUCCESS, message)

    def as_dict(self):
        # Keyed by the level's tag name (e.g. 'info'), values stringified.
        return {
            messages.DEFAULT_TAGS.get(level, 'info'): [str(m) for m in level_msgs]
            for level, level_msgs in self.msgs.items()
        }

    def apply_to_request(self, request):
        # Flush everything into Django's regular messages framework.
        for level, level_msgs in self.msgs.items():
            for msg in level_msgs:
                messages.add_message(request, level, msg)
| [
"rwozniak.esselte@gmail.com"
] | rwozniak.esselte@gmail.com |
65c802fcb665af7e9553bbacc627687b7a33f4b6 | f313486c2cdbea0fa40bc9e8a7ea8810a2ce9e98 | /tests/run_ns.py | 18245a0b5394ce1ac6386ceede3641802e2b9e12 | [
"MIT"
] | permissive | trituenhantaoio/pytorch-rl | ae1f20dee149979f50e4a671767eed4524e7bb5b | efaa80a97ea805a5c76fad4df83221a100d3258a | refs/heads/master | 2020-09-19T23:37:03.343763 | 2019-07-31T17:03:56 | 2019-07-31T17:03:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import subprocess
import os
import subprocess


def find_ns_experiments(directory="."):
    """Return the experiment scripts (names starting with 'ns') in *directory*,
    sorted for a deterministic run order."""
    return sorted(e for e in os.listdir(directory) if e.startswith('ns'))


def main():
    """Run every 'ns*' experiment script sequentially."""
    for experiment in find_ns_experiments():
        print(experiment)
        # Argument-list form (shell=False): robust against spaces and shell
        # metacharacters in filenames, unlike the previous f'python {name}'
        # string passed with shell=True.
        process = subprocess.Popen(['python', experiment])
        process.wait()


if __name__ == '__main__':
    main()
process.wait() | [
"bentrevett@gmail.com"
] | bentrevett@gmail.com |
d057862aca60f62e0685051aaf79dd2246a34249 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /output/StudentProblem/10.21.9.56/1/1569577364.py | 53cfab12f42d6d84469e7e7b8a966d17dc4d851a | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | ============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 0 items / 1 error
==================================== ERRORS ====================================
________________________ ERROR collecting test session _________________________
../../../Library/Python/3.7/lib/python/site-packages/_pytest/python.py:513: in _importtestmodule
mod = self.fspath.pyimport(ensuresyspath=importmode)
../../../Library/Python/3.7/lib/python/site-packages/py/_path/local.py:701: in pyimport
__import__(modname)
<frozen importlib._bootstrap>:983: in _find_and_load
???
<frozen importlib._bootstrap>:967: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:677: in _load_unlocked
???
../../../Library/Python/3.7/lib/python/site-packages/_pytest/assertion/rewrite.py:143: in exec_module
source_stat, co = _rewrite_test(fn, self.config)
../../../Library/Python/3.7/lib/python/site-packages/_pytest/assertion/rewrite.py:328: in _rewrite_test
tree = ast.parse(source, filename=fn)
/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py:35: in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
E File "/private/tmp/blabla.py", line 47
E with open f as fn:
E ^
E SyntaxError: invalid syntax
=========================== short test summary info ============================
ERROR ../../../../../tmp
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.20s ===============================
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
44c9e8921ec5d14bab6637c0c9b70692a3cabd18 | 4a2bfa14d4d250d742b1737639e3768936382425 | /virtual/bin/pip | b6fff006bf91c8740a98fcf97af02304b45217ae | [] | no_license | AugustineOchieng/gallery | 1946c62894b5e73adb34deddaa8d93d9ece21705 | 1d0933b96dc50a100451e609ddfe49af6c6ff9b2 | refs/heads/master | 2020-05-21T07:19:11.787689 | 2019-05-10T09:15:30 | 2019-05-10T09:15:30 | 185,957,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/home/moringa/Desktop/gallery/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"gusochieng@gmail.com"
] | gusochieng@gmail.com | |
fc35f22f1539d28df0821c5ab0b2026bb173b18f | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /hr_attendance/hr_attendance.py | 01ca1d441abc805ee879c1f031b880be3055d097 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,440 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from mx import DateTime
import time
from osv import fields, osv
from tools.translate import _
class hr_action_reason(osv.osv):
    # Reason attached to a sign-in / sign-out attendance action.
    _name = "hr.action.reason"
    _description = "Action reason"
    _columns = {
        'name' : fields.char('Reason', size=64, required=True),
        # Restricts which action (sign in / sign out) this reason applies to.
        'action_type' : fields.selection([('sign_in', 'Sign in'), ('sign_out', 'Sign out')], "Action's type"),
    }
    _defaults = {
        'action_type' : lambda *a: 'sign_in',
    }
# Register the model with the ORM (legacy OpenERP instantiation pattern).
hr_action_reason()
def _employee_get(obj,cr,uid,context={}):
    """Default-value helper: id of the hr.employee linked to the current user.

    Returns the first matching employee id, or False when the user has no
    employee record.  NOTE(review): mutable default ``context={}`` is the
    classic Python pitfall; safe only as long as callees never mutate it.
    """
    ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
    if ids:
        return ids[0]
    return False
class hr_attendance(osv.osv):
    # One attendance event (sign in / sign out / intermediate action).
    _name = "hr.attendance"
    _description = "Attendance"
    _columns = {
        'name' : fields.datetime('Date', required=True),
        'action' : fields.selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'),('action','Action')], 'Action', required=True),
        # Reason list is filtered by the chosen action via the domain.
        'action_desc' : fields.many2one("hr.action.reason", "Action reason", domain="[('action_type', '=', action)]"),
        'employee_id' : fields.many2one('hr.employee', 'Employee', required=True, select=True),
    }
    _defaults = {
        'name' : lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'employee_id' : _employee_get,
    }

    def _altern_si_so(self, cr, uid, ids):
        """Constraint: sign-in and sign-out events must strictly alternate.

        For each record, fetches the two most recent sign_in/sign_out rows of
        the same employee up to this record's timestamp and checks they differ
        in action and timestamp (or that this is the very first event and it
        is a sign_in).
        """
        for id in ids:
            sql = '''
                select action, name
                from hr_attendance as att
                where employee_id = (select employee_id from hr_attendance where id=%s)
                and action in ('sign_in','sign_out')
                and name <= (select name from hr_attendance where id=%s)
                order by name desc
                limit 2
            '''
            cr.execute(sql, (id, id))
            atts = cr.fetchall()
            if not ((len(atts)==1 and atts[0][0] == 'sign_in') or (atts[0][0] != atts[1][0] and atts[0][1] != atts[1][1])):
                return False
        return True

    _constraints = [(_altern_si_so, 'Error: Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]
    _order = 'name desc'
# Register the model with the ORM (legacy OpenERP instantiation pattern).
hr_attendance()
class hr_employee(osv.osv):
    # Extends hr.employee with a computed presence state and sign in/out helpers.
    _inherit = "hr.employee"
    _description = "Employee"

    def _state(self, cr, uid, ids, name, args, context={}):
        """Functional-field getter: 'present' or 'absent' per employee id.

        An employee is 'present' when their most recent sign_in/sign_out
        event is a sign_in; employees with no events default to 'absent'.
        NOTE(review): an empty ``ids`` would produce an invalid ``in ()``
        SQL clause -- callers are expected to pass a non-empty list.
        """
        result = {}
        for id in ids:
            result[id] = 'absent'
        cr.execute('SELECT hr_attendance.action, hr_attendance.employee_id \
                FROM ( \
                    SELECT MAX(name) AS name, employee_id \
                    FROM hr_attendance \
                    WHERE action in (\'sign_in\', \'sign_out\') \
                    GROUP BY employee_id \
                ) AS foo \
                LEFT JOIN hr_attendance \
                    ON (hr_attendance.employee_id = foo.employee_id \
                        AND hr_attendance.name = foo.name) \
                WHERE hr_attendance.employee_id \
                    in %s', (tuple(ids),))
        for res in cr.fetchall():
            result[res[1]] = res[0] == 'sign_in' and 'present' or 'absent'
        return result

    _columns = {
        'state': fields.function(_state, method=True, type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Attendance'),
    }

    def sign_change(self, cr, uid, ids, context={}, dt=False):
        """Record an intermediate 'action' attendance event for each employee."""
        for emp in self.browse(cr, uid, ids):
            # Refuse timestamps earlier than the employee's latest event.
            if not self._action_check(cr, uid, emp.id, dt, context):
                raise osv.except_osv(_('Warning'), _('You tried to sign with a date anterior to another event !\nTry to contact the administrator to correct attendances.'))
            res = {'action':'action', 'employee_id':emp.id}
            if dt:
                res['name'] = dt
            att_id = self.pool.get('hr.attendance').create(cr, uid, res, context=context)
        return True

    def sign_out(self, cr, uid, ids, context={}, dt=False, *args):
        """Record a sign-out event; returns the last created attendance id."""
        id = False
        for emp in self.browse(cr, uid, ids):
            if not self._action_check(cr, uid, emp.id, dt, context):
                raise osv.except_osv(_('Warning'), _('You tried to sign out with a date anterior to another event !\nTry to contact the administrator to correct attendances.'))
            res = {'action':'sign_out', 'employee_id':emp.id}
            if dt:
                res['name'] = dt
            att_id = self.pool.get('hr.attendance').create(cr, uid, res, context=context)
            id = att_id
        return id

    def _action_check(self, cr, uid, emp_id, dt=False,context={}):
        """True when *dt* (or now) is later than the employee's latest event."""
        cr.execute('select max(name) from hr_attendance where employee_id=%s', (emp_id,))
        res = cr.fetchone()
        return not (res and (res[0]>=(dt or time.strftime('%Y-%m-%d %H:%M:%S'))))

    def sign_in(self, cr, uid, ids, context={}, dt=False, *args):
        """Record a sign-in event; returns the last created attendance id."""
        id = False
        for emp in self.browse(cr, uid, ids):
            if not self._action_check(cr, uid, emp.id, dt, context):
                raise osv.except_osv(_('Warning'), _('You tried to sign in with a date anterior to another event !\nTry to contact the administrator to correct attendances.'))
            res = {'action':'sign_in', 'employee_id':emp.id}
            if dt:
                res['name'] = dt
            id = self.pool.get('hr.attendance').create(cr, uid, res, context=context)
        return id
# Register the model with the ORM (legacy OpenERP instantiation pattern).
hr_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"mark.norgate@affinity-digital.com"
] | mark.norgate@affinity-digital.com |
0a38735793340df0638f90b37e8c9454bde3bc24 | 5b3d8b5c612c802fd846de63f86b57652d33f672 | /Python/seven_kyu/all_non_consecutive.py | 14c8d4b4b416904d5134728fa537b9d0cee1c26c | [
"Apache-2.0"
] | permissive | Brokenshire/codewars-projects | 1e591b57ed910a567f6c0423beb194fa7f8f693e | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | refs/heads/master | 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # Python solution for 'Find all non-consecutive numbers' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS AND ARRAYS.
# Author: Jack Brokenshire
# Date: 05/08/2020
import unittest
def all_non_consecutive(arr):
"""
Find all the elements of an array that are non consecutive. A number is non consecutive if it is not exactly one
larger than the previous element in the array. The first element gets a pass and is never considered non consecutive.
:param arr: An array of integers.
:return: The results as an array of objects with two values i: <the index of the non-consecutive number> and n:
<the non-consecutive number>.
"""
return [{'i': i + 1, 'n': arr[i + 1]} for i in range(len(arr) - 1) if arr[i] + 1 != arr[i + 1]]
class TestAllNonConsecutive(unittest.TestCase):
"""Class to test 'all_non_consecutive' function"""
def test_all_non_consecutive(self):
self.assertEqual(all_non_consecutive([1, 2, 3, 4, 6, 7, 8, 10]), [{'i': 4, 'n': 6}, {'i': 7, 'n': 10}])
if __name__ == "__main__":
unittest.main()
| [
"29889878+Brokenshire@users.noreply.github.com"
] | 29889878+Brokenshire@users.noreply.github.com |
ee1db31b9cc40af9415eb0b52dc577ffb039e338 | fe71d0f38a282225e6cfbc918bd8e4ca21ad4335 | /factory/factory/settings.py | 021ecb68667bf0b52e8896d0db0d3bcd1017241b | [
"MIT"
] | permissive | avara1986/graphql-django | 5db4b503da9918e7c2ea6374ff99e1bb87c358f0 | 57b9bcb479842e243488a59cb4db4f523c2877ce | refs/heads/master | 2021-06-24T16:15:33.110235 | 2017-09-10T18:35:51 | 2017-09-10T18:35:51 | 103,052,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | """
Django settings for factory project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z^t3h)3+s@+v&7j9-6&r(4ji9m5#secm(-_jz(1*_j&x26bp6t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'cars'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'factory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'factory.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
GRAPHENE = {
'SCHEMA': 'cars.schema.schema' # Where your Graphene schema lives
}
| [
"a.vara.1986@gmail.com"
] | a.vara.1986@gmail.com |
e0305bf4df162ebc08c5f96428ce7e4ffb35c523 | f31ec01e5e7fc7ba1704cd7f1e59992752ecbf8f | /tornado/platform/auto.py | d3eeb56b2e519fa77293677eccfd6eba46e5f2a7 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | st4lk/tornado | 5a79995ada89fd090acf251c71a26d4eeea75e6b | 1ceeb1ffd581f31678cd63fe81ef8d2e4f35380b | refs/heads/master | 2020-12-03T05:16:44.365763 | 2015-02-23T15:49:23 | 2015-02-23T15:49:23 | 29,911,658 | 1 | 1 | null | 2015-01-27T11:42:04 | 2015-01-27T11:42:04 | null | UTF-8 | Python | false | false | 1,726 | py | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
if os.name == 'nt':
from tornado.platform.common import Waker
from tornado.platform.windows import set_close_exec
elif 'APPENGINE_RUNTIME' in os.environ:
from tornado.platform.common import Waker
def set_close_exec(fd):
pass
else:
from tornado.platform.posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
# Silence pyflakes warning about this unused import
monotime
except ImportError:
pass
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
__all__ = ['Waker', 'set_close_exec', 'monotonic_time']
| [
"ben@bendarnell.com"
] | ben@bendarnell.com |
96bb6d5195d072d90ecd75f5f6b6bab8750b45e8 | a2d13658503b9b921e27994152ab6adb554725bc | /store/migrations/0036_auto_20201226_1430.py | 90e2d15defac2bc93f126c4c3f0918dc532b67f9 | [] | no_license | avishkakavindu/sushi-chef-django | 40a1d7916d7f8c37ba1290cb717af517d2bce265 | 4c112d806720d903877822baaa26159c32704901 | refs/heads/master | 2023-03-18T11:12:41.721554 | 2021-03-11T08:22:52 | 2021-03-11T08:22:52 | 303,053,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # Generated by Django 3.1.2 on 2020-12-26 09:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0035_auto_20201226_1241'),
]
operations = [
migrations.AddField(
model_name='order',
name='total',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
preserve_default=False,
),
migrations.AlterField(
model_name='order',
name='coupon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='coupon_set', to='store.coupon'),
),
migrations.AlterField(
model_name='orderedproduct',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderedproduct_set', to='store.order'),
),
]
| [
"avishkakavindud@gmail.com"
] | avishkakavindud@gmail.com |
9a3b27804f5201776943de0278f7deff9ad858ca | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/735.py | 174fa827ffdb1d04493c39ad3d27ab4e27f67fca | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 367 | py | import string
def word_count(phrase):
punct = set(string.punctuation)
no_punct =''
for char in phrase:
if char not in punct:
no_punct += char
dict = {}
for word in no_punct.split():
str = word.lower()
if str not in dict:
dict[str] = 1
else:
dict[str] +=1
return dict
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
665eedf826b911f95e4b7ff7f0f21c48d89547fb | 7eda5c4c9bfedbd561d77df14c454f0485d8e025 | /Program Assignment4_Mincut/kargerMinCut.py | e4844a29c093e5d88cf14dc66981c36e82f046e1 | [
"MIT"
] | permissive | brianchiang-tw/Algorithm_specialization_Part-I | af6c7e1ad7f70323d1b92086d85dd9f7ec157c1b | 44b24f4a97f23d24bde6a4235f70f7707c8b03b7 | refs/heads/master | 2020-09-12T14:57:59.434448 | 2019-11-18T14:36:37 | 2019-11-18T14:36:37 | 222,459,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,025 | py | import sys
import os
import math
import random
from datetime import datetime
import copy
def edge_contraction( adj_list_dict, vertex_u, vertex_v):
# contract edge(u, v)
# keep vertex u, and update u's adjacency list from appending vertex v's
adj_list_dict[ vertex_u] = adj_list_dict[ vertex_u ] + adj_list_dict[ vertex_v ]
# remove v's adjacency list from global adjacency list
adj_list_dict.pop( vertex_v )
# update each edge(x, v) redircting to edge(x, u)
for i in adj_list_dict:
for j in range( len(adj_list_dict[i] ) ):
if adj_list_dict[i][j] == vertex_v:
adj_list_dict[i][j] = vertex_u
# eliminate all self-loop edges during current edge contraction
adj_list_dict[ vertex_u ] = list( filter(lambda vertex: vertex != vertex_u, adj_list_dict[vertex_u] ) )
# return updated adjacency list dictionary
return adj_list_dict
def karger_min_cut( graph_with_adj_list_dict ):
if len(graph_with_adj_list_dict) == 2:
# Base case and stop condition
list_of_all_edge = list( graph_with_adj_list_dict.values() )
# the remaining count of edge is min cut
return len( list_of_all_edge[0] )
else:
# Inductive step:
# Keep conducting karger algorithm until only 2 verteices remain.
# list of all vertex (key value of "graph_with_adj_list_dict" )
list_of_all_vertex_in_graph = list( graph_with_adj_list_dict.keys() )
# randomly choose one edge with two end points, vertex_u and vertex v
# vertex u
vertex_u = random.choice( list_of_all_vertex_in_graph )
# vertex v
vertex_v = random.choice( graph_with_adj_list_dict[vertex_u] )
# conduct edge contraction on edge E = (u, v)
# update graph with adjacency list dictionary
#graph_with_adj_list_dict = edge_contraction( graph_with_adj_list_dict, vertex_u, vertex_v)
# keep ruuning karger algorithm until graph has two vertices only
min_cut = karger_min_cut( edge_contraction( graph_with_adj_list_dict, vertex_u, vertex_v) )
# the remaining count of edge is min cut
return min_cut
def main():
current_work_directory = os.getcwd()
filename = current_work_directory + "\Program Assignment4_Mincut\kargerMinCut.txt"
with open( filename) as file_handle:
# graph is a dictionay, on the basis of adjacency list
# key : vertex i
# value : those verteices connected to vertex i
graph = {}
for one_line in file_handle:
# each line in input text file is well separated by tab, i.e., the "\t"
one_adjacency_list = list( map(int, one_line.strip().split("\t") ) )
# get vertex index as dictionay's key
vertex_i = one_adjacency_list.pop(0)
# print("vertex i : ", vertex_i )
# get adjacency list, excluding the first one value(key), as dictionary's value
graph[vertex_i] = one_adjacency_list
# get size of graph ( the number of vertex)
size_of_graph = len(graph)
v_square = size_of_graph ** 2
# min_cut initialization with |V|^2
min_cut = v_square
# upper_bound initialization with |V|^2 * log |V|
upper_bound = int( v_square*math.log(size_of_graph) )
for i in range( upper_bound ):
new_graph = copy.deepcopy( graph )
current_min_cut = karger_min_cut( new_graph )
'''
print( "\n iteration counter: ", i)
print( "current min cut: ", current_min_cut )
print( "minimal min cut so far", min_cut )
'''
if( current_min_cut < min_cut ):
min_cut = current_min_cut
print("min cut updated in this iteration: ", min_cut)
print("\n final min cut value:", min_cut)
return
if __name__ == "__main__":
main()
| [
"brianchiang1988@icloud.com"
] | brianchiang1988@icloud.com |
de9a7a96eb7c0d7336f901b12ee53b3107656994 | 7503725bc8098d34e0973c5661685582bef0cbbb | /mmdet2trt/models/roi_heads/htc_roi_head.py | 6e9f71f6bf2af79e897e5167e107623189a932a7 | [
"Apache-2.0"
] | permissive | zjj-2015/mmdetection-to-tensorrt | bf280ef9359ec95293eee5062cf184786133543a | e1b91743cd4c9a145fc2b2701baef5ff648a1a4c | refs/heads/master | 2023-08-30T05:00:26.866383 | 2021-11-16T14:18:10 | 2021-11-16T14:18:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,898 | py | import mmdet2trt.ops.util_ops as mm2trt_util
import torch
import torch.nn.functional as F
from mmdet2trt.core.post_processing import merge_aug_masks
from mmdet2trt.models.builder import build_wraper, register_wraper
from mmdet.core.bbox.coder.delta_xywh_bbox_coder import delta2bbox
from .cascade_roi_head import CascadeRoIHeadWraper
@register_wraper('mmdet.models.roi_heads.HybridTaskCascadeRoIHead')
class HybridTaskCascadeRoIHeadWraper(CascadeRoIHeadWraper):
def __init__(self, module, wrap_config):
super(HybridTaskCascadeRoIHeadWraper,
self).__init__(module, wrap_config)
module = self.module
self.semantic_head = None
if module.semantic_head is not None:
self.semantic_roi_extractor = build_wraper(
module.semantic_roi_extractor)
self.semantic_head = module.semantic_head
def _bbox_forward(self, stage, x, rois, semantic_feat=None):
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
if rois.shape[1] == 4:
zeros = rois.new_zeros([rois.shape[0], 1])
rois = torch.cat([zeros, rois], dim=1)
roi_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois)
if self.module.with_semantic and 'box' in self.module.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != roi_feats.shape[-2:]:
bbox_semantic_feat = F.adaptive_avg_pool2d(
bbox_semantic_feat, roi_feats.shape[-2:])
cls_score, bbox_pred = bbox_head(roi_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=roi_feats)
return bbox_results
def regress_by_class(self, stage, rois, label, bbox_pred):
bbox_head = self.bbox_head[stage]
reg_class_agnostic = bbox_head.reg_class_agnostic
if not reg_class_agnostic:
label = label * 4
inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
means = bbox_head.bbox_coder.means
stds = bbox_head.bbox_coder.stds
new_rois = delta2bbox(rois, bbox_pred, means, stds)
return new_rois
def forward(self, feat, proposals, img_shape):
ms_scores = []
batch_size = proposals.shape[0]
num_proposals = proposals.shape[1]
rois_pad = mm2trt_util.arange_by_input(proposals, 0).unsqueeze(1)
rois_pad = rois_pad.repeat(1, num_proposals).view(-1, 1)
proposals = proposals.view(-1, 4)
rois = proposals
if self.module.with_semantic:
_, semantic_feat = self.semantic_head(feat)
else:
semantic_feat = None
for i in range(self.num_stages):
bbox_results = self._bbox_forward(
i,
feat,
torch.cat([rois_pad, rois], dim=1),
semantic_feat=semantic_feat)
ms_scores.append(bbox_results['cls_score'])
bbox_pred = bbox_results['bbox_pred']
if i < self.num_stages - 1:
bbox_label = bbox_results['cls_score'].argmax(dim=1)
rois = self.bbox_head[i].regress_by_class(
rois, bbox_label, bbox_pred, img_shape)
rois = torch.cat([rois_pad, rois], dim=1)
# bbox_head.get_boxes
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_detections, det_boxes, det_scores, det_classes = self.bbox_head[
-1].get_bboxes(rois, cls_score, bbox_pred, img_shape, batch_size,
num_proposals, self.test_cfg)
result = [num_detections, det_boxes, det_scores, det_classes]
if self.enable_mask:
# mask roi input
num_mask_proposals = det_boxes.size(1)
rois_pad = mm2trt_util.arange_by_input(det_boxes, 0).unsqueeze(1)
rois_pad = rois_pad.repeat(1, num_mask_proposals).view(-1, 1)
mask_proposals = det_boxes.view(-1, 4)
mask_rois = torch.cat([rois_pad, mask_proposals], dim=1)
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
feat[:mask_roi_extractor.num_inputs], mask_rois)
if self.module.with_semantic and ('mask'
in self.module.semantic_fusion):
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
aug_masks = []
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.module.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
mask_pred = mask_pred.sigmoid()
aug_masks.append(mask_pred)
mask_pred = merge_aug_masks(aug_masks, self.test_cfg)
mc, mh, mw = mask_pred.shape[1:]
mask_pred = mask_pred.reshape(batch_size, -1, mc, mh, mw)
if not self.module.mask_head[-1].class_agnostic:
det_index = det_classes.unsqueeze(-1).long()
det_index = det_index + 1
mask_pad = mask_pred[:, :, 0:1, ...] * 0
mask_pred = torch.cat([mask_pad, mask_pred], dim=2)
mask_pred = mm2trt_util.gather_topk(
mask_pred, dim=2, index=det_index)
mask_pred = mask_pred.squeeze(2)
result += [mask_pred]
return result
| [
"streetyao@live.com"
] | streetyao@live.com |
07d34db149dfe54a5b165170aabfd130243cfe8d | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_software_termination/12-2Nested_false-termination_5.py | 03da5a2a4a14729312115bb8d36de8036d58e9cf | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,525 | py | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.TRUE())
loc0.set_progress(0, mgr.TRUE())
h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
h_y = Hint("h_y5", env, frozenset([y]), symbs)
h_y.set_locs([loc0])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
return frozenset(res)
| [
"en.magnago@gmail.com"
] | en.magnago@gmail.com |
6f66c53bf1536515d0e2b64c20185a6e0d4097b9 | c5c58a08ea6841f7042e037db6ad7eb668dbc0bb | /code/1/ninja/old/basicstats.py | 9adb83819260894257cc252d56d83a8e0865fb01 | [
"MIT"
] | permissive | amritbhanu/fss16591 | 99745091e355568230809e9c56bd7627fe1eea58 | d10c9e6e20c2d131441b6a03a27ecb030842b596 | refs/heads/master | 2020-04-24T01:17:37.431866 | 2017-08-09T15:03:25 | 2017-08-09T15:03:25 | 65,946,447 | 0 | 2 | null | 2016-09-10T03:37:59 | 2016-08-17T22:27:27 | Python | UTF-8 | Python | false | false | 3,072 | py |
""" __________________________________________________
# simplestats.py: simple basic stats
"""
from __future__ import division,print_function
import sys,math
sys.dont_write_bytecode=True
def normalDiff(mu1,sd1,n1,mu2,sd2,n2):
nom = mu2 - mu1
denom = delta/((sd1/n1 + sd2/n2)**0.5) if s1+s2 else 1
return nom/denom
def lstDiff(lst1,lst2):
"""Checks if two means are different, tempered
by the sample size of 'y' and 'z'"""
tmp1 = tmp2 = 0
n1,n2 = len(lst1), len(lst2)
mu1 = sum(lst1) / n1
mu2 = sum(lst2) / n2
tmp1 = sum( (y1 - mu1)**2 for y1 in lst1 )
tmp2 = sum( (y2 - mu2)**2 for y2 in lst2 )
sd1 = ( tmp1 / (n1 - 1) )**0.5
sd2 = ( tmp2 / (n2 - 1) )**0.5
return normalDiff(mu1,sd1,n1,mu2,sd2,n2)
""" _________________________________________________
## Stats tricks
"""
def xtend(x,xs,ys):
"""given pairs ofs values, find the gap with x
and extrapolate at that gap size across the y
xtend(-5, [0,5,10,20], [0,10,20,40] ) ==> -10
xtend(25, [0,5,10,20], [0,10,20,40] ) ==> 50
xtend(40, [0,5,10,20], [0,10,20,40] ) ==> 80
"""
x0, y0 = xs[0], ys[0]
for x1,y1 in zip(xs,ys):
if x < x0 or x > xs[-1] or x0 <= x < x1:
break
x0, y0 = x1, y1
gap = (x - x0)/(x1 - x0)
print dict(x0=x0,x=x,x1=x1,gap=gap,y0=y0,y1=y1)
return y0 + gap*(y1 - y0)
def ttestThreshold(df,conf=99,
xs= [ 1, 2, 5, 10, 15, 20, 25, 30, 60, 100]
ys={0.9: [ 3.078, 1.886, 1.476, 1.372, 1.341, 1.325, 1.316, 1.31, 1.296, 1.29],
0.95: [ 6.314, 2.92, 2.015, 1.812, 1.753, 1.725, 1.708, 1.697, 1.671, 1.66],
0.99: [31.821, 6.965, 3.365, 2.764, 2.602, 2.528, 2.485, 2.457, 2.39, 2.364]}):
return xtend(df,xs,ys[conf])
def ttestSame(lst1,lst2,conf=95):
df = min(len(lst1) - 1, len(lst2) - 1)
return ttestThreshold(df) < lstDiff(lst1,lst2)
def chi2Threshold(df,conf=99,
xs = [ 1 , 2, 5, 10, 15,
20 , 25, 30, 60, 100],
ys= {99 : [ 0.000, 0.020, 0.554, 2.558, 5.229,
8.260, 11.524, 14.953, 37.485, 70.065],
95 : [ 0.004, 0.103, 1.145, 3.940, 7.261,
10.851, 14.611, 18.493, 43.188, 77.929],
90 : [ 0.016, 0.211, 1.610, 4.865, 8.547,
12.443, 16.473, 20.599, 46.459, 82.358]}):
return xtend(df,xs,ys[conf])
def chi2Same(obs1,obs2):
obs12,tot1,tot2,r,c = {},0,0,2,0
for k,v in obs1.items():
c += 1
tot1 += v
obs12[k] = obs12.get(k,0) + v
for k,v in obs2.items():
tot2 += v
obs12[k] = obs12.get(k,0) + v
tots = tot1 + tot2
expect1 = { k:tot1*v/tots for k,v in obs12.items() }
expect2 = { k:tot2*v/tots for k,v in obs12.items() }
chi = [ (obs1[k] - expect)**2/expect for k,expect in expect1.items() ] + [
(obs2[k] - expect)**2/expect for k,expect in expect2.items() ]
df = (r-1)*(c-1)
return chi2Threshold(df) < sum(chi)
| [
"amritbhanu@gmail.com"
] | amritbhanu@gmail.com |
929b2b37442af1e84eb361c74e7758e337b1abab | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /keras/keras19_shape.py | a32d49ea5b537c83e66f5fe52ba953aa27ce27c8 | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | # 1. 데이터
import numpy as np
x = np.array([range(1, 101), range(311, 411), range(100)])
y = np.array(range(711, 811))
# 1-1. 행과 열을 바꾸기 - 전치행렬 구하기
x = x.transpose()
y = y.transpose()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size = 0.8, shuffle = False)
# 2. 모델 구성
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
# model.add(Dense(5, input_dim = 3))
model.add(Dense(5, input_shape = (3, ))) # input_dim = 3과 같다.
model.add(Dense(4))
model.add(Dense(1))
# 3. 훈련
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['mse'])
model.fit(x_train, y_train, epochs = 100, batch_size = 1,
validation_split = 0.25, verbose = 2)
# 4. 평가 및 예측
loss, mse = model.evaluate(x_test, y_test, batch_size = 1)
print("loss : ", loss)
print("mse : ", mse)
y_predict = model.predict(x_test)
print(y_predict)
# 5. RMSE 구하기
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE : ", RMSE(y_test, y_predict))
# 6. R2 구하기
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print("R2 : ", r2)
| [
"92.seoonooo@gmail.com"
] | 92.seoonooo@gmail.com |
769912c4efb8929002f8bd37954cdb0604e47ca6 | 9e9588fbd2eeb48cb45b053cc37c6c40ef1b5558 | /web_app/app.py | c2837a25a396f7cf1527f63aea717d0bf534a208 | [
"MIT"
] | permissive | ArRosid/question-answer-flask | cfecbb8681b258fe389267fbbc16eec19595c36c | 920f5cf5a16887006819183b2573a9d896e3107d | refs/heads/master | 2021-04-23T00:19:34.742155 | 2020-03-31T05:08:28 | 2020-03-31T05:08:28 | 249,883,456 | 0 | 0 | MIT | 2021-03-20T03:16:30 | 2020-03-25T04:03:36 | HTML | UTF-8 | Python | false | false | 8,066 | py | import os
from flask import (Flask, g, redirect, render_template, request, session,
url_for)
from werkzeug.security import check_password_hash, generate_password_hash
import dbcon
app = Flask(__name__)
app.config["SECRET_KEY"] = os.urandom(24)
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'postgres_db_cur'):
g.postgres_db_cur.close()
if hasattr(g, 'postgres_db_conn'):
g.postgres_db_conn.close()
def get_current_user():
user_result = None
if 'user' in session:
user = session["user"]
db = dbcon.get_db()
db.execute("select * from users where name = %s", (user,))
user_result = db.fetchone()
return user_result
def get_unanswered_question(expert_user_id):
db = dbcon.get_db()
db.execute('''select id from questions
where answer_text is null and expert_id=%s''',
(expert_user_id,))
question_result = db.fetchall()
return len(question_result)
@app.route("/")
def index():
user = get_current_user()
error = request.args.get('error') #get the error message from argument
# Get unanswered question count (only for expert)
unanswered_q = None
if user is not None:
unanswered_q = get_unanswered_question(user["id"])
db = dbcon.get_db()
db.execute(''' select questions.id, questions.question_text,
asker.name as asker_name, expert.name as expert_name
from questions
join users as asker on asker.id = questions.asked_by_id
join users as expert on expert.id = questions.expert_id
where answer_text is not null ''')
questions_results = db.fetchall()
return render_template("home.html", user=user,
questions=questions_results,
unanswered_q=unanswered_q,
error=error)
@app.route("/register", methods=["GET", "POST"])
def register():
db = dbcon.get_db()
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
db.execute("select id from users where name=%s", (username, ))
existing_user = db.fetchone()
if existing_user:
return render_template("register.html", error="User already exist!")
hashed_password = generate_password_hash(password, method='sha256')
db.execute(''' insert into users (name, password, expert, admin)
values (%s, %s, %s, %s)''',
(username, hashed_password, '0', '0'))
session["user"] = username
return redirect(url_for('index'))
return render_template("register.html")
@app.route("/login", methods=["GET", "POST"])
def login():
db = dbcon.get_db()
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
db.execute("select id, name, password from users where name = %s ",
(username,))
user = db.fetchone()
if not user: # if the user is not in database
return render_template("login.html", error="Username & Password not match!")
if check_password_hash(user["password"], password):
session["user"] = user["name"]
return redirect(url_for("index"))
else: # if the password is wrong
return render_template("login.html", error="Username & Password not match!")
return render_template("login.html")
@app.route("/ask", methods=["GET","POST"])
def ask():
user = get_current_user()
if not user:
return redirect(url_for("login"))
db = dbcon.get_db()
if request.method == "POST":
db.execute('''insert into questions (question_text, asked_by_id, expert_id)
values (%s,%s,%s)''',
(request.form["question"], user["id"], request.form["expert"]))
return redirect(url_for("index"))
db.execute("select id, name from users where expert = True")
expert_result = db.fetchall()
return render_template("ask.html", user=user, experts=expert_result)
@app.route("/unanswered")
def unanswered():
user = get_current_user()
if not user:
return redirect(url_for("login"))
if not user["expert"]: #only expert can access this route
return redirect(url_for("index", error="You don't permission to access this page!"))
unanswered_q = get_unanswered_question(user["id"])
db = dbcon.get_db()
db.execute('''select questions.id, questions.question_text,
questions.asked_by_id, users.name
from questions
join users on users.id = questions.asked_by_id
where answer_text is null and expert_id = %s''',
(user["id"],))
question_result = db.fetchall()
return render_template("unanswered.html", user=user,
questions=question_result,
unanswered_q=unanswered_q)
@app.route("/answer/<question_id>", methods=["GET","POST"])
def answer(question_id):
    """Display one assigned question and store the expert's answer."""
    expert_user = get_current_user()
    if not expert_user:
        return redirect(url_for("login"))
    if not expert_user["expert"]:
        # Only experts may answer questions.
        return redirect(url_for("index", error="You don't permission to access this page!"))
    db = dbcon.get_db()
    if request.method != "POST":
        # GET: fetch the question text so the form can display it.
        db.execute("select id, question_text from questions where id=%s", (question_id,))
        pending = db.fetchone()
        return render_template("answer.html", user=expert_user, question=pending)
    # POST: persist the answer, then return to the remaining backlog.
    db.execute("update questions set answer_text = %s where id=%s",
               (request.form["answer"], question_id,))
    return redirect(url_for("unanswered"))
@app.route("/question/<question_id>")
def question(question_id):
    """Show a single question together with its answer and both names."""
    # BUG FIX: the original read `user = get_current_user` — that assigns
    # the function object itself instead of calling it, so `user` was
    # never the signed-in account.
    user = get_current_user()
    db = dbcon.get_db()
    # Join the users table twice to resolve both the asker's and the
    # assigned expert's display names.
    db.execute('''select questions.question_text, questions.answer_text,
                asker.name as asker_name, expert.name as expert_name
                from questions
                join users as asker on asker.id = questions.asked_by_id
                join users as expert on expert.id = questions.expert_id
                where questions.id = %s''', (question_id,))
    question_result = db.fetchone()
    # Pass `user` like every other view so the template can render the
    # signed-in state (the original fetched it but never used it).
    return render_template("question.html", user=user, question=question_result)
@app.route("/users")
def users():
    """Admin-only listing of every account with its role flags."""
    admin_user = get_current_user()
    if not admin_user:
        return redirect(url_for('login'))
    if not admin_user["admin"]:
        # User management is restricted to administrators.
        return redirect(url_for("index", error="You don't permission to access this page!"))
    db = dbcon.get_db()
    db.execute("select id, name, expert, admin from users")
    all_accounts = db.fetchall()
    return render_template("users.html", user=admin_user, users=all_accounts)
@app.route("/promote/<user_id>")
def promote(user_id):
    """Toggle the expert flag on the given account (admins only)."""
    admin_user = get_current_user()
    if not admin_user:
        return redirect(url_for("login"))
    if not admin_user["admin"]:
        # Only administrators may change roles.
        return redirect(url_for("index", error="You don't permission to access this page!"))
    db = dbcon.get_db()
    # Read the current flag, then flip it.
    db.execute("select expert from users where id = %s", (user_id,))
    target = db.fetchone()
    if target["expert"]:
        # Currently an expert: demote back to a regular user.
        db.execute("update users set expert = False where id = %s", (user_id,))
    else:
        # Currently a regular user: promote to expert.
        db.execute("update users set expert = True where id = %s", (user_id,))
    return redirect(url_for("users"))
@app.route("/logout")
def logout():
    """Sign the current user out and return to the home page."""
    # pop() with a default never raises, even if nobody is signed in.
    session.pop("user", None)
    return redirect(url_for("index"))
if __name__ == "__main__":
    # Development entry point.
    # NOTE(review): port is passed as a string — confirm Flask coerces it
    # to an int, or change it to 5001. debug=True combined with binding
    # 0.0.0.0 exposes the debugger on the network; not for production.
    app.run(host="0.0.0.0", port="5001", debug=True)
| [
"ahmadrosid30121997@gmail.com"
] | ahmadrosid30121997@gmail.com |
e7497d37b10b60b49e884962d8cc83295158c6b3 | a3493aaf1fc0b067d852bb7cd8e81b0fee6145d6 | /Modules_&_pip.py | ba57b9dc523a2416afbdde8e5a9cdf195315561a | [] | no_license | VivakaNand/Python_For_Beginners_by_Udemy | 862c128f8dd4035c794b99494474de661a2be5e0 | ab119426c9ded6f46256ec8f915fdf439b1e520d | refs/heads/master | 2020-11-27T14:30:31.530986 | 2019-12-21T22:25:47 | 2019-12-21T22:25:47 | 229,488,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 03:00:21 2019
@author: VIVEK VISHAN
"""
# Modules & pip
# Demonstrates importing a local module (useful_tools) and a pip-installed
# package (python-docx).
import useful_tools

print(useful_tools.roll_dice(10))
print(useful_tools.feet_in_mile)

import docx  # pip-installed package; imported here purely as a demo
# BUG FIX: the file previously ended with the bare token "docx." (an
# unfinished attribute access), which is a SyntaxError and prevented the
# whole script from running. The dangling token has been removed.
| [
"vivekjetani83@gmail.com"
] | vivekjetani83@gmail.com |
7446edb80b72ff3519521b98b4f5fbc49526c82d | b68e3b8485ea8bef9fc7b3cbf6baa98a51fa533f | /section14/lesson173/test_calculation.py | 1eac580f11491856c765cc11bd46aca7c0f2f447 | [] | no_license | Naoya-abe/siliconvalley-python | 7936b7e779072b23e16c9d50cca44c2e0bf6eb5f | 8d226adaea839b64b1e5eb62985349b5bb2e1484 | refs/heads/master | 2021-05-20T14:40:47.682812 | 2020-04-27T02:02:43 | 2020-04-27T02:02:43 | 252,336,229 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | # 独自ののfixture
import os
import pytest
import calculation
class TestCal(object):
    """pytest test class for calculation.Cal, using class-level fixtures."""
    @classmethod
    def setup_class(cls):
        # Runs once before any test in this class: share one Cal instance
        # and the fixed paths used by the save() tests.
        cls.cal = calculation.Cal()
        cls.test_dir = '/tmp/test_dir'
        cls.test_file_name = 'test.txt'
    @classmethod
    def teardown_class(cls):
        # Runs once after all tests: remove the directory created by
        # test_save_no_dir, if it was created.
        import shutil
        if os.path.exists(cls.test_dir):
            shutil.rmtree(cls.test_dir)
    def test_save_no_dir(self):
        # The assertion implies save() creates the target directory and
        # file when the directory does not yet exist.
        self.cal.save(self.test_dir, self.test_file_name)
        test_file_path = os.path.join(
            self.test_dir, self.test_file_name
        )
        assert os.path.exists(test_file_path) is True
    def test_add_and_double(self, csv_file):
        # csv_file is a custom fixture — presumably defined in a
        # conftest.py that is not part of this file; confirm there.
        print(csv_file)
        assert self.cal.add_and_double(1, 1) == 4
    def test_save(self, tmpdir):
        # tmpdir is pytest's built-in per-test temporary directory fixture.
        self.cal.save(tmpdir, self.test_file_name)
        test_file_path = os.path.join(
            tmpdir, self.test_file_name
        )
        assert os.path.exists(test_file_path) is True
    def test_add_and_double_raise(self):
        # Non-numeric (string) input must raise ValueError.
        with pytest.raises(ValueError):
            self.cal.add_and_double('1', '1')
| [
"n.abe@gemcook.com"
] | n.abe@gemcook.com |
6649e00fafca3d64cad131fe2b2c0d4be4921d60 | 0ee5ae0b71b81419d4534b2ed8681e28a1ed9ddb | /arxivanalysis/cons.py | 56a990e9ed4ffdff6869afc9a19b71c8628b952e | [
"MIT"
] | permissive | refraction-ray/arxiv-analysis | a0f4542298ecc427a49ec9bb026f0ef31699a7f5 | 10b72853920bc653d5622b17da817a1fc1d83c4e | refs/heads/master | 2023-05-02T16:33:01.629979 | 2023-04-17T02:35:58 | 2023-04-17T02:35:58 | 160,020,387 | 2 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,214 | py | """
some constants
"""
# Three-letter weekday abbreviations, Monday-first (index 0 == "Mon").
weekdaylist = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
category = {
"astro-ph": "Astrophysics",
"astro-ph.CO": "Cosmology and Nongalactic Astrophysics",
"astro-ph.EP": "Earth and Planetary Astrophysics",
"astro-ph.GA": "Astrophysics of Galaxies",
"astro-ph.HE": "High Energy Astrophysical Phenomena",
"astro-ph.IM": "Instrumentation and Methods for Astrophysics",
"astro-ph.SR": "Solar and Stellar Astrophysics",
"cond-mat.dis-nn": "Disordered Systems and Neural Networks",
"cond-mat.mes-hall": "Mesoscale and Nanoscale Physics",
"cond-mat.mtrl-sci": "Materials Science",
"cond-mat.other": "Other Condensed Matter",
"cond-mat.quant-gas": "Quantum Gases",
"cond-mat.soft": "Soft Condensed Matter",
"cond-mat.stat-mech": "Statistical Mechanics",
"cond-mat.str-el": "Strongly Correlated Electrons",
"cond-mat.supr-con": "Superconductivity",
"cs.AI": "Artificial Intelligence",
"cs.AR": "Hardware Architecture",
"cs.CC": "Computational Complexity",
"cs.CE": "Computational Engineering, Finance, and Science",
"cs.CG": "Computational Geometry",
"cs.CL": "Computation and Language",
"cs.CR": "Cryptography and Security",
"cs.CV": "Computer Vision and Pattern Recognition",
"cs.CY": "Computers and Society",
"cs.DB": "Databases",
"cs.DC": "Distributed, Parallel, and Cluster Computing",
"cs.DL": "Digital Libraries",
"cs.DM": "Discrete Mathematics",
"cs.DS": "Data Structures and Algorithms",
"cs.ET": "Emerging Technologies",
"cs.FL": "Formal Languages and Automata Theory",
"cs.GL": "General Literature",
"cs.GR": "Graphics",
"cs.GT": "Computer Science and Game Theory",
"cs.HC": "Human-Computer Interaction",
"cs.IR": "Information Retrieval",
"cs.IT": "Information Theory",
"cs.LG": "Machine Learning",
"cs.LO": "Logic in Computer Science",
"cs.MA": "Multiagent Systems",
"cs.MM": "Multimedia",
"cs.MS": "Mathematical Software",
"cs.NA": "Numerical Analysis",
"cs.NE": "Neural and Evolutionary Computing",
"cs.NI": "Networking and Internet Architecture",
"cs.OH": "Other Computer Science",
"cs.OS": "Operating Systems",
"cs.PF": "Performance",
"cs.PL": "Programming Languages",
"cs.RO": "Robotics",
"cs.SC": "Symbolic Computation",
"cs.SD": "Sound",
"cs.SE": "Software Engineering",
"cs.SI": "Social and Information Networks",
"cs.SY": "Systems and Control",
"econ.EM": "Econometrics",
"eess.AS": "Audio and Speech Processing",
"eess.IV": "Image and Video Processing",
"eess.SP": "Signal Processing",
"gr-qc": "General Relativity and Quantum Cosmology",
"hep-ex": "High Energy Physics - Experiment",
"hep-lat": "High Energy Physics - Lattice",
"hep-ph": "High Energy Physics - Phenomenology",
"hep-th": "High Energy Physics - Theory",
"math-ph": "Mathematical Physics",
"math.AC": "Commutative Algebra",
"math.AG": "Algebraic Geometry",
"math.AP": "Analysis of PDEs",
"math.AT": "Algebraic Topology",
"math.CA": "Classical Analysis and ODEs",
"math.CO": "Combinatorics",
"math.CT": "Category Theory",
"math.CV": "Complex Variables",
"math.DG": "Differential Geometry",
"math.DS": "Dynamical Systems",
"math.FA": "Functional Analysis",
"math.GM": "General Mathematics",
"math.GN": "General Topology",
"math.GR": "Group Theory",
"math.GT": "Geometric Topology",
"math.HO": "History and Overview",
"math.IT": "Information Theory",
"math.KT": "K-Theory and Homology",
"math.LO": "Logic",
"math.MG": "Metric Geometry",
"math.MP": "Mathematical Physics",
"math.NA": "Numerical Analysis",
"math.NT": "Number Theory",
"math.OA": "Operator Algebras",
"math.OC": "Optimization and Control",
"math.PR": "Probability",
"math.QA": "Quantum Algebra",
"math.RA": "Rings and Algebras",
"math.RT": "Representation Theory",
"math.SG": "Symplectic Geometry",
"math.SP": "Spectral Theory",
"math.ST": "Statistics Theory",
"nlin.AO": "Adaptation and Self-Organizing Systems",
"nlin.CD": "Chaotic Dynamics",
"nlin.CG": "Cellular Automata and Lattice Gases",
"nlin.PS": "Pattern Formation and Solitons",
"nlin.SI": "Exactly Solvable and Integrable Systems",
"nucl-ex": "Nuclear Experiment",
"nucl-th": "Nuclear Theory",
"physics.acc-ph": "Accelerator Physics",
"physics.ao-ph": "Atmospheric and Oceanic Physics",
"physics.app-ph": "Applied Physics",
"physics.atm-clus": "Atomic and Molecular Clusters",
"physics.atom-ph": "Atomic Physics",
"physics.bio-ph": "Biological Physics",
"physics.chem-ph": "Chemical Physics",
"physics.class-ph": "Classical Physics",
"physics.comp-ph": "Computational Physics",
"physics.data-an": "Data Analysis, Statistics and Probability",
"physics.ed-ph": "Physics Education",
"physics.flu-dyn": "Fluid Dynamics",
"physics.gen-ph": "General Physics",
"physics.geo-ph": "Geophysics",
"physics.hist-ph": "History and Philosophy of Physics",
"physics.ins-det": "Instrumentation and Detectors",
"physics.med-ph": "Medical Physics",
"physics.optics": "Optics",
"physics.plasm-ph": "Plasma Physics",
"physics.pop-ph": "Popular Physics",
"physics.soc-ph": "Physics and Society",
"physics.space-ph": "Space Physics",
"q-bio.BM": "Biomolecules",
"q-bio.CB": "Cell Behavior",
"q-bio.GN": "Genomics",
"q-bio.MN": "Molecular Networks",
"q-bio.NC": "Neurons and Cognition",
"q-bio.OT": "Other Quantitative Biology",
"q-bio.PE": "Populations and Evolution",
"q-bio.QM": "Quantitative Methods",
"q-bio.SC": "Subcellular Processes",
"q-bio.TO": "Tissues and Organs",
"q-fin.CP": "Computational Finance",
"q-fin.EC": "Economics",
"q-fin.GN": "General Finance",
"q-fin.MF": "Mathematical Finance",
"q-fin.PM": "Portfolio Management",
"q-fin.PR": "Pricing of Securities",
"q-fin.RM": "Risk Management",
"q-fin.ST": "Statistical Finance",
"q-fin.TR": "Trading and Market Microstructure",
"quant-ph": "Quantum Physics",
"stat.AP": "Applications",
"stat.CO": "Computation",
"stat.ME": "Methodology",
"stat.ML": "Machine Learning",
"stat.OT": "Other Statistics",
"stat.TH": "Statistics Theory",
}
field = {
"astro-ph": "Astrophysics",
"cond-mat": "Condensed Matter",
"gr-qc": "General Relativity and Quantum Cosmology",
"hep-ex": "High Energy Physics - Experiment",
"hep-lat": "High Energy Physics - Lattice",
"hep-ph": "High Energy Physics - Phenomenology",
"hep-th": "High Energy Physics - Theory",
"math-ph": "Mathematical Physics",
"nlin": "Nonlinear Sciences",
"nucl-ex": "Nuclear Experiment",
"nucl-th": "Nuclear Theory",
"physics": "Physics",
"quant-ph": "Quantum Physics",
"math": "Mathematics",
"CoRR": "Computing Research Repository",
"q-bio": "Quantitative Biology",
"q-fin": "Quantitative Finance",
"stat": "Statistics",
"eess": "Electrical Engineering and System Science",
"econ": "Economics",
}
| [
"kcanamgal@foxmail.com"
] | kcanamgal@foxmail.com |
e26636a9ae7ceac0ee556870e3f586cf4b775f62 | 032a1ad3c94e1126729417a16e2a95743d121244 | /cell_fitting/optimization/evaluation/plots_for_thesis/dap_mechanism/blocking_channels.py | 44fad4a980d5b5fbf8da14d9834d41c3231a5824 | [] | no_license | cafischer/cell_fitting | 0fd928f5ae59488e12c77648c2e6227c1911d0e9 | 75a81987e1b455f43b5abdc8a9baf6b8f863bee2 | refs/heads/master | 2021-01-23T19:27:30.635173 | 2019-09-14T08:46:57 | 2019-09-14T08:46:57 | 44,301,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,869 | py | import numpy as np
import os
import matplotlib.pyplot as pl
import matplotlib.gridspec as gridspec
from nrn_wrapper import Cell
from cell_fitting.optimization.evaluation import simulate_model, simulate_model_currents
from cell_fitting.optimization.evaluation.plot_blocking.block_channel import block_channel, \
block_channel_at_timepoint, plot_channel_block_on_ax
from cell_fitting.optimization.evaluation import get_spike_characteristics_dict
from cell_fitting.optimization.simulate import get_standard_simulation_params
from cell_characteristics.analyze_APs import get_spike_characteristics
pl.style.use('paper')
if __name__ == '__main__':
    # Paths to the fitted model, channel mechanisms and experimental data.
    # NOTE(review): these are hard-coded to one machine's filesystem.
    save_dir_img = '/home/cfischer/Dropbox/thesis/figures_results'
    save_dir_model = '/home/cfischer/Phd/programming/projects/cell_fitting/cell_fitting/results/best_models'
    mechanism_dir = '/home/cfischer/Phd/programming/projects/cell_fitting/cell_fitting/model/channels/vavoulis'
    #save_dir_data = '/home/cfischer/Phd/DAP-Project/cell_data/raw_data'
    save_dir_data = '/media/cfischer/TOSHIBA EXT/2019-04-03-Sicherung_all/Phd/DAP-Project/cell_data/raw_data'
    save_dir_data_plots = '/home/cfischer/Phd/programming/projects/cell_fitting/cell_fitting/data/plots'
    model = '2'
    exp_cell = '2015_08_26b'
    ramp_amp = 3.5  # amplitude passed to the 'rampIV' protocol
    standard_sim_params = get_standard_simulation_params()
    standard_sim_params['tstop'] = 162
    # create model cell
    cell = Cell.from_modeldir(os.path.join(save_dir_model, model, 'cell_rounded.json'), mechanism_dir)
    # simulate cell (unblocked reference trace and per-channel currents)
    v_model, t_model, i_inj = simulate_model(cell, 'rampIV', ramp_amp, **standard_sim_params)
    currents, channel_list = simulate_model_currents(cell, 'rampIV', ramp_amp, **standard_sim_params)
    # plot: 2x3 grid — top row blocks each channel for the whole trace,
    # bottom row blocks only after the fAHP minimum.
    fig = pl.figure(figsize=(11, 7))
    outer = gridspec.GridSpec(2, 3)
    # blocking ion channels whole trace (one panel per block percentage)
    axes = [outer[0, 0], outer[0, 1], outer[0, 2]]
    percent_blocks = [10, 50, 100]
    letters = ['A', 'B', 'C']
    for percent_block_idx, percent_block in enumerate(percent_blocks):
        ax = pl.Subplot(fig, axes[percent_block_idx])
        fig.add_subplot(ax)
        v_after_block = np.zeros((len(channel_list), len(t_model)))
        for i, channel_name in enumerate(channel_list):
            # Reload a fresh cell so earlier blocks do not accumulate.
            cell = Cell.from_modeldir(os.path.join(save_dir_model, model, 'cell.json'))
            block_channel(cell, channel_name, percent_block)
            v_after_block[i, :], _, _ = simulate_model(cell, 'rampIV', ramp_amp, **standard_sim_params)
        plot_channel_block_on_ax(ax, channel_list, t_model, v_model, v_after_block, percent_block,
                                 plot_with_ellipses=True)
        ax.set_ylim(-100, 60)
        ax.set_xlim(0, t_model[-1])
        ax.get_yaxis().set_label_coords(-0.15, 0.5)
        ax.text(-0.25, 1.0, letters[percent_block_idx], transform=ax.transAxes, size=18, weight='bold')
    # from cell_fitting.optimization.evaluation import get_spike_characteristics_dict
    # AP_width_before_block = get_spike_characteristics(v_after_block[4], t_model, ['AP_width'], -75, **get_spike_characteristics_dict())
    # AP_width_block_HCN = get_spike_characteristics(v_after_block[4], t_model, ['AP_width'], -75, **get_spike_characteristics_dict())
    # AP width is the same
    # blocking ion channels after AP: block is applied from the fAHP
    # minimum onwards rather than for the whole simulation.
    axes = [outer[1, 0], outer[1, 1], outer[1, 2]]
    letters = ['D', 'E', 'F']
    # First index where the injected current switches on; resting
    # potential is averaged over the pre-stimulus window.
    start_i_inj = np.where(np.diff(np.abs(i_inj)) > 0)[0][0] + 1
    v_rest = np.mean(v_model[0:start_i_inj])
    fAHP_min_idx = get_spike_characteristics(v_model, t_model, ['fAHP_min_idx'], v_rest,
                                             check=False, **get_spike_characteristics_dict())[0]
    for percent_block_idx, percent_block in enumerate(percent_blocks):
        ax = pl.Subplot(fig, axes[percent_block_idx])
        fig.add_subplot(ax)
        v_after_block = np.zeros((len(channel_list), len(t_model)))
        for i, channel_name in enumerate(channel_list):
            cell = Cell.from_modeldir(os.path.join(save_dir_model, model, 'cell.json'))
            block_channel_at_timepoint(cell, channel_name, percent_block,
                                       t_model[fAHP_min_idx]+standard_sim_params['onset'])
            v_after_block[i, :], _, _ = simulate_model(cell, 'rampIV', ramp_amp, **standard_sim_params)
        plot_channel_block_on_ax(ax, channel_list, t_model, v_model, v_after_block, percent_block,
                                 plot_with_ellipses=True)
        ax.set_ylim(-100, 60)
        ax.set_xlim(0, t_model[-1])
        ax.get_yaxis().set_label_coords(-0.15, 0.5)
        ax.text(-0.25, 1.0, letters[percent_block_idx], transform=ax.transAxes, size=18, weight='bold')
    pl.tight_layout()
    pl.subplots_adjust(left=0.07, bottom=0.07)
    #pl.savefig(os.path.join(save_dir_img, 'block_channels.png'))
    pl.show()
"coralinefischer@gmail.com"
] | coralinefischer@gmail.com |
b0aa9837b396935b4b74ad19f72b0b276e28e19b | be4e7d877a7a61237f3a58315158a20f125dc71c | /cartridge/shop/page_processors.py | b2c72c68cc708d9c80b20ff8a0708d8ed5dd9d4d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | krbanton/cartridge | 14d846e85524e743f83794e4628acaa29d24950d | 41deb8812cceacf47a057233e5a020c2ea04b786 | refs/heads/master | 2021-01-17T11:43:26.693347 | 2012-03-24T11:57:10 | 2012-03-24T11:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py |
from django.template.defaultfilters import slugify
from mezzanine.conf import settings
from mezzanine.pages.page_processors import processor_for
from mezzanine.utils.views import paginate
from cartridge.shop.models import Category, Product
@processor_for(Category)
def category_processor(request, page):
    """
    Add paging/sorting to the products for the category.
    """
    settings.use_editable()
    # Only products visible to the current user that match this
    # category's configured filters; distinct() guards against join
    # duplicates from the filter query.
    products = Product.objects.published(for_user=request.user
                                ).filter(page.category.filters()).distinct()
    # (slug, field) pairs for the configured sort options; the slug is
    # what appears in the ?sort= query parameter.
    sort_options = [(slugify(option[0]), option[1])
                    for option in settings.SHOP_PRODUCT_SORT_OPTIONS]
    # Default to the first configured option when no ?sort= is given.
    # NOTE(review): an unknown ?sort= value makes dict(...).get() return
    # None, i.e. order_by(None) — confirm that fallback is intended.
    sort_by = request.GET.get("sort", sort_options[0][0])
    products = paginate(products.order_by(dict(sort_options).get(sort_by)),
                        request.GET.get("page", 1),
                        settings.SHOP_PER_PAGE_CATEGORY,
                        settings.MAX_PAGING_LINKS)
    # Remember the active sort so the template can mark it selected.
    products.sort_by = sort_by
    return {"products": products}
| [
"steve@jupo.org"
] | steve@jupo.org |
6a572f3e0982bbe239ad1b6c507d6fc5419da16c | ed7fde0483a4836bfc9ef3ab887cf1220559bfc7 | /phd/i18_remove_ref.py | 69d65e676406a2e3eea0335ebd761ec9ae61f6b8 | [] | no_license | cizydorczyk/python_scripts | 326b3142a3c6ce850237e8b13e229854699c6359 | b914dcff60727bbfaa2b32e1a634ca9ca354eeeb | refs/heads/master | 2023-05-11T14:29:44.548144 | 2023-05-05T19:39:28 | 2023-05-05T19:39:28 | 116,588,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import os
import argparse
from Bio import SeqIO
def RemoveReference(seq_to_remove, fasta, output_fasta):
    """Copy *fasta* to *output_fasta*, dropping the named records.

    seq_to_remove is a comma-separated string of record IDs (e.g. the
    reference sequence) to exclude; fasta and output_fasta are file paths.
    """
    # FIXES: removed a leftover debug print(fasta); the id string was
    # re-split for every record (and membership-tested against a list) —
    # split once into a set instead.
    drop_ids = set(seq_to_remove.split(","))
    kept = [rec for rec in SeqIO.parse(fasta, "fasta")
            if rec.id not in drop_ids]
    with open(output_fasta, "w") as outfile:
        SeqIO.write(kept, outfile, "fasta")
| [
"conradizydorczyk@gmail.com"
] | conradizydorczyk@gmail.com |
c56397e5206162d095057fc4190656549c5a4445 | 6edb295c0eacc50655d83ece6566325fd0c46afb | /VBF/kfactors/plot.py | f59ed6e114e3f98f8fa4041c6fa080116a926eb1 | [
"MIT"
] | permissive | mcremone/PandaAnalysis | 74ffe64e77887d6622883b4d856d5ef61759eb92 | 078dc6ba435335ba1f8bceecb12459751ce3f5c3 | refs/heads/master | 2020-12-30T12:56:49.093685 | 2017-05-03T11:17:37 | 2017-05-03T11:17:37 | 91,379,788 | 0 | 5 | MIT | 2019-12-11T09:33:59 | 2017-05-15T20:05:33 | Python | UTF-8 | Python | false | false | 3,536 | py | #!/usr/bin/env python
from os import system,getenv
from sys import argv
import argparse
### SET GLOBAL VARIABLES ###
baseDir = '/home/snarayan/home000/store/kfactors/skimmed/'
parser = argparse.ArgumentParser(description='plot stuff')
parser.add_argument('--outdir',metavar='outdir',type=str)
args = parser.parse_args()
sname=argv[0]
argv=[]
import ROOT as root
from ROOT import gROOT
from PandaCore.Tools.Load import *
from PandaCore.Tools.Misc import *
from array import array
from math import sqrt
Load('Drawers','HistogramDrawer')
### DEFINE REGIONS ###
recoilBins = [200,250,300,350,400,500,600,1000]
nRecoilBins = len(recoilBins)-1
recoilBins = array('f',recoilBins)
ptBins = [100,120,160,200,250,300,350,400,450,500,550,600,650,700,800,900,1000,1200]
nPtBins = len(ptBins)-1
ptBins = array('f',ptBins)
plot = root.HistogramDrawer()
plot.SetTDRStyle()
plot.AddCMSLabel()
plot.Logy(True)
#plot.SetAbsMin(0.0001)
plot.InitLegend()
plotr = root.HistogramDrawer()
plotr.SetRatioStyle()
plotr.AddCMSLabel()
#plotr.InitLegend(.15,.6,.5,.8)
plotr.InitLegend()
counter=0
fzlo = root.TFile(baseDir+'z_lo.root'); tzlo = fzlo.Get('events')
fznlo = root.TFile(baseDir+'z_nlo.root'); tznlo = fznlo.Get('events')
fwlo = root.TFile(baseDir+'w_lo.root'); twlo = fwlo.Get('events')
fwnlo = root.TFile(baseDir+'w_nlo.root'); twnlo = fwnlo.Get('events')
ctmp = root.TCanvas()
def getDist(tree,var,bins,xlabel,cut='1==1'):
global counter
ctmp.cd()
if len(bins)==3:
h = root.TH1D('h%i'%counter,'h%i'%counter,bins[0],bins[1],bins[2])
scale=False
else:
h = root.TH1D('h%i'%counter,'h%i'%counter,len(bins)-1,bins)
scale=True
h.GetXaxis().SetTitle(xlabel)
h.GetYaxis().SetTitle('')
tree.Draw('%s>>h%i'%(var,counter),'weight*(%s)'%(cut))
if scale:
h.Scale(1,'width')
counter += 1
h.SetFillStyle(0)
return h
def plotDist(V,dists,cut):
if V=='Z':
tlo = tzlo
tnlo = tznlo
else:
tlo = twlo
tnlo = twnlo
toreturn = []
for d in dists:
hlo = getDist(tlo,d[0],d[1],d[2],cut)
hnlo = getDist(tnlo,d[0],d[1],d[2],cut)
toreturn.append((hlo,hnlo))
plot.AddHistogram(hlo,'%s LO'%(V),root.kSignal2)
plot.AddHistogram(hnlo,'%s NLO'%(V),root.kExtra2)
if len(d)<4 or d[3]==None:
plot.Draw(args.outdir,V+'_'+d[0])
else:
plot.Draw(args.outdir,V+'_'+d[3])
plot.Reset()
plot.AddCMSLabel()
return toreturn
def plotKFactors(V,hists,name):
# hists is a list of tuples (hlo, hnlo, label)
counter=0
for hlo,hnlo,label in hists:
hratio = hnlo.Clone()
hratio.Divide(hlo)
if counter==0:
hratio.SetMaximum(2); hratio.SetMinimum(0)
plotr.AddHistogram(hratio,label,root.kExtra1+counter)
hratioerr = hratio.Clone()
hratioerr.SetFillStyle(3004)
hratioerr.SetFillColorAlpha(root.kBlack,0.5)
hratioerr.SetLineWidth(0)
plotr.AddAdditional(hratioerr,'e2')
counter += 1
plotr.Draw(args.outdir,V+'_'+name)
plotr.Reset()
plotr.AddCMSLabel()
hmono = plotDist('Z',[('vpt',ptBins,'p_{T}^{V} [GeV]','vpt_monojet')],'njet>0 && jet1pt>100')[0]
hdi = plotDist('Z',[('vpt',ptBins,'p_{T}^{V} [GeV]','vpt_dijet')],'njet>1 && jet1pt>80 && jet2pt>40 && jet1eta*jet2eta<0')[0]
hvbf = plotDist('Z',[('vpt',ptBins,'p_{T}^{V} [GeV]','vpt_vbf')],'njet>1 && jet1pt>80 && jet2pt>40 && jet1eta*jet2eta<0 && mjj>1100')[0]
plotKFactors('Z',[(hmono[0],hmono[1],'Monojet'),
(hdi[0],hdi[1],'Dijet'),
(hvbf[0],hvbf[1],'VBF')],'kfactor_ptV')
#plotDist('W',[('vpt',recoilBins,'p_{T}^{V} [GeV]')])
| [
"sidn@mit.edu"
] | sidn@mit.edu |
ad1d4b5235569f72c77e592f75ebac24c3935bd0 | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/easy/p590_postorder.py | 5954e459ee2b9d55d61a3032eef31365502caca2 | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # Definition for a Node.
class Node:
    """N-ary tree node: a value plus a list of child Nodes."""
    def __init__(self, val, children):
        self.val = val
        self.children = children
class Solution:
    """Postorder traversal of an N-ary tree (children first, then node)."""
    def postorder(self, root: 'Node'):
        if root is None:
            return []
        # Iterative twist on the recursive DFS: walk the tree visiting a
        # node before its children (children pushed left-to-right, so they
        # are popped right-to-left); reversing that order yields postorder.
        visited = []
        stack = [root]
        while stack:
            node = stack.pop()
            visited.append(node.val)
            stack.extend(node.children)
        return visited[::-1]
# Quick manual smoke test: a single node with no children prints [1].
root = Node(1, [])
slu = Solution()
print(slu.postorder(root))
| [
"8390671@qq.com"
] | 8390671@qq.com |
3731f5f7f3dacddba1e20322528e5137c82ad1d6 | 8bcaec3e096158f875e08cc6c18df8f7ff1e2586 | /codechef/DEC20B-codechef/even_pair_sum.py | 779696b154a80646ec9874c9dde3f9f8c7ff1ced | [] | no_license | Aryamanz29/DSA-CP | 5693d7e169f3a165a64771efd713d1fa8dd3b418 | 306ebd4b623ec79c2657eeba1ff1ce0fc294be50 | refs/heads/master | 2023-04-08T23:33:20.406720 | 2021-04-11T20:27:26 | 2021-04-11T20:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | def get_even(n):
return n//2
for _ in range(int(input())):
    a, b = map(int, input().split())
    # Count pairs (i, j), 1 <= i <= a, 1 <= j <= b, whose sum is even.
    # i + j is even exactly when i and j share parity, so the answer is
    # (#evens in 1..a)*(#evens in 1..b) + (#odds in 1..a)*(#odds in 1..b).
    evens_a, evens_b = get_even(a), get_even(b)
    odds_a, odds_b = a - evens_a, b - evens_b
    print(evens_a * evens_b + odds_a * odds_b)
| [
"sankalp123427@gmail.com"
] | sankalp123427@gmail.com |
110b91f1cab239e9e82f0af4b73f1f032a8f1ff8 | c717b260750d9c733b40e668d2841dee92167699 | /hardware/mechanics/electronics_mount/main_plate/cnc/drill_4-40_insert_hole.py | e3384f36753a7e295809bf5c3374bc87e3420f2c | [] | no_license | hanhanhan-kim/noah_motion_system | b68e3fc6db1a0faea272ead7a22a043dfb80a6c8 | 5bea2750eac638b9f90720b10b5e2516f108c65b | refs/heads/master | 2022-11-06T08:20:49.977792 | 2017-10-06T00:12:05 | 2017-10-06T00:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | from __future__ import print_function
import os
import sys
from py2gcode import gcode_cmd
from py2gcode import cnc_dxf
feedrate = 50.0
fileName = 'main_plate.dxf'
stockThickness = 0.25
drillMargin = 0.125
startZ = 0.0
stopZ = -(stockThickness + drillMargin)
safeZ = 0.3
stepZ = 0.05
startDwell = 0.5
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))
param = {
'fileName' : fileName,
'layers' : ['4-40_insert_hole'],
'dxfTypes' : ['CIRCLE'],
'startZ' : startZ,
'stopZ' : stopZ,
'safeZ' : safeZ,
'stepZ' : stepZ,
'startDwell' : startDwell,
}
drill = cnc_dxf.DxfDrill(param)
prog.add(drill)
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.End(),comment=True)
baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
print('generating: {0}'.format(fileName))
prog.write(fileName)
| [
"will@iorodeo.com"
] | will@iorodeo.com |
6adf71907c74640fac9ba5a3035be2f8fa45d1b4 | 52908b901ebebbecf94f68c5ed4edb748d8b83d7 | /chatette/parsing/lexing/rule_percent_gen.py | 71f87422992a30e781efff7485c03d85652b9b94 | [
"MIT"
] | permissive | ImanMesgaran/Chatette | 6edef61740ba75ead35e240350359f1c3ee2de3c | fd22b6c2e4a27b222071c93772c2ae99387aa5c3 | refs/heads/master | 2023-07-01T16:36:39.865660 | 2021-06-08T20:14:24 | 2021-06-08T20:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # coding: utf-8
"""
Module `chatette.parsing.lexing.rule_percent_gen`
Contains the class representing the lexing rule meant to tokenize
percentage for the random generation modifiers.
"""
from chatette.parsing.lexing.lexing_rule import LexingRule
from chatette.parsing.lexing import LexicalToken, TerminalType
from chatette.parsing.lexing.rule_whitespaces import RuleWhitespaces
class RulePercentGen(LexingRule):
def _apply_strategy(self, **kwargs):
while self._text[self._next_index].isdigit():
self._next_index += 1
self._update_furthest_matched_index()
percentage = self._text[self._start_index:self._next_index]
if self._text[self._next_index] != '.':
if len(percentage) == 0:
self.error_msg = \
"Invalid token. Expected a percentage for the random " + \
"generation modifier."
return False
else:
percentage += '.'
self._next_index += 1
self._update_furthest_matched_index()
start_index_non_int_part = self._next_index
while self._text[self._next_index].isdigit():
self._next_index += 1
self._update_furthest_matched_index()
if self._next_index == start_index_non_int_part:
self.error_msg = \
"Invalid token. Cannot have a percentage with an empty " + \
"non-integral part."
return False
percentage += self._text[start_index_non_int_part:self._next_index]
if not self._try_to_match_rule(RuleWhitespaces):
self.error_msg = None
# Ignore tokens as this whitespace is not meaningful
if self._text[self._next_index] == '%':
self._next_index += 1
self._update_furthest_matched_index()
self._tokens.append(LexicalToken(TerminalType.percentgen, percentage))
return True
| [
"simon.gustin@hotmail.com"
] | simon.gustin@hotmail.com |
5f3d1ab0f166594fa5cfca4b4bae63f0cccd32fe | adc6d8ee596e4710c3241332758bb6990bdd8914 | /Imagenes doc/Evaluación/RE.py | b3429635eb088404f911957ccaa23ada29324936 | [] | no_license | NatalyTinoco/Trabajo-de-grado_Artefactos | cf9491c47a8a23ce5bab7c52498093a61319f834 | 5cc4e009f94c871c7ed0d820eb113398ac66ec2f | refs/heads/master | 2022-03-20T00:51:48.420253 | 2019-11-24T19:10:40 | 2019-11-24T19:10:40 | 197,964,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 21:59:15 2019
@author: Nataly
"""
from matplotlib import pyplot as plt
import cv2
import numpy as np
i=0
# Input image and its hand-made segmentation mask (same basename).
file='00000.jpg'
seg='00000_seg.jpg'
# OpenCV reads the image in BGR channel order.
img = cv2.imread(file)
def tloga(img):
    """Log-transform *img* and rescale so its maximum maps to 255 (uint8)."""
    peak = np.max(img)
    # log(1 + x) compresses bright values; dividing by log(1 + max)
    # normalises to [0, 1] before stretching to the 8-bit range.
    scaled = (np.log(img + 1) / np.log(1 + peak)) * 255
    return np.array(scaled, dtype=np.uint8)
# Log-transform, then swap channel order for matplotlib display.
# NOTE(review): img was loaded with cv2 (BGR), so COLOR_RGB2BGR here is
# likely meant to be COLOR_BGR2RGB — the two swaps are numerically the
# same for 3 channels, but confirm the intent.
img=tloga(img)
img=cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
segima=img.copy()
# Mask (grayscale): imaROI keeps the region of interest, imaROI1 is its
# negated copy used to select the complement region.
imaROI=cv2.imread(seg,0)
imaROI1=imaROI.copy()
# NOTE(review): the .copy() above is immediately overwritten and dead.
imaROI1=imaROI*-1
# Normalise both masks to {0, 1} so they act as multiplicative masks.
imaROI=cv2.normalize(imaROI, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)
imaROI1=cv2.normalize(imaROI1, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC3)
# Keep only the ROI in img (per channel) and show it.
for z in range(3):
    img[:,:,z]= img[:,:,z]*(imaROI)
plt.imshow(img)
plt.show()
#plt.hist(img.ravel(),[256])
#plt.show()
# Per-channel histograms of the ROI; bin range starts at 1 so the
# masked-out zeros do not dominate.
for i in range(3):
    hist = cv2.calcHist([img], [i], None, [256], [1, 256])
    plt.plot(hist)
plt.show()
# Same display + histograms for the complement region.
for z in range(3):
    segima[:,:,z]= segima[:,:,z]*imaROI1
plt.imshow(segima)
plt.show()
for i in range(3):
    hist = cv2.calcHist([segima], [i], None, [256], [1, 256])
    plt.plot(hist)
plt.show()
#plt.imshow(imaROI1,'Greys')
#plt.show
| [
"51056570+NatalyTinoco@users.noreply.github.com"
] | 51056570+NatalyTinoco@users.noreply.github.com |
75caa21cdb6da2ed748dafe135772382e987e81f | c1eb69dc5dc5b83d987d1bda0bd74a2d7d912fdf | /articles/migrations/0031_merge.py | d3d5ce4a90180e735765e1096d04c22ba755cb79 | [
"MIT"
] | permissive | CIGIHub/opencanada | 47c4e9268343aaaf0fe06b62c1838871968a0b87 | 6334ff412addc0562ac247080194e5d182e8e924 | refs/heads/staging | 2023-05-07T16:02:35.915344 | 2021-05-26T18:10:09 | 2021-05-26T18:10:09 | 36,510,047 | 8 | 2 | MIT | 2020-07-06T14:22:09 | 2015-05-29T14:43:28 | Python | UTF-8 | Python | false | false | 315 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Merge migration: joins the two divergent 0030 branches of the
    'articles' app. It applies no schema changes of its own, hence the
    empty operations list."""
    dependencies = [
        ('articles', '0030_auto_20150806_2136'),
        ('articles', '0030_articlecategory_include_main_image'),
    ]
    operations = [
    ]
| [
"csimpson@cigionline.org"
] | csimpson@cigionline.org |
a20131c6b9f7b27a6ae04fe5be74645df91f9be4 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/OpenAITaskRequest.py | 9d342c7d8e6e785ae43c35b9c816ca0b25ef9696 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 3,272 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpolardb.endpoint import endpoint_data
class OpenAITaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'polardb', '2017-08-01', 'OpenAITask','polardb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_NodeType(self): # String
return self.get_query_params().get('NodeType')
def set_NodeType(self, NodeType): # String
self.add_query_param('NodeType', NodeType)
def get_DescribeType(self): # String
return self.get_query_params().get('DescribeType')
def set_DescribeType(self, DescribeType): # String
self.add_query_param('DescribeType', DescribeType)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Password(self): # String
return self.get_query_params().get('Password')
def set_Password(self, Password): # String
self.add_query_param('Password', Password)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_DBClusterId(self): # String
return self.get_query_params().get('DBClusterId')
def set_DBClusterId(self, DBClusterId): # String
self.add_query_param('DBClusterId', DBClusterId)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
93c6d4f655e6cecaf0204c9fde501bd9f14f9b0a | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /program42.py | 583befdf151b9f7a4196e04186dc04cce05c8aa2 | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | a,b=map(int,input().split())
c,d=map(int,input().split())
if a==c:
print(a)
elif a>c and b>=d:
print(-1)
elif a<c and b<=d:
print(-1)
else:
x1=a;x2=b
| [
"ankitagrawal11b@gmail.com"
] | ankitagrawal11b@gmail.com |
8d820380e47b4db8af57aa047a0dc8cc8e697560 | d6fe71e3e995c03b8f5151ab1d53411b77b325ba | /walklist_api_service/models/response.py | 2168793ce5bbd0c675b43d9a5a3dd17848c6d775 | [] | no_license | mwilkins91/petpoint-scraper | 95468ae9951deaa8bd3bef7d88c0ff660146c1a3 | dd0c60c68fc6a7d11358aa63d28fdf07fff3c7cd | refs/heads/master | 2022-11-27T00:02:50.654404 | 2020-08-09T18:41:40 | 2020-08-09T18:41:40 | 286,180,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | # coding: utf-8
"""
The Enrichment List
The THS enrichment list # noqa: E501
OpenAPI spec version: 1.0.0
Contact: contactme@markwilkins.co
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Response(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'payload': 'AnyOfResponsePayload',
'meta': 'ResponseMeta'
}
attribute_map = {
'payload': 'payload',
'meta': 'meta'
}
def __init__(self, payload=None, meta=None): # noqa: E501
"""Response - a model defined in Swagger""" # noqa: E501
self._payload = None
self._meta = None
self.discriminator = None
if payload is not None:
self.payload = payload
if meta is not None:
self.meta = meta
@property
def payload(self):
"""Gets the payload of this Response. # noqa: E501
:return: The payload of this Response. # noqa: E501
:rtype: AnyOfResponsePayload
"""
return self._payload
@payload.setter
def payload(self, payload):
"""Sets the payload of this Response.
:param payload: The payload of this Response. # noqa: E501
:type: AnyOfResponsePayload
"""
self._payload = payload
@property
def meta(self):
"""Gets the meta of this Response. # noqa: E501
:return: The meta of this Response. # noqa: E501
:rtype: ResponseMeta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this Response.
:param meta: The meta of this Response. # noqa: E501
:type: ResponseMeta
"""
self._meta = meta
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(getResponse(), dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"contactme@markwilkins.co"
] | contactme@markwilkins.co |
394db0d9907aa1558d646da41d52cb08d950dc1c | 0652d264baea6238c0b581f17fdf2ff6cb45f537 | /websauna/system/form/csrf.py | 79d964adf49107a108eb25f1ef5598df2cef83c9 | [
"MIT",
"Apache-2.0"
] | permissive | gitter-badger/websauna | f12fc57322c9c86bb2859a30c346858e8ede209e | 09c07d80a831d1f718ec05aea0f85293a1198063 | refs/heads/master | 2021-01-22T19:15:15.071709 | 2016-04-21T15:10:30 | 2016-04-21T15:10:30 | 56,784,419 | 0 | 0 | null | 2016-04-21T15:19:24 | 2016-04-21T15:19:24 | null | UTF-8 | Python | false | false | 456 | py | """Deform CSRF token support."""
import colander
import deform
from pyramid_deform import deferred_csrf_validator
from pyramid_deform import deferred_csrf_value
def add_csrf(schema: colander.Schema):
"""Add a hidden CSRF field on the schema."""
csrf_token = colander.SchemaNode(colander.String(), name="csrf_token", widget=deform.widget.HiddenWidget(), default=deferred_csrf_value, validator=deferred_csrf_validator,)
schema.add(csrf_token)
| [
"mikko@opensourcehacker.com"
] | mikko@opensourcehacker.com |
89a49c4c96b660fbd71aa567dc005a322340dde8 | 330899fd4a9653e05e2a09e0a4f30c119af97ad4 | /python/hidet/transforms/common/scope.py | e171328b5e5e34acb2c17ac4d9e0d7127f5ec878 | [
"Apache-2.0"
] | permissive | yaoyaoding/hidet-artifacts | f8a4707c7fc28aa7bfa4dab3a9f2a9387c020f99 | f2e9767bb2464bd0592a8ec0b276f97481f13df2 | refs/heads/main | 2023-04-30T13:12:57.350002 | 2023-04-24T19:37:34 | 2023-04-24T19:37:34 | 551,692,225 | 3 | 1 | Apache-2.0 | 2022-11-01T23:25:17 | 2022-10-14T22:40:28 | Python | UTF-8 | Python | false | false | 5,309 | py | from typing import List, Dict, Optional, ContextManager
from hidet.ir.type import ScalarType, FuncType
from hidet.ir.expr import Expr, Var, BitwiseAnd, LeftShift, BitwiseOr
from hidet.ir.functors import collect
from hidet.ir.stmt import LetStmt, ForStmt
from hidet.ir.func import Function
from hidet.ir.functors import FuncStmtExprRewriter
class Scope:
"""
Every variable (i.e., parameter variable, local variable, loop variable, let variable) much be declared or defined
in a scope. Parameter, local and loop variable should be declared, because we should not move it place. Every
let variable should be defined (with their value).
"""
def __init__(self, stack, scope_stmt):
self.stack: 'ScopeStack' = stack
self.scope_stmt = scope_stmt
self.level = None
self.parent: Optional['Scope'] = None
self.declare_vars: List[Var] = []
self.defined_vars: List[Var] = []
self.var2value: Dict[Var, Optional[Expr]] = {}
self.defined_predicates: List[List[Expr]] = []
self.predicate_vars: List[Var] = []
def __enter__(self):
scopes = self.stack.scopes
self.parent = scopes[0] if len(scopes) > 0 else None
self.level = len(scopes)
scopes.append(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
scope = self.stack.scopes.pop()
assert scope is self
def declare(self, var: Var):
# declare a variable at current scope
self.declare_vars.append(var)
self.var2value[var] = None
assert var not in self.stack.var2scope
self.stack.var2scope[var] = self
def define(self, var: Var, value: Expr):
self.defined_vars.append(var)
self.var2value[var] = value
assert var not in self.stack.var2scope
self.stack.var2scope[var] = self
def define_predicate(self, predicate: Expr) -> Expr:
if len(self.defined_predicates) == 0 or len(self.defined_predicates[-1]) == 32:
var = Var('p', type=ScalarType('uint32'))
self.defined_predicates.append([])
self.predicate_vars.append(var)
self.stack.var2scope[var] = self
self.defined_predicates[-1].append(predicate)
mask = 1 << (len(self.defined_predicates[-1]) - 1)
return BitwiseAnd(self.predicate_vars[-1], mask)
def wrap(self, body):
# wrap the body with defined variables at current scope
bind_vars = self.defined_vars
bind_values = [self.var2value[var] for var in bind_vars]
for p_var, p_exprs in zip(self.predicate_vars, self.defined_predicates):
bind_vars.append(p_var)
bind_values.append(BitwiseOr.join_list([LeftShift(p, idx) for idx, p in enumerate(p_exprs)]))
if len(bind_vars) > 0:
ret = LetStmt(bind_vars, bind_values, body)
else:
ret = body
for var in self.defined_vars + self.declare_vars:
del self.stack.var2scope[var]
return ret
class ScopeStack:
def __init__(self):
self.scopes = []
self.var2scope: Dict[Var, Scope] = {}
def find_scope_for_expr(self, expr) -> 'Scope':
used_vars = collect(expr, Var)
levels = [self.var2scope[used_var].level for used_var in used_vars if not isinstance(used_var.type, FuncType)]
max_level = max(levels)
return self.scopes[max_level]
def new_scope(self, scope_stmt=None):
return Scope(self, scope_stmt)
def current(self) -> Scope:
assert len(self.scopes) > 0
return self.scopes[-1]
class FuncStmtExprRewriterWithScope(FuncStmtExprRewriter):
def __init__(self, use_memo=False):
super().__init__(use_memo=use_memo)
self.scope_stack = ScopeStack()
def new_scope(self, stmt=None) -> ContextManager[Scope]:
return self.scope_stack.new_scope(stmt)
def scope_to_define(self, expr: Expr) -> Scope:
return self.scope_stack.find_scope_for_expr(expr)
def visit_Function(self, func: Function):
with self.new_scope(None) as scope:
for extern_var in func.extern_vars:
scope.declare(extern_var)
for param in func.params:
scope.declare(param)
for local_var in func.local_vars:
scope.declare(local_var)
for local_const_var, _ in func.local_const_vars:
scope.declare(local_const_var)
body = scope.wrap(self.visit(func.body))
return Function(func.name, func.params, body, func.ret_type, kind=func.kind, local_vars=func.local_vars,
local_const_vars=func.local_const_vars, extern_vars=func.extern_vars, attrs=func.attrs)
def visit_ForStmt(self, stmt: ForStmt):
with self.new_scope(stmt) as scope:
self.visit(stmt.extent)
scope.declare(stmt.loop_var)
body = scope.wrap(self.visit(stmt.body))
return ForStmt(stmt.loop_var, stmt.extent, stmt.unroll, body)
def visit_LetStmt(self, stmt: LetStmt):
with self.new_scope(stmt) as scope:
for var, value in zip(stmt.bind_vars, stmt.bind_values):
scope.define(var, self.visit(value))
return scope.wrap(self.visit(stmt.body))
| [
"dingyaoyao.cs@gmail.com"
] | dingyaoyao.cs@gmail.com |
7834b8677f64f35c4cc8daa3874916b64985b960 | d9f7123433fe473cfa2fd5c3438251f83ffb326c | /apps/friends/migrations/0001_initial.py | 16167c1f6e319ab036c0be97c12a1794ba42f116 | [] | no_license | mazurbeam/friends | 6c2d201220db52bc85eb1869fd6685eee372e920 | 1dc2432ad371113c0979158053c821a449ebbc6c | refs/heads/master | 2021-01-01T18:27:12.875643 | 2017-07-25T20:46:08 | 2017-07-25T20:46:08 | 98,345,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-25 17:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('login', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Friend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('friends', models.ManyToManyField(to='login.User')),
],
),
]
| [
"mazurbeam@gmail.com"
] | mazurbeam@gmail.com |
4ec44310093c3c6d0fdd8224e882899b6e273eb1 | 009df7ad499b19a4df066160cf0c7d8b20355dfb | /src/the_tale/the_tale/game/actions/relations.py | 88e1482deff3017d5f67533c4ecbe384f063fd64 | [
"BSD-3-Clause"
] | permissive | devapromix/the-tale | c0804c7475e877f12f29444ddbbba025561d3412 | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | refs/heads/develop | 2020-03-28T20:26:30.492292 | 2018-10-07T17:32:46 | 2018-10-07T17:32:46 | 149,070,887 | 1 | 0 | BSD-3-Clause | 2018-10-07T17:32:47 | 2018-09-17T04:57:50 | Python | UTF-8 | Python | false | false | 3,039 | py |
import smart_imports
smart_imports.all()
UNINITIALIZED_STATE = 'uninitialized'
class ACTION_EVENT(rels_django.DjangoEnum):
records = (('DISHONORABLE', 0, 'бесчестный герой'),
('NOBLE', 1, 'благородный герой'),
('AGGRESSIVE', 2, 'аггрессивный герой'),
('PEACEABLE', 3, 'миролюбивый герой'),)
class ACTION_HABIT_MODE(rels_django.DjangoEnum):
records = (('AGGRESSIVE', 0, 'агрессивное действие'),
('PEACEFUL', 1, 'мирное действие'),
('COMPANION', 2, 'зависит от спутника'))
class ACTION_EVENT_REWARD(rels_django.DjangoEnum):
priority = rels.Column(unique=False)
records = (('NOTHING', 0, 'без награды', c.HABIT_EVENT_NOTHING_PRIORITY),
('MONEY', 1, 'деньги', c.HABIT_EVENT_MONEY_PRIORITY),
('ARTIFACT', 2, 'артефакт', c.HABIT_EVENT_ARTIFACT_PRIORITY),
('EXPERIENCE', 3, 'опыт', c.HABIT_EVENT_EXPERIENCE_PRIORITY))
class ACTION_TYPE(rels_django.DjangoEnum):
meta = rels.Column(unique=False)
technical = rels.Column(unique=False)
records = (('IDLENESS', 0, 'герой бездельничает', False, False),
('QUEST', 1, 'герой выполненяет задание', False, False),
('MOVE_TO', 2, 'герой путешествует между городами', False, False),
('BATTLE_PVE_1X1', 3, 'герой сражается 1x1 с монстром', False, False),
('RESURRECT', 4, 'герой воскресает', False, False),
('IN_PLACE', 5, 'герой в городе', False, False),
('REST', 6, 'герой лечится', False, False),
('EQUIPPING', 7, 'герой экипируется', False, False),
('TRADING', 8, 'герой торгует', False, False),
('MOVE_NEAR_PLACE', 9, 'герой путешествует около города', False, False),
('REGENERATE_ENERGY', 10, 'герой восстановливает энергию Хранителю', False, False),
('DO_NOTHING', 11, 'техническое действие для особых действий героя в заданиях', False, False),
('META_PROXY', 12, 'техническое прокси-действие для взаимодействия героев', False, True),
('ARENA_PVP_1X1', 13, 'герой сражается 1x1 с другим героем', True, False),
('TEST', 14, 'техническое действие для тестов', False, True),
('HEAL_COMPANION', 15, 'герой ухаживает за спутником', False, False),
('FIRST_STEPS', 16, 'действия героя сразу после иницииации', False, False))
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.