blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8bcd9a0128a396b0316aa7aa231cffccc66c9058 | c0973d6939ef419ed3d261d95167d537499a553a | /OnePy/sys_module/models/base_log.py | 10be52514e22ae2b01ccf0f4a61d1e1ead280e7d | [
"MIT"
] | permissive | mj3428/OnePy | 0c6e4be9b4bb36ae66b566dfa85cd44bae2a07de | 8dc13fc21502daa5786aecaa4451ccba32fc8a14 | refs/heads/master | 2020-04-05T10:28:33.550915 | 2018-11-08T04:07:05 | 2018-11-08T04:07:05 | 134,518,682 | 0 | 0 | MIT | 2018-05-23T05:38:12 | 2018-05-23T05:38:11 | null | UTF-8 | Python | false | false | 1,252 | py | import abc
from dataclasses import dataclass, field
from OnePy.constants import ActionType
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.sys_module.models.signals import SignalByTrigger
@dataclass
class TradeLogBase(OnePyEnvBase, abc.ABC):
    """Abstract record of one round-trip trade (entry and exit).

    Concrete subclasses populate the ``field(init=False)`` attributes in
    ``generate`` and ``settle_left_trade``.
    """
    # NOTE(review): buy/sell are read as order objects below
    # (self.buy.action_type, self.buy.ticker), so the float annotations look
    # inaccurate -- confirm before relying on them.
    buy: float = None
    sell: float = None
    size: float = None
    # Derived bookkeeping, filled in after construction (init=False).
    entry_date: str = field(init=False)
    exit_date: str = field(init=False)
    entry_price: float = field(init=False)
    exit_price: float = field(init=False)
    entry_type: str = field(init=False)
    exit_type: str = field(init=False)
    pl_points: float = field(init=False)  # price points gained/lost per unit
    re_pnl: float = field(init=False)  # realized profit and loss
    commission: float = field(init=False)
    @abc.abstractmethod
    def generate(self):
        # Subclasses must build the completed trade-log entry here.
        raise NotImplementedError
    def _earn_short(self):
        # Sign convention: -1 for short entries so P&L arithmetic flips.
        return -1 if self.buy.action_type == ActionType.Short else 1
    @staticmethod
    def _get_order_type(order):
        # Trigger-generated orders carry their order type on the signal.
        if isinstance(order.signal, SignalByTrigger):
            return order.signal.order_type.value
        else:
            return order.order_type.value
    @abc.abstractmethod
    def settle_left_trade(self):
        # Subclasses must close out any still-open position at the end.
        raise NotImplementedError
    @property
    def ticker(self):
        # Delegate to the entry order's ticker symbol.
        return self.buy.ticker
| [
"chenjiayicjy@126.com"
] | chenjiayicjy@126.com |
f4c813ac9b4e86dd3be85f3f246de0881a301805 | 8eef99280d8c821ac7ea8b275140e356f84bc7df | /Lab Exercise 12.23.2020 Raspberry Pi Programs/Morse Code/morse.py | cc765cef2c9e55623346adac9895c0b82174aa78 | [] | no_license | nmessa/Python-2020 | 7f609f383b6771bc77ea4b82be381beacb0f0fa0 | 8860a3563a89596becf897a2763a169be7f3027f | refs/heads/master | 2023-04-03T22:37:02.221375 | 2021-04-18T15:20:03 | 2021-04-18T15:20:03 | 269,480,165 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | import RPi.GPIO as GPIO
import time
# One-time GPIO setup: BCM pin numbering, pin 18 as output (LED/buzzer).
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
def codeGen(s):
    """Translate a string to Morse code and flash it on GPIO pin 18.

    The encoded pattern is printed first; letters are separated by a
    single space, which is emitted as a 0.1 s pause.
    """
    morse = ''
    for ch in s.lower():  # lower case keeps the lookup table small
        if ch == '\n':
            morse += '\n'
        else:
            morse += convert(ch) + ' '
    print(morse)
    # Flash the pattern: '.' -> dot(), '-' -> dash(), ' ' -> letter gap.
    for symbol in morse:
        if symbol == '.':
            dot()
        elif symbol == '-':
            dash()
        elif symbol == ' ':
            time.sleep(0.1)
def convert(letter):
    """Return the International Morse pattern for a single character.

    Characters without a dictionary entry translate to the empty string.
    """
    # International Morse Code dictionary
    table = {'a':'.-', 'b':'-...', 'c':'-.-.', 'd':'-..', 'e':'.',
             'f':'..-.', 'g':'--.', 'h':'....', 'i':'..', 'j':'.---',
             'k':'-.-', 'l':'.-..', 'm':'--', 'n':'-.', 'o':'---',
             'p':'.--.', 'q':'--.-', 'r':'.-.', 's':'...', 't':'-',
             'u':'..-', 'v':'...-', 'w':'.--', 'x':'-..-', 'y':'-.--',
             'z':'--..', ' ':' ', '1':'.----', '2':'..---',
             '3':'...--', '4':'....-', '5':'.....', '6':'-....',
             '7':'--...', '8':'---..', '9':'----.', '0':'-----'}
    return table.get(letter, '')
def dot():
    # Morse dot: 0.1 s pulse on pin 18, followed by a 0.1 s intra-letter gap.
    GPIO.output(18, True)
    time.sleep(0.1)
    GPIO.output(18, False)
    time.sleep(0.1)
def dash():
    # Morse dash: 1 s pulse on pin 18, then a 0.1 s intra-letter gap.
    # NOTE(review): a dash is conventionally 3x the dot length (0.3 s here);
    # 1 s makes dashes disproportionately long -- confirm this is intended.
    GPIO.output(18, True)
    time.sleep(1)
    GPIO.output(18, False)
    time.sleep(0.1)
# Script entry point: prompt for a phrase and transmit it.
# raw_input indicates this script targets Python 2.
phrase = raw_input("Enter a phrase: ")
codeGen(phrase)
| [
"noreply@github.com"
] | nmessa.noreply@github.com |
d432304a6c98d9ea385f8b61354fe86e2a5fed5d | 09571020d6ad1a240877eaeca2ab07f04120fde4 | /tutorial/quickstart/migrations/0003_hockeyplayer.py | 448783f8661f30cfc046af4b8d27596fcf31e8e2 | [] | no_license | mrozenbaum/hockey-api | d6c0834be7a1b4a380cdff6a72192638b9b5e716 | e7d8d3c042f271e315211b9ccec7d71ed4ff6a53 | refs/heads/master | 2021-01-19T22:23:50.651350 | 2017-04-19T20:57:35 | 2017-04-19T20:57:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-19 20:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the HockeyPlayer model.
    dependencies = [
        ('quickstart', '0002_cohort'),
    ]
    operations = [
        migrations.CreateModel(
            name='HockeyPlayer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('playername', models.CharField(max_length=55)),
                ('position', models.CharField(max_length=20)),
                # NOTE(review): references quickstart.HockeyTeam, which is not
                # created by the listed dependencies -- confirm it exists by
                # this point in the migration graph.
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quickstart.HockeyTeam')),
            ],
        ),
    ]
| [
"chortlehoort@gmail.com"
] | chortlehoort@gmail.com |
a7e4cccaa0795f356e4528bde1b4bb8cb788d810 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_annihilate.py | 57243445612083d77876747145ea043a44738353 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py |
#class header
class _ANNIHILATE():
def __init__(self,):
self.name = "ANNIHILATE"
self.definitions = [u'to destroy something completely so that nothing is left: ', u'to defeat completely: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b846ce7ad380699bf89f611fb2e6c05dee54550e | acaed6ad4a2bb3f6df553debf547d0feafdd99e9 | /Challenges/morse.py | 5fe43b0158efdcf53afcf80c0306d2026f423c7b | [] | no_license | SAM1363/TH-Python | 764691b7b8281b3298ace985039ee9c05ef340a1 | 421c4d7f54ed56233a87c7d9907ac3d1ab993c94 | refs/heads/master | 2020-05-30T01:12:49.404476 | 2019-08-04T04:45:32 | 2019-08-04T04:45:32 | 189,472,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Letter:
    def __init__(self, pattern=None):
        # pattern: sequence of '.' (dot) and '_' (dash) symbols;
        # None means "no pattern yet" (subclasses normally supply one).
        self.pattern = pattern
def __str__(self):
result = []
for each in self.pattern:
if each == '.':
result.append('dot')
elif each == '_':
result.append('dash')
return '-'.join(result)
def __iter__(self):
yield from self.pattern
@classmethod
def from_string(cls, str):
result = []
slpit_string = str.split('-')
for each in slpit_string:
if each == 'dot':
result.append('.')
else:
result.append('_')
return cls(result)
class S(Letter):
    """The letter S: three dots ('...')."""

    def __init__(self):
        super().__init__(['.', '.', '.'])
| [
"isamu3636136@gmail.com"
] | isamu3636136@gmail.com |
fb12f6696d2fb6efc97cfd0202498b5d1dec87b9 | 20a9787564f76ae0fcf2332a8655b21bae0646a3 | /DynamicProgramming/Basic/6_Perm_Coeff.py | 8932c04fa44244fdd4814f172d46f47e02fe4a76 | [] | no_license | nidhiatwork/Python_Coding_Practice | 3b33a40c947413c2695d3ee77728fa69430f14cd | 9d5071a8ddcda19181d3db029fb801d4e3233382 | refs/heads/master | 2023-02-08T20:50:47.522565 | 2023-02-04T10:04:10 | 2023-02-04T10:04:10 | 194,607,759 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | '''
Permutation refers to the process of arranging all the members of a given set to form a sequence. The number of permutations on a set of n elements is given by n! , where “!” represents factorial.
The Permutation Coefficient represented by P(n, k) is used to represent the number of ways to obtain an ordered subset having k elements from a set of n elements.
Mathematically it’s given as:
nPk = n! / (n-k)! = n.(n-1).(n-2)...(n-k+1)
The coefficient can also be computed recursively using the below recursive formula:
P(n, k) = P(n-1, k) + k* P(n-1, k-1)
'''
# Recursive way to compute the permutation coefficient
def my_permutationCoeff(n, k):
    """Return P(n, k) = n! / (n-k)! computed recursively.

    Fixes the original base case, which returned 1 whenever n < k;
    mathematically P(n, k) = 0 when k > n (cannot arrange k items
    drawn from only n).
    """
    if k == 0:
        # Exactly one way to arrange zero items.
        return 1
    if n < k:
        # Not enough items to choose from.
        return 0
    # P(n, k) = n * P(n-1, k-1)
    return n * my_permutationCoeff(n - 1, k - 1)
# Driver Program to test above function
print(my_permutationCoeff(5,3))  # expected: 60
# A O(n) solution that uses table fact[] to calculate the Permutation Coefficient
def dp_permutationCoeff(n, k):
    """Compute P(n, k) = n! / (n-k)! using a table of factorials."""
    # factorial[i] holds i!; seed 0! = 1 and fill upward.
    factorial = [1] + [0] * n
    for value in range(1, n + 1):
        factorial[value] = value * factorial[value - 1]
    # P(n, k) = n! / (n-k)!
    return int(factorial[n] / factorial[n - k])
# Driver Code
n = 5
k = 3
print("Value of P(", n, ", ", k, ") is ",
      dp_permutationCoeff(n, k), sep = "")
# A O(n) time and O(1) extra space solution to calculate the Permutation Coefficient
def dp2_PermutationCoeff(n, k):
    """Compute P(n, k) = n! / (n-k)! in O(n) time and O(1) extra space.

    Fixes an UnboundLocalError in the original: when k == n the loop
    condition ``i == n - k`` (i.e. i == 0) never fires, so Fk was never
    assigned. Since (n-k)! == 0! == 1 in that case, Fk is now seeded to 1.
    """
    Fn = 1          # running n!
    Fk = 1          # (n-k)! ; 0! == 1 covers the k == n (and n == 0) cases
    for i in range(1, n + 1):
        Fn *= i
        if i == n - k:
            Fk = Fn  # snapshot (n-k)!
    coeff = Fn // Fk
    return coeff
# Driver Code
n = 5
k = 3
print("Value of P(", n, ", ", k, ") is ",
      dp2_PermutationCoeff(n, k), sep = "")
| [
"nidhi.bhushan123@gmail.com"
] | nidhi.bhushan123@gmail.com |
cd236863cf94dd66714d7ad0426d503cbf94f6aa | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/crossbench/crossbench/browsers/applescript.py | 4d16c9e311a186d03b56c6b594f6499b134122e3 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 4,432 | py | # Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import annotations
import abc
import json
import logging
import subprocess
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Tuple
from crossbench import helper
from crossbench.env import ValidationError
from .browser import Browser
if TYPE_CHECKING:
import datetime as dt
import pathlib
from crossbench.runner import Run, Runner
class AppleScript:
  """Helpers for building osascript (AppleScript) command strings."""

  @classmethod
  def with_args(cls, app_path: pathlib.Path, apple_script: str,
                **kwargs) -> Tuple[str, List[str]]:
    """Wrap apple_script in a `tell application` block.

    Every keyword argument becomes an argv-bound AppleScript variable
    named cb_input_<name>; the script body references it through a
    %(name)s placeholder. Returns (wrapped script, argv values).
    """
    argv_values: List[str] = []
    placeholder_map = {}
    bindings = []
    for position, (variable, value) in enumerate(kwargs.items(), start=1):
      argv_values.append(value)
      bound_name = f"cb_input_{variable}"
      placeholder_map[variable] = bound_name
      bindings.append(f"set {bound_name} to (item {position} of argv)")
    variables = "\n".join(bindings)
    formatted_script = apple_script.strip() % placeholder_map
    wrapper = f"""
    {variables}
    tell application "{app_path}"
    {formatted_script}
    end tell
    """
    return wrapper.strip(), argv_values

  @classmethod
  def js_script_with_args(cls, script: str, args: Sequence[object]) -> str:
    """Create a script that returns [JSON.stringify(result), true] on success,
    and [exception.toString(), false] when failing."""
    serialized_args: str = json.dumps(args)
    wrapped = """JSON.stringify((function exceptionWrapper(){
      try {
        return [(function(...arguments){%(script)s}).apply(globalThis, %(args_str)s), true]
      } catch(e) {
        return [e + "", false]
      }
    })())""" % {
        "script": script,
        "args_str": serialized_args
    }
    return wrapped.strip()
# Raised when the injected page script reports failure (success flag False).
# NOTE(review): AppleScriptBrowser.js raises this as
# AppleScript.JavaScriptFromAppleScriptException; at module level, as shown
# here, that attribute lookup would fail -- confirm whether this class is
# meant to be nested inside AppleScript.
class JavaScriptFromAppleScriptException(ValueError):
  pass
class AppleScriptBrowser(Browser, metaclass=abc.ABCMeta):
  """Browser driven through AppleScript (macOS only).

  Subclasses supply the per-browser script snippets in the three
  APPLE_SCRIPT_* class attributes and implement _setup_window().
  """
  APPLE_SCRIPT_ALLOW_JS_MENU: str = ""  # menu path that enables JS from Apple Events
  APPLE_SCRIPT_JS_COMMAND: str = ""  # snippet executing %(js_script)s in the page
  APPLE_SCRIPT_SET_URL: str = ""  # snippet navigating to %(url)s
  _browser_process: subprocess.Popen
  def _exec_apple_script(self, apple_script: str, **kwargs) -> Any:
    # Wrap the snippet in a `tell application` block and run it through the
    # platform's osascript helper; kwargs become argv-bound variables.
    assert self.platform.is_macos, (
        f"Sorry, f{self.__class__} is only supported on MacOS for now")
    wrapper_script, args = AppleScript.with_args(self.app_path, apple_script,
                                                 **kwargs)
    return self.platform.exec_apple_script(wrapper_script, *args)
  def start(self, run: Run) -> None:
    # Launch the browser binary, give it time to boot, bring it to the
    # foreground, then verify that JS-over-AppleScript works.
    assert not self._is_running
    # Start process directly
    startup_flags = self._get_browser_flags(run)
    self._browser_process = self.platform.popen(
        self.path, *startup_flags, shell=False)
    self._pid = self._browser_process.pid
    self.platform.sleep(3)
    self._exec_apple_script("activate")
    self._setup_window()
    self._check_js_from_apple_script_allowed(run)
  def _check_js_from_apple_script_allowed(self, run: Run) -> None:
    # Probe twice: on the first failure surface an env warning asking the
    # user to enable the menu option, then re-probe and hard-fail if still
    # disabled.
    try:
      self.js(run.runner, "return 1")
    except helper.SubprocessError as e:
      logging.error("Browser does not allow JS from AppleScript!")
      logging.debug(" SubprocessError: %s", e)
      run.runner.env.handle_warning(
          "Enable JavaScript from Apple Script Events: "
          f"'{self.APPLE_SCRIPT_ALLOW_JS_MENU}'")
    try:
      self.js(run.runner, "return 1;")
    except helper.SubprocessError as e:
      raise ValidationError(
          " JavaScript from Apple Script Events was not enabled") from e
    self._is_running = True
  @abc.abstractmethod
  def _setup_window(self) -> None:
    pass
  def js(
      self,
      runner: Runner,
      script: str,
      timeout: Optional[dt.timedelta] = None,
      arguments: Sequence[object] = ()
  ) -> Any:
    # Run `script` in the page; the wrapper produced by
    # AppleScript.js_script_with_args returns a JSON-encoded
    # [result, success] pair. timeout is currently unused.
    del runner, timeout
    js_script = AppleScript.js_script_with_args(script, arguments)
    json_result: str = self._exec_apple_script(
        self.APPLE_SCRIPT_JS_COMMAND.strip(), js_script=js_script).rstrip()
    result, is_success = json.loads(json_result)
    if not is_success:
      # NOTE(review): expects JavaScriptFromAppleScriptException to be an
      # attribute of AppleScript -- confirm the intended nesting.
      raise AppleScript.JavaScriptFromAppleScriptException(result)
    return result
  def show_url(self, runner: Runner, url: str) -> None:
    del runner
    self._exec_apple_script(self.APPLE_SCRIPT_SET_URL, url=url)
    # Give the page a moment to start loading before returning.
    self.platform.sleep(0.5)
  def quit(self, runner: Runner):
    del runner
    self._exec_apple_script("quit")
    # Terminate the process in case the AppleScript quit was ignored.
    self._browser_process.terminate()
| [
"jengelh@inai.de"
] | jengelh@inai.de |
d302fc652c70252a30435ebe768a86b5dc75f570 | 83fed4635c2c23d8f75cd1b9d58347ec165180f1 | /сортировка с подсчетом.py | 3922e3215bf5bdb669d9808e0613243a38f457ca | [] | no_license | Katerina964/study | d637c6e9108c55471d65993a55a10461a48322e3 | 231d843bc6d19a5648b0614b89b0d23b5e67aff1 | refs/heads/master | 2022-12-07T14:53:06.955617 | 2020-08-30T15:36:53 | 2020-08-30T15:36:53 | 281,940,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | myList = list(map(int, input().split()))
# Counting sort: tally the frequency of each value, then print every value
# in ascending order, repeated by its count (trailing-space separated).
counts = [0] * (max(myList) + 1)
for value in myList:
    counts[value] += 1
for value, frequency in enumerate(counts):
    if frequency != 0:
        print((str(value) + ' ') * frequency, end='')
| [
"katrin.balakina@gmail.com"
] | katrin.balakina@gmail.com |
991d4de69cf4d02e139524321ae35c28fde31c33 | 2100df7054545b15927d66d4ef2f0490e78d0761 | /pyrake/contrib/spiders/init.py | 6df9d17d493a8d3a5235ba3dc82bac66bff9bbfc | [
"MIT"
] | permissive | elkingtowa/pyrake | 79dd6053de604ad52df530e544e873c107333489 | 3f2fbe805e9b153d287fb50b5cb3f5b35495ac52 | refs/heads/master | 2020-12-24T14:01:53.009374 | 2014-08-29T04:44:56 | 2014-08-29T04:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | from pyrake.spider import Spider
from pyrake.utils.spider import iterate_spider_output
class InitSpider(Spider):
"""Base Spider with initialization facilities"""
def start_requests(self):
self._postinit_reqs = super(InitSpider, self).start_requests()
return iterate_spider_output(self.init_request())
def initialized(self, response=None):
"""This method must be set as the callback of your last initialization
request. See self.init_request() docstring for more info.
"""
return self.__dict__.pop('_postinit_reqs')
def init_request(self):
"""This function should return one initialization request, with the
self.initialized method as callback. When the self.initialized method
is called this spider is considered initialized. If you need to perform
several requests for initializing your spider, you can do so by using
different callbacks. The only requirement is that the final callback
(of the last initialization request) must be self.initialized.
The default implementation calls self.initialized immediately, and
means that no initialization is needed. This method should be
overridden only when you need to perform requests to initialize your
spider
"""
return self.initialized()
| [
"elkingtonx@gmail.com"
] | elkingtonx@gmail.com |
3194c4ab797ce1b8f4400fec690c51535ff905d0 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_21494-7018/sdB_EC_21494-7018_lc.py | c918f886d9b93de45b09eaaf342bc1347c108811 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
    # Single gPhoton gAperture extraction: NUV light curve for
    # sdB_EC_21494-7018 (30 s bins, ~20" radius with background annulus),
    # written to CSV.
    # NOTE(review): the csvfile path contains a space before the final '/'
    # ("...sdB_EC_21494-7018 /sdB_..."), which looks unintended -- confirm.
    gAperture(band="NUV", skypos=[328.421917,-70.075431], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_21494-7018 /sdB_EC_21494-7018_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
72f4109a0e0fedcb60f4f8b7d8fee1de0233209c | ba7be04fa897785fb9255df3ece0c1ffbead6acc | /accounts/urls.py | d18b9a7575e61e4a04f386d318d8904ada9213bd | [] | no_license | backupalisher/part4_project | e1f402553502d010ffe974ecce73e313f90b8174 | 09ca16e3021aeac609fe6594e5c4f6c72832d112 | refs/heads/master | 2022-12-10T13:22:15.899332 | 2020-09-21T16:22:02 | 2020-09-21T16:22:02 | 233,295,277 | 0 | 0 | null | 2022-12-08T10:47:45 | 2020-01-11T20:49:10 | Python | UTF-8 | Python | false | false | 975 | py | from django.contrib.auth import views
from django.urls import path
from .views import *
urlpatterns = [
    # Profile landing page at the app root.
    path('', profile, name='profile'),
    path('signup/', signup, name='signup'),
    # The routes below are kept commented out for reference; the auth and
    # password-reset flows are currently disabled.
    # path('login/', views.LoginView.as_view(), name='login'),
    # path('logout/', views.LogoutView.as_view(), name='logout'),
    # path('register/', register, name='register'),
    # path('password-change/', views.PasswordChangeView.as_view(), name='password_change'),
    # path('password-change/done/', views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    # path('password-reset/', views.PasswordResetView.as_view(), name='password_reset'),
    # path('password-reset/done/', views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    # path('reset/<uidb64>/<token>/', views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    # path('reset/done/', views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
| [
"server.ares@gmail.com"
] | server.ares@gmail.com |
e6d7c14bcb2ffebd82689fe6731cb9a2f1a52217 | 7ec35bd037077e9b65d3fa26a91978e8652c7409 | /Stream-3/Full-Stack-Development/15.Paypal-Subscriptions/2.Setup-Products-That-Require-Subscriptions/we_are_social/we_are_social/urls.py | 2dfb2fab48b9c51868e1f1371047d9fbfc68719e | [
"MIT"
] | permissive | GunnerJnr/_CodeInstitute | 8f743abef66c33a77ce13ca719963e93ffe22607 | efba0984a3dc71558eef97724c85e274a712798c | refs/heads/master | 2023-01-05T10:53:57.536047 | 2020-01-27T13:04:12 | 2020-01-27T13:04:12 | 99,014,961 | 8 | 6 | MIT | 2022-12-26T20:24:35 | 2017-08-01T15:15:55 | CSS | UTF-8 | Python | false | false | 2,124 | py | """we_are_social URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.flatpages import views
from paypal.standard.ipn import urls as paypal_urls
from paypal_store import views as paypal_views
from accounts import views as accounts_views
from products import views as product_views
from hello import views as hello_views
from magazines import views as magazine_views
urlpatterns = [ # pylint: disable-msg=C0103
    # admin backend url
    url(r'^admin/', admin.site.urls),
    # hello urls
    url(r'^$', hello_views.get_index, name='index'),
    # accounts urls
    url(r'^register/$', accounts_views.register, name='register'),
    url(r'^login/$', accounts_views.login, name='login'),
    url(r'^profile/$', accounts_views.profile, name='profile'),
    url(r'^logout/$', accounts_views.logout, name='logout'),
    url(r'^about/$', views.flatpage, {'url': '/pages/about/'}, name='about'),
    url(r'^cancel_subscription/$', accounts_views.cancel_subscription, name='cancel_subscription'),
    url(r'^subscriptions_webhook/$', accounts_views.subscriptions_webhook,
        name='subscriptions_webhook'),
    # paypal urls (obscure IPN endpoint by design)
    url(r'^a-very-hard-to-guess-url/', include(paypal_urls)),
    url(r'^paypal-return/', paypal_views.paypal_return),
    url(r'^paypal-cancel/', paypal_views.paypal_cancel),
    # products urls
    url(r'^products/$', product_views.all_products),
    # magazines urls
    url(r'^magazines/$', magazine_views.all_magazines),
]
| [
"gunnerjnr@live.co.uk"
] | gunnerjnr@live.co.uk |
7899fdfbbf13b5f9c779fbcd2eea944634d82a28 | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /tests2/materials/test_bsdf_next_dir.py | 428affae82f5fa97c9e60073db0a3eb83ef37d20 | [] | no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py |
import unittest
from random import random
from tdasm import Runtime
import renmas2
class TransmissionSampling(unittest.TestCase):
    """Compares the Python and assembly (tdasm) implementations of
    Material.next_direction_bsdf for a perfect transmission/reflection
    material; results are printed side by side rather than asserted."""
    def setUp(self):
        pass
    def asm_code1(self, ren):
        # Build the asm test harness: a hitpoint structure plus a stub that
        # loads the hitpoint address into eax and calls the material's
        # next-direction routine through next_dir_ptr.
        code = """
            #DATA
        """
        code += ren.structures.structs(("hitpoint",)) + """
            uint32 next_dir_ptr 
            hitpoint hp
            #CODE
            ; call next direction of material
            mov eax, hp
            call dword [next_dir_ptr]
            #END
        """
        return code
    def test_transmission_sampling(self):
        # Assemble a material with perfect transmission + perfect specular
        # reflection components (glass-like: eta 1.3 inside, 1.0 outside).
        factory = renmas2.Factory()
        ren = renmas2.Renderer()
        runtime = Runtime()
        mat = renmas2.core.material.Material(ren.converter.zero_spectrum())
        eta_in = 1.3
        eta_out = 1.0
        sampling = renmas2.materials.PerfectTransmissionSampling(eta_in, eta_out)
        mat.add(sampling)
        eta_in = ren.converter.zero_spectrum().set(1.3)
        eta_out = ren.converter.zero_spectrum().set(1.0)
        fresnel = renmas2.materials.FresnelDielectric(eta_in, eta_out)
        spec = ren.converter.create_spectrum((0.5, 0.5, 0.5))
        perf_spec = renmas2.materials.PerfectTransmission(spec, fresnel, 1.0)
        mat.add(perf_spec)
        ref_sam = renmas2.materials.PerfectSpecularSampling()
        mat.add(ref_sam)
        spec2 = ren.converter.create_spectrum((0.9, 0.9, 0.9))
        fresnel2 = renmas2.materials.FresnelDielectric(eta_in, eta_out)
        perf_ref = renmas2.materials.PerfectSpecular(spec2, fresnel2, 1.0)
        mat.add(perf_ref)
        # Fixed hit point / outgoing direction used by both implementations.
        normal = factory.vector(2, 4.5, 5)
        normal.normalize()
        hit_point = factory.vector(3, 5, 6)
        wo = factory.vector(-2, 1, 0)
        wo.normalize()
        hp = renmas2.shapes.HitPoint(1.5, hit_point, normal, 0)
        hp.wo = wo
        hp.fliped = False
        # Assemble and load the asm harness, then mirror hp into its data.
        ren.macro_call.set_runtimes([runtime])
        mat.next_direction_bsdf_asm([runtime], ren.structures, ren.assembler)
        mc = ren.assembler.assemble(self.asm_code1(ren))
        ds = runtime.load("test", mc)
        ds["next_dir_ptr"] = runtime.address_module(mat.nd_asm_name)
        ds["hp.normal"] = (normal.x, normal.y, normal.z, 0.0)
        ds["hp.t"] = 1.5
        ds["hp.hit"] = (hit_point.x, hit_point.y, hit_point.z, 0.0)
        ds["hp.wo"] = (wo.x, wo.y, wo.z, 0.0)
        ds["hp.fliped"] = 0
        # Run both paths and print the results for manual comparison.
        runtime.run("test")
        mat.next_direction_bsdf(hp)
        print ("Python")
        print (hp.wi)
        print (hp.ndotwi)
        print (hp.specular)
        print (hp.f_spectrum)
        print ("ASM")
        print (ds["hp.wi"])
        print (ds["hp.ndotwi"])
        print (ds["hp.specular"])
        print (ds["hp.f_spectrum.values"])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"mvidov@yahoo.com"
] | mvidov@yahoo.com |
2f5efcf28e794b48ae2a7dadd61e25b88cf457fd | 4b0c57dddf8bd98c021e0967b5d94563d15372e1 | /run_MatrixElement/test/emptyPSets/emptyPSet_STopTW_T_JESDown_cfg.py | 2a2135bef395f4bb05eda2c15816866286c4d777 | [] | no_license | aperloff/TAMUWW | fea6ed0066f3f2cef4d44c525ee843c6234460ba | c18e4b7822076bf74ee919509a6bd1f3cf780e11 | refs/heads/master | 2021-01-21T14:12:34.813887 | 2018-07-23T04:59:40 | 2018-07-23T04:59:40 | 10,922,954 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import FWCore.ParameterSet.Config as cms
import os
#!
#! PROCESS
#!
# CMSSW configuration for the MatrixElement job over the STopTW_T sample
# with the JES-down systematic variation.
process = cms.Process("MatrixElementProcess")
#!
#! SERVICES
#!
#process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageLogger.MessageLogger_cfi')
# Throttle framework progress reports to one line per 5000 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.load('CommonTools.UtilAlgos.TFileService_cfi')
process.TFileService.fileName=cms.string('STopTW_T_JESDown.root')
#!
#! INPUT
#!
# Single input file read remotely via xrootd.
inputFiles = cms.untracked.vstring(
    'root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/STopTW_T_JESDown.root'
    )
# NOTE(review): capped at 10 events -- looks like a test setting; use -1 for
# a full production run.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.source = cms.Source("PoolSource",
                    skipEvents = cms.untracked.uint32(0),
                    fileNames = inputFiles )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
| [
"aperloff@physics.tamu.edu"
] | aperloff@physics.tamu.edu |
d87e688315f0b80a8c646b79758a4898063bf490 | 95c295e1f62edceae6a8aa8d8c8047b0b36b6fe5 | /World Finals/stack_management.py | f86335c6ddb5835cdb7582305f6201820fdb698c | [
"MIT"
] | permissive | Sharanyakethineni/GoogleCodeJam-2017 | 06c79e42940c559a591b17b514876a1eeddd9137 | 421fbab37aa90be9397da979e990bdd2266b1fb0 | refs/heads/master | 2023-03-23T00:26:19.552281 | 2020-07-30T16:10:48 | 2020-07-30T16:10:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,579 | py | # Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2017 Word Finals - Problem E. Stack Management
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000201909/00000000002017fd
#
# Time: O((N * C) * logN)
# Space: O(N * C)
#
from collections import defaultdict
from heapq import heappush, heappop
def preprocess(piles): # Time: O((N * C) * logN), Space: O(N)
    """Greedy removal pass over the pile tops; mutates `piles` in place.

    Whenever two or more piles expose the same suite on top, the
    smallest-valued such top card is removed; this repeats until every
    exposed suite is unique (or piles empty out).
    """
    # min_heaps[suite]: (value, pile_index) for each pile showing `suite`
    # on top; stk records suites that currently have a duplicate top.
    min_heaps, stk = defaultdict(list), []
    for i, pile in enumerate(piles):
        value, suite = pile[-1]
        heappush(min_heaps[suite], (value, i))
        if len(min_heaps[suite]) > 1:
            stk.append(suite)
    while stk:
        suite = stk.pop()
        # Remove the smallest duplicated top card of this suite.
        _, i = heappop(min_heaps[suite])
        piles[i].pop()
        if not piles[i]:
            continue
        # The newly exposed card may itself create a duplicate top.
        value, suite = piles[i][-1]
        heappush(min_heaps[suite], (value, i))
        if len(min_heaps[suite]) > 1:
            stk.append(suite)
def dfs(adj, source, targets): # Time: O(N), Space: O(N)
    """Iterative DFS from `source`; True iff any node in `targets` is reachable.

    `adj` maps node -> list of neighbours; nodes absent from `adj` simply
    have no outgoing edges (adj.get avoids mutating a defaultdict caller).
    """
    pending = [source]
    visited = {source}
    while pending:
        node = pending.pop()
        if node in targets:
            return True
        for neighbor in adj.get(node, ()):
            if neighbor not in visited:
                visited.add(neighbor)
                pending.append(neighbor)
    return False
def stack_management():
    """Solve one test case; return "POSSIBLE" or "IMPOSSIBLE".

    Reads this case's pile selection from stdin (Python 2 style:
    raw_input, list-returning map).
    """
    N, C = map(int, raw_input().strip().split())
    # Fresh copies of the referenced premade piles for this case.
    piles = map(lambda x: PILES[x][:], map(int, raw_input().strip().split()))
    preprocess(piles) # remove all cards if possible
    for pile in piles:
        if len(pile) > 1:
            break
    else:
        # Every pile is down to at most one card: trivially solvable.
        return "POSSIBLE"
    # suite_to_max_two_values[suite]: min-heap of the (up to) two largest
    # values remaining for that suite ([-1] is the suite maximum).
    suite_to_max_two_values = defaultdict(list)
    for i, pile in enumerate(piles): # Time: O((N * C) * log2), Space: O(N)
        for idx, (value, suite) in enumerate(pile):
            heappush(suite_to_max_two_values[suite], value)
            if len(suite_to_max_two_values[suite]) == 3:
                heappop(suite_to_max_two_values[suite])
            elif len(suite_to_max_two_values) > len(piles):
                return "IMPOSSIBLE" # early return
    if len(suite_to_max_two_values) < len(piles):
        # Fewer distinct suites than piles: a stack can be freed.
        return "POSSIBLE"
    for pile in piles:
        if not pile:
            break
    else:
        return "IMPOSSIBLE" # no empty stack
    # Suites whose bottom card holds that suite's largest remaining value.
    vertices = {pile[0][1] for pile in piles if pile and pile[0][0] == suite_to_max_two_values[pile[0][1]][-1]} # Time: O(N)
    # Build a reachability graph between those bottom-card suites; succeed
    # iff some source suite can reach a target suite.
    sources, targets, adj = [], set(), defaultdict(list)
    for i, pile in enumerate(piles): # Time: O(N * C)
        if not pile:
            continue
        ace_value, ace_suite = pile[0]
        if ace_value != suite_to_max_two_values[ace_suite][-1]:
            continue
        if len(suite_to_max_two_values[ace_suite]) == 1:
            sources.append(ace_suite)
        for value, suite in pile:
            if suite == ace_suite:
                continue
            if value == suite_to_max_two_values[suite][-1]:
                targets.add(ace_suite)
            elif suite in vertices and len(suite_to_max_two_values[suite]) == 2 and value == suite_to_max_two_values[suite][-2]:
                adj[ace_suite].append(suite)
    for source in sources: # total - Time: O(N), Space: O(N)
        if dfs(adj, source, targets):
            break
    else:
        return "IMPOSSIBLE"
    return "POSSIBLE"
# ---- Python 2 entry point (input/raw_input/xrange, print statement) ----
P = input()
PILES = []
for _ in xrange(P):
    V_S = map(int, raw_input().strip().split())
    # V_S[0] is the card count, followed by (value, suite) pairs; pairs are
    # stored in reverse input order so pile[-1] is the first listed card,
    # which the solver treats as the top of the pile.
    PILES.append([(V_S[2*i+1], V_S[2*i+2]) for i in reversed(xrange((len(V_S)-1)//2))])
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, stack_management())
| [
"kamyu104@gmail.com"
] | kamyu104@gmail.com |
2d33f7e8a3abaf14d63b1b7cb861ab56d1abaccc | c8ba6ebb9a58a62def08ec22fb990e029c6394ad | /service/workflow/workflow_base_service.py | a94f798c0c6a438e331b96ed7e380c4e113db531 | [
"MIT"
] | permissive | mjwei222/loonflow | 57e75a13c63b7affc398d76cd84966686c6e9d57 | 1762d090416a888b57194bc2ac9af4bf5d872497 | refs/heads/master | 2020-05-19T19:03:49.673059 | 2019-05-06T10:00:11 | 2019-05-06T10:00:11 | 185,169,961 | 0 | 0 | null | 2019-05-06T09:54:32 | 2019-05-06T09:54:32 | null | UTF-8 | Python | false | false | 8,377 | py | import json
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.workflow.models import Workflow
from service.base_service import BaseService
from service.common.log_service import auto_log
from service.account.account_base_service import AccountBaseService
class WorkflowBaseService(BaseService):
"""
流程服务
"""
    def __init__(self):
        # Stateless service class; all public entry points are classmethods.
        pass
@classmethod
@auto_log
def get_workflow_list(cls, name, page, per_page, workflow_id_list):
"""
获取工作流列表
:param name:
:param page:
:param per_page:
:param workflow_id_list:工作流id list
:return:
"""
query_params = Q(is_deleted=False)
if name:
query_params &= Q(name__contains=name)
query_params &= Q(id__in=workflow_id_list)
workflow_querset = Workflow.objects.filter(query_params).order_by('id')
paginator = Paginator(workflow_querset, per_page)
try:
workflow_result_paginator = paginator.page(page)
except PageNotAnInteger:
workflow_result_paginator = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results
workflow_result_paginator = paginator.page(paginator.num_pages)
workflow_result_object_list = workflow_result_paginator.object_list
workflow_result_restful_list = []
for workflow_result_object in workflow_result_object_list:
workflow_result_restful_list.append(dict(id=workflow_result_object.id, name=workflow_result_object.name, description=workflow_result_object.description,
notices=workflow_result_object.notices, view_permission_check=workflow_result_object.view_permission_check,
limit_expression=workflow_result_object.limit_expression, display_form_str=workflow_result_object.display_form_str,
creator=workflow_result_object.creator, gmt_created=str(workflow_result_object.gmt_created)[:19]))
return workflow_result_restful_list, dict(per_page=per_page, page=page, total=paginator.count)
@classmethod
@auto_log
def check_new_permission(cls, username, workflow_id):
"""
判断用户是否有新建工单的权限
:param username:
:param workflow_id:
:return:
"""
# 获取workflow的限制表达式
workflow_obj, msg = cls.get_by_id(workflow_id)
if not workflow_obj:
return False, msg
limit_expression = workflow_obj.limit_expression
if not limit_expression:
return True, 'no limit_expression set'
#'限制周期({"period":24} 24小时), 限制次数({"count":1}在限制周期内只允许提交1次), 限制级别({"level":1} 针对(1单个用户 2全局)限制周期限制次数,默认特定用户);允许特定人员提交({"allow_persons":"zhangsan,lisi"}只允许张三提交工单,{"allow_depts":"1,2"}只允许部门id为1和2的用户提交工单,{"allow_roles":"1,2"}只允许角色id为1和2的用户提交工单)
limit_expression_dict = json.loads(limit_expression)
limit_period = limit_expression_dict.get('period')
limit_count = limit_expression_dict.get('limit_count')
limit_allow_persons = limit_expression_dict.get('allow_persons')
limit_allow_depts = limit_expression_dict.get('allow_depts')
limit_allow_roles = limit_expression_dict.get('allow_roles')
if limit_period:
from service.ticket.ticket_base_service import TicketBaseService
if limit_expression_dict.get('level') == 1:
count_result, msg = TicketBaseService.get_ticket_count_by_args(workflow_id=workflow_id, username=username, period=limit_period)
elif limit_expression_dict.get('level') == 2:
count_result, msg = TicketBaseService.get_ticket_count_by_args(workflow_id=workflow_id, period=limit_period)
if count_result is False:
return False, msg
if count_result > limit_expression_dict.get('count'):
return False, '{} tickets can be created in {}hours when workflow_id is {}'.format(limit_count, limit_period, workflow_id)
if limit_allow_persons:
if username not in limit_allow_persons.split(','):
return False, '{} can not create ticket base on workflow_id:{}'.format(workflow_id)
if limit_allow_depts:
# 获取用户所属部门,包含上级部门
user_all_dept_id_list, msg = AccountBaseService.get_user_up_dept_id_list()
if user_all_dept_id_list is False:
return False, msg
# 只要user_all_dept_id_list中的某个部门包含在允许范围内即可
limit_allow_dept_str_list = limit_allow_depts.split(',')
limit_allow_dept_id_list = [int(limit_allow_dept_str) for limit_allow_dept_str in limit_allow_dept_str_list]
limit_allow_dept_id_list = list(set(limit_allow_dept_id_list)) #去重
total_list = user_all_dept_id_list + limit_allow_dept_id_list
if len(total_list) == len(set(total_list)):
return False, 'user is not in allow dept'
if limit_allow_roles:
# 获取用户所有的角色
user_role_list, msg = AccountBaseService.get_user_role_id_list(username)
if user_role_list is False:
return False, msg
limit_allow_role_str_list = limit_allow_roles.split(',')
limit_allow_role_id_list = [int(limit_allow_role_str) for limit_allow_role_str in limit_allow_role_str_list]
limit_allow_role_id_list = list(set(limit_allow_role_id_list))
total_list = limit_allow_role_id_list + user_role_list
if len(total_list) == len(set(total_list)):
return False, 'user is not in allow role'
return True, ''
@classmethod
@auto_log
def get_by_id(cls, workflow_id):
"""
获取工作流 by id
:param workflow_id:
:return:
"""
workflow_obj = Workflow.objects.filter(is_deleted=0, id=workflow_id).first()
if not workflow_obj:
return False, '工作流不存在'
return workflow_obj, ''
@classmethod
@auto_log
def add_workflow(cls, name, description, notices, view_permission_check, limit_expression, display_form_str, creator):
"""
新增工作流
:param name:
:param description:
:param notices:
:param view_permission_check:
:param limit_expression:
:param display_form_str:
:param creator:
:return:
"""
workflow_obj = Workflow(name=name, description=description, notices=notices, view_permission_check=view_permission_check,
limit_expression=limit_expression,display_form_str=display_form_str, creator=creator)
workflow_obj.save()
return workflow_obj.id, ''
@classmethod
@auto_log
def edit_workflow(cls, workflow_id, name, description, notices, view_permission_check, limit_expression, display_form_str):
"""
更新工作流
:param workflow_id:
:param name:
:param description:
:param notices:
:param view_permission_check:
:param limit_expression:
:param display_form_str:
:return:
"""
workflow_obj = Workflow.objects.filter(id=workflow_id, is_deleted=0)
if workflow_obj:
workflow_obj.update(name=name, description=description, notices=notices, view_permission_check=view_permission_check,
limit_expression=limit_expression, display_form_str=display_form_str)
return workflow_id, ''
@classmethod
@auto_log
def delete_workflow(cls, workflow_id):
"""
删除工作流
:param workflow_id:
:return:
"""
workflow_obj = Workflow.objects.filter(id=workflow_id, is_deleted=0)
if workflow_obj:
workflow_obj.update(is_deleted=True)
return workflow_id, ''
| [
"blackholll@163.com"
] | blackholll@163.com |
b9f7a4ed46d52bd9276ddbc146cce7690db1e7ff | a592517d7a041b4bbfc15a12c36314b246ac257e | /35bibao.py | e30eaed08c67df6581249c5f4c4462558df2d0f0 | [] | no_license | 8880/Python | b0d25616528b483e90e1e0021bce97e551e335b9 | 68ed65d51e1895d5fe3c71a9cafa4d9a780d1601 | refs/heads/master | 2021-01-16T17:55:24.773577 | 2017-08-30T10:28:32 | 2017-08-30T10:28:32 | 100,024,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | #!/user/bin/python
# Closure demo (Python 2 syntax: bare `print` statements).
def out():
    # `a` is a free variable captured by the inner function's closure.
    a = 1
    def inner():
        print a
        print "I'm inner"
    # Return the function object itself; `a` stays alive via the closure.
    return inner
f = out()
f()
| [
"klous530.outlook.com"
] | klous530.outlook.com |
69bc4f8f28516bc0e2ba8a6674d66aa4234da201 | b1878f78661e3b4b11c26fca23d3703d936a02e4 | /source/mpsmpo/mpo_qphys.py | 3adf643abfa319d0de67389ef9346b2ba3255f7b | [] | no_license | zhendongli2008/zmpo_dmrg | c15872ff8032f6b9d3b295868ea1aeff6917ac58 | 2444ae845373ea3822c25313d25ebaf66474b50c | refs/heads/master | 2020-03-28T04:26:56.872073 | 2019-10-07T09:09:54 | 2019-10-07T09:09:54 | 147,715,300 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | import numpy
import itertools
# Vacuum
def vacuum(isym=1):
   """Return the vacuum-state quantum numbers.

   isym=1 -> particle number only: [[0]]
   isym=2 -> (particle number, Sz): [[0,0]]
   Raises ValueError for any other isym (previously an unknown value fell
   through and raised an opaque UnboundLocalError on `qvac`).
   """
   if isym == 1:
      return [[0]]
   if isym == 2:
      return [[0,0]]
   raise ValueError('vacuum: unsupported isym=%s' % isym)
# Spin-orbitals
def init(norb,isym=1):
   """Build the local physical quantum numbers for norb spin-orbitals.

   isym=0: no symmetry (returns None); isym=1: particle number [0]/[1] per
   orbital; isym=2: (particle number, Sz) pairs alternating +-1/2 spins
   (requires an even norb).  Any other isym aborts the process.
   """
   if isym == 0:
      lst = None
   elif isym == 1:
      lst = [[[0],[1]]]*norb
   elif isym == 2:
      assert norb%2 == 0
      # NOTE: integer division -- this file uses Python 2 syntax throughout
      lst = [[[0,0],[1,0.5]],[[0,0],[1,-0.5]]]*(norb/2)
   else:
      print 'error in mpo_qphys.init: no such isym=',isym
      exit(1)
   return lst
def prt(qphys):
   """Pretty-print the per-site quantum numbers; always returns 0."""
   # `n` is unused (leftover)
   n = 50
   nsite = len(qphys)
   print '[mpo_qphys.prt]'
   print '-'*60
   print ' Number of sites=',nsite
   for i in range(nsite):
      print ' isite=',i,qphys[i]
   print '-'*60
   return 0
# Merge two indices (n1,n2) into a combined index
def dpt(q1,q2):
   """Direct product of two quantum-number lists: every pair (qi, qj) from
   the product is combined by elementwise addition."""
   combined = []
   for qi,qj in itertools.product(q1,q2):
      assert len(qi) == len(qj)
      combined.append([a + b for a, b in zip(qi, qj)])
   return combined
# Merge more physical indices
def merge(qphys,partition,debug=False):
   """Merge per-site quantum numbers into one list per group in `partition`.

   Each group (a list of site indices) is folded left-to-right with dpt(),
   i.e. the direct product of the group's local quantum numbers.  Returns
   None when qphys is None; `debug` enables trace printing (Python 2).
   """
   if debug:
      print '[mpo_qphys.merge]'
      print ' partition = ',partition
      print ' qphys = ',qphys
   if qphys is None: return None
   qphys_new = []
   for ipart in partition:
      npart = len(ipart)
      # seed with the first site of the group, then fold in the rest
      qnew = qphys[ipart[0]]
      for j in range(1,npart):
         qnew = dpt(qnew,qphys[ipart[j]])
      if debug:
         print ' >>> ipart = ',ipart
         print ' qnew = ',qnew
      qphys_new.append(qnew)
   return qphys_new
# Self-test: merge 8 orbitals into 4 groups for both symmetry types.
if __name__ == '__main__':
   partition = [[0, 1, 2, 3], [4, 5], [6], [7]]
   # Generate a new set of local quantum numbers
   qphys = init(8)
   prt(qphys)
   qnew = merge(qphys,partition)
   for idx,iqnum in enumerate(qnew):
      print ' idx=',idx,iqnum
   # repeat with particle-number + Sz symmetry
   qphys = init(8,isym=2)
   prt(qphys)
   qnew = merge(qphys,partition)
   for idx,iqnum in enumerate(qnew):
      print ' idx=',idx,iqnum
| [
"zhendongli2008@gmail.com"
] | zhendongli2008@gmail.com |
3aef6f8cab6d02c6d01e016081566b54ecaa309c | a99dd87c230fb7849f9b1ef18b81701af112e581 | /python面向对象/pythonFaceObject/13_面向对象/10_类的特殊方法/mycmp.py | 8c2e02a749f1911caa36165e9750aaad00af44f6 | [] | no_license | wuhao2/Python_learning | a123ebc1b8c1850b4f9daa23cb949acef39d0f97 | 251b48dec58464ee932f0569697e1ab1e40f8c8c | refs/heads/master | 2020-12-02T17:44:34.981101 | 2017-09-27T11:17:39 | 2017-09-27T11:17:39 | 96,379,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | class Point:
    def __init__(self,x,y):
        """Store the two coordinates of the point."""
        self.x = x
        self.y = y
    def __lt__(self,oth):
        """`p < q` compares the x coordinates only."""
        return self.x < oth.x
    def __gt__(self,oth):
        """`p > q` compares the y coordinates only (asymmetric with __lt__)."""
        return self.y > oth.y
if __name__ == '__main__':
pa = Point(0,1)
pb = Point(1,0)
print(pa < pb)
print(pa > pb) | [
"wu_hao1314520@126.com"
] | wu_hao1314520@126.com |
638b868340baeb9fe1d83fe498cf3fb4a0f48305 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/labeled_tensor/python/ops/sugar.pyi | e7e9b69e84d69d3b4b2f403985da8792c084f125 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | pyi | # Stubs for tensorflow.contrib.labeled_tensor.python.ops.sugar (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.labeled_tensor.python.ops import core as core, ops as ops
from typing import Any as Any, Optional as Optional
class ReshapeCoder:
def __init__(self, existing_axis_names: Any, new_axes: Any, name: Optional[Any] = ...) -> None: ...
def encode(self, labeled_tensor: Any): ...
def decode(self, labeled_tensor: Any): ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
b0be48ea204da1c7d9b4a39961071075e80b76a5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02923/s307479598.py | 70f0b023290a5035156622554fb201e02988587b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | n = int(input())
# AtCoder script: `n` was read from stdin on the preceding line; `h` holds
# the n square heights.  Finds the longest run of non-increasing steps.
h = list(map(int,input().split()))
# length of the current non-increasing run and the best run seen so far
count = 0
ans = 0
for i in range(n):
    if i == n-1 :
        # reached the last square: close the current run
        ans = max(ans,count)
        count = 0
    elif h[i] >= h[i+1]:
        # can still move right without climbing
        count += 1
    else:
        # run broken by an ascent: record it and restart
        ans = max(ans,count)
        count = 0
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
49076bbc7414538f16da0a9cbef9d8848038caf4 | cbe264842df4eae3569b28ed4aae9489014ed23c | /python/concurrency/tutorial/deadlock.py | 5bf779d707766c352290925384dc4db1353c8e95 | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | import threading
# Deliberate deadlock demo: threading.Lock is NOT reentrant, so acquiring
# it twice from the same thread blocks forever on the second acquire().
l = threading.Lock()
print('before first acquire')
l.acquire()
print('before second acquire')
# This call never returns (threading.RLock would allow re-acquisition);
# the final print below is therefore never reached.
l.acquire()
print('acquire lock twice')
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
3377600bf816b184253a22a3870247116c0f02c7 | 54d2887e3c910f68366bd0aab3c692d54245e22a | /abc/abc_042_125/abc108/d.py | 7bede3006c0d50793bd9eca0c7415e33149df73e | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | import math
# ABC108 D: read L and construct a weighted digraph on n vertices.
# NOTE(review): presumably built so the 1->n path costs enumerate 0..L-1
# exactly once -- verify against the task statement.
l = int(input())
# l -= 1
# number of vertices: enough binary "levels" to represent values below L
n = math.floor(math.log2(l)) + 1
edge = []
# chain edges 1->2->...->n with weight 0 (binary digit not set)
for i in range(1, n):
    edge.append((i, i+1, 0))
# parallel edges carrying the binary weights 2^j (binary digit set)
for j, i in enumerate(reversed(range(1, n))):
    edge.append((i, i+1, 2**j))
# remainder of labels not covered by the pure binary part, and its offset
l2 = l - 2**(n-1)
c = 2**(n-1)
# print("test", c, l2)
while l2 > 0:
    # take the largest power of two not exceeding the remainder
    l_mini = 1
    while l_mini * 2 <= l2:
        l_mini *= 2
    # print("test", c, l2, int(math.log2(l_mini)))
    # shortcut edge from vertex 1 with offset c covering l_mini extra labels
    edge.append((1, n-int(math.log2(l_mini)), c))
    l2 -= l_mini
    c += l_mini
print(n, len(edge))
for i in range(len(edge)):
    print(*edge[i])
| [
"kevinrobot34@yahoo.co.jp"
] | kevinrobot34@yahoo.co.jp |
384d53395d6c93bcd1a6496ffc9c71392245dac3 | dd1ff9175109b075b0c0701e28479e22a83e2b9e | /app/migrations/0087_auto_20200809_2154.py | 5abf7262c85408b96e30288d396d26a7184bc36d | [] | no_license | sotengboy/ojopisowifi_main | 7bda515b3eb122153e716177e139cea7b80c0e24 | cdbecd8d50408c10bdecf1439ee7c78fbbdf01d2 | refs/heads/master | 2023-04-08T16:53:27.633895 | 2020-12-17T14:30:11 | 2020-12-17T14:30:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | # Generated by Django 3.0.8 on 2020-08-09 13:54
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relabels verbose names of several settings
    models and re-declares three DurationFields with timedelta defaults."""
    dependencies = [
        ('app', '0086_auto_20200809_2104'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='device',
            options={'verbose_name': 'Hardware Setting', 'verbose_name_plural': 'Hardware Settings'},
        ),
        migrations.AlterModelOptions(
            name='pushnotifications',
            options={'verbose_name': 'Push Notifications Setting', 'verbose_name_plural': 'Push Notification Settings'},
        ),
        migrations.AlterModelOptions(
            name='settings',
            options={'verbose_name': 'Wifi Setting', 'verbose_name_plural': 'Wifi Settings'},
        ),
        migrations.AlterField(
            model_name='pushnotifications',
            name='notification_trigger_time',
            field=models.DurationField(blank=True, default=datetime.timedelta(0), help_text='Notification will fire when time is equal to the specified trigger time. Format: hh:mm:ss', null=True),
        ),
        migrations.AlterField(
            model_name='rates',
            name='Minutes',
            field=models.DurationField(default=datetime.timedelta(0), help_text='Internet access duration in hh:mm:ss format', verbose_name='Duration'),
        ),
        migrations.AlterField(
            model_name='vouchers',
            name='Voucher_time_value',
            field=models.DurationField(blank=True, default=datetime.timedelta(0), help_text='Time value in minutes.', null=True, verbose_name='Time Value'),
        ),
    ]
| [
"alinoclyde.juan24@gmail.com"
] | alinoclyde.juan24@gmail.com |
9544c441e6f55c1bef9e62cf70d31ac3f40d5f50 | d54fba8405a7bf77a096fdf6377e81a917fb8390 | /itest_platform/wsgi.py | 64ecf48c3616bc4887b59ae0a82b24442c303ffb | [] | no_license | defnngj/itest_platform | 8e8d4f6eaea4f6cc50d4a3fbba8a0c52144c8f42 | f60d62e3c2811763fcefe93d37ca4c6bbb2f665e | refs/heads/master | 2023-05-25T18:49:00.187381 | 2023-03-24T10:10:19 | 2023-03-24T10:10:19 | 219,531,335 | 12 | 9 | null | 2023-05-22T21:48:32 | 2019-11-04T15:17:08 | JavaScript | UTF-8 | Python | false | false | 405 | py | """
WSGI config for itest_platform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'itest_platform.settings')
application = get_wsgi_application()
| [
"fnngj@126.com"
] | fnngj@126.com |
a19b651adb4453e938ddbecd2f9c0229542d443b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/perfectsquare_20200504162116.py | edae58275003dd81944cea4a5a76613f99fa22e1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import math
def perfect(sq):
    """Print whether *sq* is a perfect square, then print math.sqrt(sq).

    Uses math.isqrt for the exactness test: the previous float round-trip
    (``math.pow(math.sqrt(sq), 2) == sq``) gives wrong answers once the
    value exceeds float precision (e.g. (10**20 + 7) ** 2).
    """
    root = math.sqrt(sq)
    # exact integer test; guard so non-integral inputs stay "not perfect"
    is_square = sq >= 0 and int(sq) == sq and math.isqrt(int(sq)) ** 2 == sq
    if is_square:
        print(sq, 'is a perfect square')
    else:
        print('Its not a perfect square')
    print(root)
perfect(72) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
4b92ca5801935c6dce9db504233e16eb5f52b77c | e27333261b8e579564016c71d2061cc33972a8b8 | /development_codes/Backend/.history/IR_engine_20210809205551.py | 39f9b38d97b34425dd0667846996d6610b0a6b83 | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | from Word2Vecimplementation import Word2VecModel
from BM25implementation import *
from tfidfImplementation import *
from IPython.display import display
from UnigramLanguageModelImplementation import *
RETURN_SIZE = 30
'''
Functions to write:
1. tf-idf with cosine sim/Euclidean distance
- represent terms in each document with its tf-idf weights,
2. VSM with cosine sim/Euclidean distance
3. BIM
4. BM25
5. BERT
'''
# Hard-coded corpus locations (absolute Windows paths -- machine specific).
titles_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles.csv"
tweets_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\tweets_data_stemmed.csv"
# Short code -> human readable name of every supported retrieval model.
SEARCH_MODELS = {
    "tfcs": "Tf-idf w Cosine Sim",
    "tfed": "Tf-idf w Euclidean Dist",
    "BM25": "Okapi-BM25",
    "W2Vcs": "Word2Vec w Cosine Similarity",
    "W2Ved": "Word2Vec w Euclidean Distance",
    "ULM": "Unigram Language Model"
}
# Column layout of the tweets CSV.
tweet_col_names = ["article_id","tweet_id", "relevance", "tweet", "clean_text"]
class DataProcessor:
    """Load the article/tweet corpora and dispatch queries to the configured
    retrieval models (tf-idf, BM25, Word2Vec, unigram language model)."""

    def __init__(self):
        # drop rows with missing values so downstream models get clean text
        self.titles_data = pd.read_csv(titles_file_path).dropna()
        self.tweets_data = pd.read_csv(tweets_file_path).dropna()
        self.cosineSimilarity = CosineSimilarity(
            self.titles_data, self.tweets_data, return_size=RETURN_SIZE)
        self.euclideanDistance = EuclideanDistance(
            self.titles_data, self.tweets_data, return_size=RETURN_SIZE)
        self.word2VecModel = Word2VecModel(self.tweets_data)
        self.unigramLanguageModel = UnigramLanguageModel(self.tweets_data)
        print ("Data Processor up and ready...")

    def returnTweetsBasedOnSearchModel(self, articleId, articleTitle, searchModel):
        """Return the top ``RETURN_SIZE`` tweets for *articleTitle* ranked by
        *searchModel* (one of the values of ``SEARCH_MODELS``).

        Tweets that belong to a different article get relevance_score 0.
        :raises ValueError: unknown *searchModel* (previously the fall-through
            raised ``NameError`` on the unbound ``rankedDocs``).
        """
        if searchModel == SEARCH_MODELS["tfcs"]:
            return self.cosineSimilarity.query(articleId, articleTitle)
        if searchModel == SEARCH_MODELS["tfed"]:
            return self.euclideanDistance.query(articleId, articleTitle)
        if searchModel == SEARCH_MODELS["BM25"]:
            rankedDocs = self.BM25query(articleId, articleTitle)
        elif searchModel == SEARCH_MODELS["W2Vcs"]:
            rankedDocs = self.Word2Vecquery(articleId, articleTitle, SEARCH_MODELS["W2Vcs"])
        elif searchModel == SEARCH_MODELS["W2Ved"]:
            rankedDocs = self.Word2Vecquery(articleId, articleTitle, SEARCH_MODELS["W2Ved"])
        elif searchModel == SEARCH_MODELS["ULM"]:
            rankedDocs = self.UnigramLanguageModelQuery(articleId, articleTitle)
        else:
            raise ValueError("Unknown search model: {}".format(searchModel))
        rankedDocs = rankedDocs.reset_index(drop=True)
        # zero out scores of tweets that belong to a different article
        rankedDocs = rankedDocs.apply(
            lambda row: self.checkIfArticleIdMatchesQueryId(row, articleId), axis=1)
        return rankedDocs

    def checkIfArticleIdMatchesQueryId(self, pandasRow, articleId):
        """Zero the row's relevance_score when it belongs to another article.

        NOTE(review): assumes the frame carries a ``relevance_score`` column
        added by the ranking models -- confirm for every model's output.
        """
        if pandasRow.article_id != articleId:
            pandasRow.relevance_score = 0
        return pandasRow

    def UnigramLanguageModelQuery(self, articleId, articleTitle):
        """Rank tweets with the query-likelihood unigram language model."""
        rankedDocs = self.unigramLanguageModel.getQueryLikelihoodModelScore(articleTitle)
        return rankedDocs[:RETURN_SIZE]

    def Word2Vecquery(self, articleId, articleTitle, type=SEARCH_MODELS["W2Vcs"]):
        """Rank tweets with Word2Vec; *type* picks cosine or euclidean mode."""
        rankedDocs = self.word2VecModel.return_most_significant_tweets(articleTitle, type=type)
        return rankedDocs[:RETURN_SIZE]

    def BM25query(self, articleId, articleTitle):
        """Rank tweets with Okapi BM25 and return the matches as a DataFrame."""
        query_list = QueryParsers(articleTitle).query
        bM25Class = BM25Class(self.tweets_data, query_list)
        rankedDocs = bM25Class.rankedDocs[:RETURN_SIZE]
        return_dataFrame = pd.DataFrame()
        # NOTE(review): O(k*n) scan; indexing tweets_data by tweet_id (or a
        # merge) would be much faster for large corpora
        for dataPoint in rankedDocs:
            tweetId = dataPoint[0]
            for index, row in self.tweets_data.iterrows():
                if (row.tweet_id == tweetId):
                    return_dataFrame = return_dataFrame.append(row)
                    continue
        return return_dataFrame
#dataProcessor = DataProcessor()
test_title_1 = "Company Update (NYSE:MET): MetLife Increases Share Repurchase Authorization to $1 Billion"
test_title_1_id = "8b31120e-d654-45b4-a5df-8fef674339d8"
test_title_2 = "Perkins Eastman Celebrates Groundbreaking of Clark-Lindsey's Small Homes for Dementia Care"
test_title_2_id = "32023021-1141-4832-9939-c8442d505b34"
#display(dataProcessor.BM25query("123", test_title_1))
#dataProcessor = DataProcessor()
#dataProcessor.returnTweetsBasedOnSearchModel(test_title_1_id, test_title_1, "Unigram Language Model")
| [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
2ddfd1822850778566fabffc9c42742504ca3e1e | 62472ed1a973171047f8ce6075d4f1cb3389304f | /data/henry-ford-hospital/scrape.py | fbc0e3476d6c12916cc169eee03d59f2fe019c24 | [
"MIT"
] | permissive | vsoch/hospital-chargemaster | 8e764da52b25acee021c1244b22f2b614343b2c2 | b3473c798fd2f343f7f02c1e32496f9eea9fa94d | refs/heads/master | 2021-10-19T23:53:12.439715 | 2019-02-24T18:35:58 | 2019-02-24T18:35:58 | 166,080,583 | 40 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | #!/usr/bin/env python
import os
import requests
import json
import datetime
import shutil
from bs4 import BeautifulSoup

# Scrape the Henry Ford chargemaster page, download every linked .xlsx into
# a dated folder, and write a records.json manifest (also mirrored to
# "latest").
here = os.path.dirname(os.path.abspath(__file__))
hospital_id = os.path.basename(here)
url = 'https://www.henryford.com/visitors/billing/cost-of-care/hospital-standard-charges'
today = datetime.datetime.today().strftime('%Y-%m-%d')
outdir = os.path.join(here, today)
if not os.path.exists(outdir):
    os.mkdir(outdir)
prefix = "https://www.henryford.com"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
# Each folder will have a list of records
records = []
for entry in soup.find_all('a', href=True):
    download_url = prefix + entry['href']
    if '.xlsx' in download_url:
        filename = os.path.basename(download_url.split('?')[0])
        entry_name = entry.text
        entry_uri = entry_name.strip().lower().replace(' ', '-')
        output_file = os.path.join(outdir, filename)
        # Download via requests instead of shelling out to wget: the old
        # os.system('wget -O "%s" "%s"') was shell-injectable and broke on
        # quotes/metacharacters, and its exit status was silently ignored.
        download = requests.get(download_url)
        download.raise_for_status()
        with open(output_file, 'wb') as xlsx_file:
            xlsx_file.write(download.content)
        record = {'hospital_id': hospital_id,
                  'filename': filename,
                  'date': today,
                  'uri': entry_uri,
                  'name': entry_name,
                  'url': download_url}
        records.append(record)
# Keep json record of all files included
records_file = os.path.join(outdir, 'records.json')
with open(records_file, 'w') as filey:
    filey.write(json.dumps(records, indent=4))
# This folder is also latest.
latest = os.path.join(here, 'latest')
if os.path.exists(latest):
    shutil.rmtree(latest)
shutil.copytree(outdir, latest)
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
6d9eac77a9da423807434d15948382050b31bfd0 | 484a348682d9fa515666b94a5cd3a13b1b725a9e | /Leetcode/[300]最长上升子序列 -- 暴力dfs.py | 87723d989edd2c42ea6f8d0b4077fe09618e6e09 | [] | no_license | joseph-mutu/Codes-of-Algorithms-and-Data-Structure | 1a73772825c3895419d86d6f1f506d58617f3ff0 | d62591683d0e2a14c72cdc64ae1a36532c3b33db | refs/heads/master | 2020-12-29T17:01:55.097518 | 2020-04-15T19:25:43 | 2020-04-15T19:25:43 | 238,677,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-03-14 08:47:15
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class Solution(object):
    def lengthOfLIS(self, nums):
        """
        Return the length of the longest strictly increasing subsequence.

        Patience-sorting approach: tails[k] is the smallest possible tail of
        an increasing subsequence of length k+1, so the answer is len(tails).
        O(n log n) instead of the previous exponential brute-force DFS.

        :type nums: List[int]
        :rtype: int
        """
        import bisect
        tails = []
        for num in nums:
            pos = bisect.bisect_left(tails, num)
            if pos == len(tails):
                tails.append(num)
            else:
                tails[pos] = num
        return len(tails)

    def dfs(self, start, path, tem_max, nums):
        """Legacy brute-force helper, no longer used by lengthOfLIS.

        Kept for backward compatibility; callers must set ``self.max_length``
        before invoking it.
        """
        if len(path) > self.max_length:
            self.max_length = len(path)
        for cur in range(start + 1, len(nums)):
            if nums[cur] <= tem_max:
                continue
            path.append(nums[cur])
            self.dfs(cur, path, nums[cur], nums)
            path.pop()
s = Solution()
print(s.lengthOfLIS([2])) | [
"josephmathone@gmail.com"
] | josephmathone@gmail.com |
b5c9ad10af4acceb97a55ecee6b6b2822afbc5d9 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tools/log2timeline.py | f219e1358c56308d1894ecafb3b9320f7e1d8122 | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 1,399 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""The log2timeline command line tool."""
from __future__ import unicode_literals
import logging
import multiprocessing
import sys
from plaso import dependencies
from plaso.cli import log2timeline_tool
from plaso.lib import errors
def Main():
  """The main function.

  Returns:
    bool: True when the tool ran successfully, False on error or user abort.
  """
  # Required on Windows for frozen executables that spawn worker processes.
  multiprocessing.freeze_support()
  tool = log2timeline_tool.Log2TimelineTool()
  if not tool.ParseArguments():
    return False
  if tool.show_info:
    tool.ShowInfo()
    return True
  # Any of the --list-* options short-circuits extraction.
  have_list_option = False
  if tool.list_hashers:
    tool.ListHashers()
    have_list_option = True
  if tool.list_parsers_and_plugins:
    tool.ListParsersAndPlugins()
    have_list_option = True
  if tool.list_profilers:
    tool.ListProfilers()
    have_list_option = True
  if tool.list_timezones:
    tool.ListTimeZones()
    have_list_option = True
  if have_list_option:
    return True
  # Optional dependency check before doing any real work.
  if tool.dependencies_check and not dependencies.CheckDependencies(
      verbose_output=False):
    return False
  try:
    tool.ExtractEventsFromSources()
  except (KeyboardInterrupt, errors.UserAbort):
    logging.warning('Aborted by user.')
    return False
  except (errors.BadConfigOption, errors.SourceScannerError) as exception:
    logging.warning(exception)
    return False
  return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
103eb3dcd33ff3ea3bc1d45b5fa085fc54c39aa3 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/04_Exceptions/14_exception_hooks.py | 7c96ac65df01185e4831837be121cc14250b60fa | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/python3
"""
Purpose: Exception Hooks
- When exception is raised with excepthook set,
case 1: when exception is handled with except block,
it works as ordinary exception handling
case 2: When excetpion is not handled with except block,
exception hook function is executed, and program stops
"""
import sys
def my_excepthook(exctype, value, traceback):
    """Custom sys.excepthook: print a compact report of an unhandled error."""
    print('\nUnhandled error')
    for label, item in (('\tType :', exctype),
                        ('\tValue :', value),
                        ('\tTraceback:', traceback)):
        print(label, item)
sys.excepthook = my_excepthook
print('Before exception')
# Case 1
try:
raise RuntimeError('This is the error message')
except Exception as ex:
print(ex)
# Case 2
raise RuntimeError('This is the error message')
# 1 + '213'
print('After exception')
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
e91871d6fb8477470ec9af862c254edd245f1ec7 | 745e2421cd2d110c37ec91315567678f50a7647f | /Ex-87_get_multiples.py | d347e6833afd8d0cdb88171be651aa1dd60dc828 | [] | no_license | TetianaSob/Python-Projects | 3a5760b21029db2de1123d4aa23a614c3ba41c33 | c37528db7e8d65e903357e2cdb7fa64e46537f15 | refs/heads/main | 2023-03-19T15:30:58.800231 | 2021-03-02T16:51:56 | 2021-03-02T16:51:56 | 310,383,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # Ex-87_get_multiples.py
'''get_multiples
Write a function called 'get_multiples, which accepts a number and a count,
nad returns a generator that yields the first count multiples of the number.
The default number should be 1, and the default count should be 10.'''
'''
evens = get_multiples(2, 3)
next(evens) # 2
next(evens) # 4
next(evens) # 6
next(evens) # StopIteration
default_multiples = get_multiples()
list(default_multiples) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
'''
#Get Multiples Generator Solution
def get_multiples(num=1, count=10):
    """Yield the first *count* multiples of *num* (defaults: 1 and 10)."""
    current = num
    for _ in range(count):
        yield current
        current += num
evens = get_multiples(2, 3)
print(next(evens)) # 2
print(next(evens)) # 4
print(next(evens)) # 6
# NOTE: the generator is now exhausted -- this 4th next() raises an uncaught
# StopIteration and the script exits with a traceback (intended by the demo).
print(next(evens)) # StopIteration
| [
"noreply@github.com"
] | TetianaSob.noreply@github.com |
f7ded61ad62afc5929092458097751cef70dcd47 | 75d318b2f125ec1d08195f12f8cc3870b3aa3056 | /cds_ils/importer/overdo.py | c3b662f4ee3215153abf1b19867ee2f9c57261b3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | topless/cds-books | d496913da2fdbcc42bf7bc7987fe60f80d6af35a | e587fec6c191fddebce3c3f9f61aae625db31254 | refs/heads/master | 2023-01-24T19:19:25.923716 | 2020-11-25T13:20:48 | 2020-11-26T07:49:11 | 257,366,115 | 0 | 0 | MIT | 2020-04-20T18:12:40 | 2020-04-20T18:12:39 | null | UTF-8 | Python | false | false | 2,795 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# CDS-ILS is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""CDS-ILS Overdo module."""
from cds_dojson.overdo import Overdo
from dojson._compat import iteritems
from dojson.errors import IgnoreKey, MissingRule
from dojson.utils import GroupableOrderedDict
class CdsIlsOverdo(Overdo):
    """Overwrite API of Overdo dojson class."""
    def do(
        self,
        blob,
        ignore_missing=True,
        exception_handlers=None,
        init_fields=None,
    ):
        """Translate blob values and instantiate new model instance.
        Raises ``MissingRule`` when no rule matched and ``ignore_missing``
        is ``False``.
        :param blob: ``dict``-like object on which the matching rules are
            going to be applied.
        :param ignore_missing: Set to ``False`` if you prefer to raise
                               an exception ``MissingRule`` for the first
                               key that it is not matching any rule.
        :param exception_handlers: Give custom exception handlers to take care
                                   of non-standard codes that are installation
                                   specific.
        :param init_fields: optional dict of fields pre-seeded into the output.
        """
        # IgnoreKey maps to None: such keys are silently skipped below.
        handlers = {IgnoreKey: None}
        handlers.update(exception_handlers or {})
        def clean_missing(exc, output, key, value):
            # drop the unmatched key from the record's ordering metadata
            order = output.get("__order__")
            if order:
                order.remove(key)
        if ignore_missing:
            # setdefault keeps a caller-supplied MissingRule handler intact
            handlers.setdefault(MissingRule, clean_missing)
        output = {}
        if init_fields:
            output.update(**init_fields)
        # build the rule index lazily on first use
        if self.index is None:
            self.build()
        if isinstance(blob, GroupableOrderedDict):
            items = blob.iteritems(repeated=True, with_order=False)
        else:
            items = iteritems(blob)
        for key, value in items:
            try:
                result = self.index.query(key)
                if not result:
                    raise MissingRule(key)
                name, creator = result
                data = creator(output, key, value)
                if getattr(creator, "__extend__", False):
                    # rules marked __extend__ accumulate into a list
                    existing = output.get(name, [])
                    existing.extend(data)
                    output[name] = existing
                else:
                    output[name] = data
            except Exception as exc:
                # handlers match the exact exception class, not subclasses
                if exc.__class__ in handlers:
                    handler = handlers[exc.__class__]
                    if handler is not None:
                        handler(exc, output, key, value)
                else:
                    raise
        return output
| [
"38131488+kprzerwa@users.noreply.github.com"
] | 38131488+kprzerwa@users.noreply.github.com |
b83714b7cdf594b531b74a7850e6a067a7542db4 | 6b551bec528a1d6544201d3c6d86835e885343b5 | /deep_privacy/engine/checkpointer.py | 3b61cfe87ae228ba4e4fec5ac64a5dd532189bad | [
"MIT",
"Apache-2.0"
] | permissive | hukkelas/DeepPrivacy | 9471c8e9389828aa09330905081205b061161d81 | 5ee3f1b0608f03ac54d5694b6421f6132cb63f0e | refs/heads/master | 2023-08-16T00:41:02.366235 | 2023-03-28T06:23:34 | 2023-03-28T06:23:34 | 206,106,232 | 1,288 | 194 | MIT | 2021-08-18T08:21:33 | 2019-09-03T15:08:27 | Python | UTF-8 | Python | false | false | 3,957 | py | import torch
from deep_privacy import logger
import pathlib
def _get_map_location():
    """Return the map_location for torch.load: 'cpu' when CUDA is absent."""
    if torch.cuda.is_available():
        return None
    logger.warn(
        "Cuda is not available. Forcing map checkpoint to be loaded into CPU.")
    return "cpu"
def load_checkpoint_from_url(model_url: str):
    """Fetch a state dict from *model_url* via torch.hub; None passes through."""
    if model_url is not None:
        return torch.hub.load_state_dict_from_url(
            model_url, map_location=_get_map_location())
    return None
def load_checkpoint(ckpt_dir_or_file: pathlib.Path) -> dict:
    """Load a checkpoint from a file, or from a directory's newest entry.

    For a directory, the first line of its 'latest_checkpoint' index names
    the checkpoint to load.  Raises FileNotFoundError when the resolved
    path does not exist.
    """
    if ckpt_dir_or_file.is_dir():
        with open(ckpt_dir_or_file.joinpath('latest_checkpoint')) as f:
            ckpt_path = f.readline().strip()
        ckpt_path = ckpt_dir_or_file.joinpath(ckpt_path)
    else:
        ckpt_path = ckpt_dir_or_file
    if not ckpt_path.is_file():
        raise FileNotFoundError(f"Did not find path: {ckpt_path}")
    # force CPU loading when CUDA is unavailable
    ckpt = torch.load(ckpt_path, map_location=_get_map_location())
    logger.info(f"Loaded checkpoint from {ckpt_path}")
    return ckpt
def _get_checkpoint_path(
output_dir: str, validation_checkpoint_step: int = None):
if validation_checkpoint_step is None:
return pathlib.Path(output_dir, "checkpoints")
step = validation_checkpoint_step * 10**6
path = pathlib.Path(
output_dir, "validation_checkpoints", f"step_{step}.ckpt")
return path
def get_checkpoint(
        output_dir: str, validation_checkpoint_step: int = None):
    """Load the checkpoint selected by :func:`_get_checkpoint_path`."""
    return load_checkpoint(
        _get_checkpoint_path(output_dir, validation_checkpoint_step))
def get_previous_checkpoints(directory: pathlib.Path) -> list:
    """Return the checkpoint names listed in ``latest_checkpoint`` (newest
    first), creating an empty index file if it does not exist yet.

    *directory* may also be a file inside the checkpoint directory.
    """
    if directory.is_file():
        directory = directory.parent
    index_path = directory.joinpath("latest_checkpoint")
    index_path.touch(exist_ok=True)
    with open(index_path) as fp:
        lines = fp.readlines()
    return [line.strip() for line in lines]
def get_checkpoint_step(output_dir: str, validation_checkpoint_step: int):
    """Return the global step of the checkpoint that would be loaded.

    If *validation_checkpoint_step* is given it is returned unchanged;
    otherwise the step is parsed from the newest entry ("step_<N>.ckpt") in
    the regular checkpoint directory's index file.

    Fix: removed a leftover debug ``print(ckpt_path)`` that polluted stdout.
    """
    if validation_checkpoint_step is not None:
        return validation_checkpoint_step
    directory = _get_checkpoint_path(output_dir)
    # Index entries are newest first; strip the "step_" prefix / ".ckpt" suffix.
    ckpt_path = pathlib.Path(get_previous_checkpoints(directory)[0])
    step = ckpt_path.stem.replace("step_", "")
    step = step.replace(".ckpt", "")
    return int(step)
class Checkpointer:
    """Saves and loads training checkpoints under ``<output_dir>/checkpoints``.

    NOTE(review): ``save_checkpoint`` reads ``self.trainer.global_step`` but
    ``trainer`` is never assigned in this class; presumably the training loop
    attaches it externally -- confirm before calling without ``filepath``.
    """
    def __init__(self, output_dir: str):
        # All checkpoints live in <output_dir>/checkpoints (created on demand).
        self.checkpoint_dir = pathlib.Path(
            output_dir, "checkpoints")
        self.checkpoint_dir.mkdir(exist_ok=True, parents=True)
    def save_checkpoint(
            self,
            state_dict: dict,
            filepath: pathlib.Path = None,
            max_keep=2):
        """Serialize *state_dict*, update the ``latest_checkpoint`` index and
        delete all but the *max_keep* newest checkpoint files."""
        if filepath is None:
            # Default file name is derived from the trainer's current step.
            global_step = self.trainer.global_step
            filename = f"step_{global_step}.ckpt"
            filepath = self.checkpoint_dir.joinpath(filename)
        list_path = filepath.parent.joinpath("latest_checkpoint")
        torch.save(state_dict, filepath)
        previous_checkpoints = get_previous_checkpoints(filepath)
        # The newest checkpoint goes first in the index file.
        if filepath.name not in previous_checkpoints:
            previous_checkpoints = [filepath.name] + previous_checkpoints
        if len(previous_checkpoints) > max_keep:
            # Prune everything beyond the max_keep most recent entries.
            for ckpt in previous_checkpoints[max_keep:]:
                path = self.checkpoint_dir.joinpath(ckpt)
                if path.exists():
                    logger.info(f"Removing old checkpoint: {path}")
                    path.unlink()
            previous_checkpoints = previous_checkpoints[:max_keep]
        with open(list_path, 'w') as fp:
            fp.write("\n".join(previous_checkpoints))
        logger.info(f"Saved checkpoint to: {filepath}")
    def checkpoint_exists(self) -> bool:
        """Return True if any ``*.ckpt`` file exists in the checkpoint dir."""
        num_checkpoints = len(list(self.checkpoint_dir.glob("*.ckpt")))
        return num_checkpoints > 0
    def load_checkpoint(self) -> dict:
        """Load the newest checkpoint via the module-level ``load_checkpoint``."""
        checkpoint = load_checkpoint(self.checkpoint_dir)
        return checkpoint
| [
"hakon.hukkelas@ntnu.no"
] | hakon.hukkelas@ntnu.no |
2f444de4f83c1de8d52ca75fa8612f9a548c98c8 | f9eb0b98326dc23b4b5b2f3b4c135533c53846f9 | /backend/test_app_19327/settings.py | 95b4b4246d7c9e1dad5636e2c94397fa2e4f1ea7 | [] | no_license | crowdbotics-apps/test-app-19327 | 132fbc0403dd2c8cb009e90d9cde06c0348f8940 | 932ad7293e5a1c26a9eaccfdd9b6973df309f624 | refs/heads/master | 2022-11-27T12:00:43.480308 | 2020-08-03T03:56:32 | 2020-08-03T03:56:32 | 284,598,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,936 | py | """
Django settings for test_app_19327 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# django-environ reader: all deploy-specific values come from the environment.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
# NOTE(review): the default "*" accepts any Host header -- acceptable behind a
# trusted proxy, confirm for production deployments.
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "task",
    "task_profile",
    "tasker_business",
    "location",
    "wallet",
    "task_category",
]
# Apps implemented inside this repository.
LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
# Reusable third-party apps (DRF, auth/registration, API docs, push).
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "test_app_19327.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "test_app_19327.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# DATABASE_URL (e.g. postgres://...) overrides the sqlite default when set.
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
# WhiteNoise serves the collected static files directly from the app server.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing e-mail via SendGrid SMTP (credentials from the environment).
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b509bda484c47b617a3da44196e39dfb565938bc | 81b20a9c51779c21b779ac0b1c5bf669359521ef | /py_object_detection/tf_api/object_detection/utils/np_box_ops_test.py | c7cdef0dcf7817fb6f44cf2a38d2ebb1a332891d | [] | no_license | thekindler/py-object-detection | bae1401f025458605c9244f9a763e17a0138d2ec | a8d13c496bab392ef5c8ad91a20fbfa9af1899bb | refs/heads/master | 2023-06-23T02:42:08.180311 | 2021-07-17T18:40:46 | 2021-07-17T18:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,511 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_box_ops."""
import numpy as np
import tensorflow as tf
from py_object_detection.tf_api.object_detection.utils import np_box_ops
class BoxOpsTests(tf.test.TestCase):
  """Exercises np_box_ops area/intersection/iou/ioa on small hand-checked boxes."""

  def setUp(self):
    # Fixture boxes as corner-coordinate rows (float64).
    self.boxes1 = np.array(
        [[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
    self.boxes2 = np.array(
        [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
         [0.0, 0.0, 20.0, 20.0]], dtype=float)

  def testArea(self):
    self.assertAllClose(np.array([6.0, 5.0], dtype=float),
                        np_box_ops.area(self.boxes1))

  def testIntersection(self):
    expected = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float)
    self.assertAllClose(np_box_ops.intersection(self.boxes1, self.boxes2),
                        expected)

  def testIOU(self):
    expected = np.array(
        [[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],
        dtype=float)
    self.assertAllClose(np_box_ops.iou(self.boxes1, self.boxes2), expected)

  def testIOA(self):
    first = np.array(
        [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32)
    second = np.array(
        [[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
    expected = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
    self.assertAllClose(np_box_ops.ioa(second, first), expected)
if __name__ == '__main__':
tf.test.main()
| [
"uniquetrij@gmail.com"
] | uniquetrij@gmail.com |
2d5284a76126dcab7570f00318cb2f8415d16578 | 0798277f2706998ab80442ac931579eb47f676e5 | /tests/unit/boundary/plugin_manifest_test.py | 90b98b0cac00a047e428d72e00ba6daa3f44eb9e | [
"Apache-2.0"
] | permissive | isabella232/pulse-api-cli | 49ed38b0694ab289802f69ee6df4911cf3378e3f | b01ca65b442eed19faac309c9d62bbc3cb2c098f | refs/heads/master | 2023-03-18T00:23:15.295727 | 2016-05-13T15:44:08 | 2016-05-13T15:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | #!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from boundary import PluginManifest
import os.path
class TestPluginManifest(TestCase):
    """Unit tests for PluginManifest against the bundled plugin.json fixture."""

    def setUp(self):
        self.filename = os.path.join(os.path.dirname(__file__), 'plugin.json')
        self.pm = PluginManifest(self.filename)
        self.pm.load()

    def test_load(self):
        # Loading the fixture from scratch must not raise.
        manifest = PluginManifest(self.filename)
        manifest.load()

    def test_check_data_members(self):
        # Every manifest field mirrors the corresponding plugin.json entry.
        expectations = [
            ('Boundary README Test', self.pm.name),
            ('Example plugin.json for testing README.md generation', self.pm.description),
            ('2.0', self.pm.version),
            ('meter', self.pm.tags),
            ('icon.png', self.pm.icon),
            ('node index.js', self.pm.command),
            ('boundary-meter init.lua', self.pm.command_lua),
            ('npm install', self.pm.post_extract),
            ('', self.pm.post_extract_lua),
            ('node_modules', self.pm.ignore),
            (['BOUNDARY_README_METRIC'], self.pm.metrics),
        ]
        for expected, actual in expectations:
            self.assertEqual(expected, actual)

    def test_check_for_param_array(self):
        self.assertTrue(self.pm.param_array is not None)

    def test_check_param_array(self):
        pass
| [
"davidg@boundary.com"
] | davidg@boundary.com |
db9918c077407392955be4de341b11d7501172dc | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/core/_http_status_codes.py | 85caf0ab0ff6314cdfa7b0d086dac6a33def6c69 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 1,949 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# Mapping of HTTP status code -> standard reason phrase.
# NOTE(review): the stdlib http.HTTPStatus carries the same data; this local
# table presumably exists to keep lookups dependency-free -- confirm.
HTTP_STATUS_CODES = {
    # 1xx: informational
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    103: "Early Hints",
    # 2xx: success
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non-Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi-Status",
    208: "Already Reported",
    226: "IM Used",
    # 3xx: redirection
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    306: "(Unused)",
    307: "Temporary Redirect",
    308: "Permanent Redirect",
    # 4xx: client errors
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Payload Too Large",
    414: "URI Too Long",
    415: "Unsupported Media Type",
    416: "Range Not Satisfiable",
    417: "Expectation Failed",
    421: "Misdirected Request",
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    425: "Too Early",
    426: "Upgrade Required",
    428: "Precondition Required",
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    451: "Unavailable For Legal Reasons",
    # 5xx: server errors
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    506: "Variant Also Negotiates",
    507: "Insufficient Storage",
    508: "Loop Detected",
    510: "Not Extended",
    511: "Network Authentication Required",
}
| [
"elmernocon@gmail.com"
] | elmernocon@gmail.com |
608cb50deab55d098e240fcbd4c2e54b2ec5c049 | 87b006149b16a3028385fc58cf781f5a12c94ad9 | /PyFunceble/checker/syntax/domain.py | 268e6fd1c5220848ff184b3c695321675860af88 | [
"Apache-2.0"
] | permissive | spirillen/PyFunceble | 04d03b2678ad46ec81c520a32df5397832414451 | 3c8f62062bffa0e16d465c150a853af8bf2f2205 | refs/heads/master | 2023-05-12T04:32:04.587521 | 2022-11-20T11:19:06 | 2022-11-20T11:19:06 | 237,827,167 | 2 | 0 | Apache-2.0 | 2021-01-27T10:09:59 | 2020-02-02T19:50:47 | Python | UTF-8 | Python | false | false | 4,819 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the general domain syntax checker.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/latest/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2022 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from sqlalchemy.orm import Session
from PyFunceble.checker.syntax.base import SyntaxCheckerBase
from PyFunceble.checker.syntax.domain_base import DomainSyntaxCheckerBase
from PyFunceble.checker.syntax.second_lvl_domain import SecondLvlDomainSyntaxChecker
from PyFunceble.checker.syntax.status import SyntaxCheckerStatus
from PyFunceble.checker.syntax.subdomain import SubDomainSyntaxChecker
class DomainSyntaxChecker(DomainSyntaxCheckerBase, SyntaxCheckerBase):
    """
    Provides an interface to check the syntax of a domain.

    A subject is considered syntactically valid when it is either a valid
    second level domain or a valid subdomain; both checks are delegated to
    the dedicated checkers held as attributes below.

    :param str subject:
        Optional, The subject to work with.
    """
    second_level_checker: Optional[SecondLvlDomainSyntaxChecker] = None
    subdomain_checker: Optional[SubDomainSyntaxChecker] = None
    def __init__(
        self, subject: Optional[str] = None, db_session: Optional[Session] = None
    ) -> None:
        # Delegate checkers are created empty; subject_propagator() keeps
        # their subject in sync with ours.
        self.second_level_checker: SecondLvlDomainSyntaxChecker = (
            SecondLvlDomainSyntaxChecker()
        )
        self.subdomain_checker: SubDomainSyntaxChecker = SubDomainSyntaxChecker()
        self.db_session = db_session
        # NOTE(review): presumably the base __init__ stores the subject and
        # triggers subject_propagator() -- confirm in DomainSyntaxCheckerBase.
        super().__init__(subject)
    def subject_propagator(self) -> "DomainSyntaxChecker":
        """
        Propagate the currently set subject to the delegate checkers and
        reset the status object.

        .. warning::
            You are not invited to run this method directly.
        """
        self.second_level_checker.subject = self.idna_subject
        self.subdomain_checker.subject = self.idna_subject
        self.status = SyntaxCheckerStatus()
        self.status.subject = self.subject
        self.status.idna_subject = self.idna_subject
        return self
    @DomainSyntaxCheckerBase.ensure_subject_is_given
    def is_valid(self) -> bool:
        """
        Validate the given subject: valid second level domain OR valid subdomain.
        """
        return self.is_valid_second_level() or self.is_valid_subdomain()
    @DomainSyntaxCheckerBase.ensure_subject_is_given
    def is_valid_second_level(self) -> bool:
        """
        Checks if the given subject is a valid second level domain.
        """
        return self.second_level_checker.is_valid()
    @DomainSyntaxCheckerBase.ensure_subject_is_given
    def is_valid_subdomain(self) -> bool:
        """
        Checks if the given subject is a valid subdomain.
        """
        return self.subdomain_checker.is_valid()
| [
"contact@funilrys.com"
] | contact@funilrys.com |
2ddc209c64e0d3b7f48b386b986175dc2a0696f4 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /python/ZprimeToBB_M_650_TuneD6T_8TeV_pythia6_cff.py | 3d24605a0fea9ff575cb153617003dfa5b732caf | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUED6TSettings_cfi import *
source = cms.Source("EmptySource")
# Pythia6 generator filter: q qbar -> Z' (m = 650 GeV) -> b bbar at 8 TeV.
# The MDME switches below turn every Z' decay channel off except b bbar.
generator = cms.EDFilter("Pythia6GeneratorFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    comEnergy = cms.double(8000.0),
    crossSection = cms.untracked.double(39.43),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        processParameters = cms.vstring('PMAS(32,1)= 650 !mass of Zprime',
            'MSEL=0 !(D=1) to select between full user control (0, then use MSUB) and some preprogrammed alternative',
            'MSTP(44) = 3 !only select the Z process',
            'MSUB(141) = 1 !ff gamma z0 Z0',
            'MDME(289,1)= 0 !d dbar',
            'MDME(290,1)= 0 !u ubar',
            'MDME(291,1)= 0 !s sbar',
            'MDME(292,1)= 0 !c cbar',
            'MDME(293,1)= 1 !b bar',
            'MDME(294,1)= 0 !t tbar',
            'MDME(295,1)= 0 !4th gen Q Qbar',
            'MDME(296,1)= 0 !4th gen Q Qbar',
            'MDME(297,1)= 0 !e e',
            'MDME(298,1)= 0 !neutrino e e',
            'MDME(299,1)= 0 ! mu mu',
            'MDME(300,1)= 0 !neutrino mu mu',
            'MDME(301,1)= 0 !tau tau',
            'MDME(302,1)= 0 !neutrino tau tau',
            'MDME(303,1)= 0 !4th generation lepton',
            'MDME(304,1)= 0 !4th generation neutrino',
            'MDME(305,1)= 0 !W W',
            'MDME(306,1)= 0 !H charged higgs',
            'MDME(307,1)= 0 !Z',
            'MDME(308,1)= 0 !Z',
            'MDME(309,1)= 0 !sm higgs',
            'MDME(310,1)= 0 !weird neutral higgs HA'),
        parameterSets = cms.vstring('pythiaUESettings',
            'processParameters')
    )
)
ProductionFilterSequence = cms.Sequence(generator)
# Bookkeeping metadata recorded with the produced dataset.
configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.1. %'),
    annotation = cms.untracked.string('default documentation string for ZprimeToBB_M_650_8TeV_TuneD6T_cff.py'),
    name = cms.untracked.string('$Source: /cvs/CMSSW/CMSSW/Configuration/GenProduction/python/Attic/ZprimeToBB_M_650_TuneD6T_8TeV_pythia6_cff.py,v $')
)
| [
"david.renhwa.yu@gmail.com"
] | david.renhwa.yu@gmail.com |
7176a5ec3ca8875210b92c35776d110e8189618a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/279/83120/submittedfiles/testes.py | 52217fe9363f472622d54e36123bc347cd4dab68 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def primo(n):
    """Return True if the integer n is prime, False otherwise.

    Fixes the original version, which ignored its argument (re-reading a
    float from stdin), used the undefined names ``true``/``false``, passed a
    float to ``range`` (TypeError) and reported every n < 2 as prime.
    """
    n = int(n)
    if n < 2:
        return False
    # Trial division up to sqrt(n) is sufficient to find any factor.
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True

print(primo(8))
print(primo(11))
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c4f9185a0dc66cd639d3ae37889c57d4ccd27b3e | 12cfd1678317f40ca778b8c65d7323637c8c358d | /2018腾讯广告算法大赛/team1_lgb&nffm/src/018_rong_p.py | f091eb254298ddd7ff38313ee25ecd2156e4ad7d | [] | no_license | moonlight1776/competitions | e73d0ce358d2b5662b801820fd2f6434f2af4e2e | a372a2051a2a1580feb280ce6842a785459cf8d1 | refs/heads/master | 2022-03-21T09:31:20.276644 | 2019-11-09T05:37:46 | 2019-11-09T05:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | import pandas as pd
from sklearn.metrics import roc_auc_score
# Ground-truth labels of the validation split.
evals_y = pd.read_csv('data_preprocessing/evals_y.csv',header=None)[0].values
print('evals_ypre...')
# Validation-split predictions of the 10 base models, one column per model.
evals_ypre = pd.DataFrame()
for i in range(1,10):
    col_name = 'model'+str(i)
    evals_ypre[col_name] = pd.read_csv('data_preprocessing/evals_ypre_'+str(i)+'_p.csv',header=None)[0].values
# Fix: ``model10`` was an undefined bare name (NameError at runtime); the
# column label must be the string 'model10', consistent with the loop above.
evals_ypre['model10'] = pd.read_csv('data_preprocessing/evals_ypre.csv',header=None)[0].values
print('ytest...')
# Matching test-set predictions of the same 10 models.
ytest = pd.DataFrame()
for i in range(1,10):
    col_name = 'model'+str(i)
    ytest[col_name] = pd.read_csv('data_preprocessing/submission2_'+str(i)+'_p.csv')['score'].values
ytest['model10'] = pd.read_csv('data_preprocessing/submission2.csv')['score'].values
def searchBest(y1, y2):
    """Grid-search the convex blend of two prediction vectors.

    For each even weight i in [0, 100], score the blend
    ``(i*y1 + (100-i)*y2) / 100`` against the global ``evals_y`` labels with
    ROC-AUC, and return a Series mapping i -> AUC.

    Fix: the original grew the result with ``Series.append`` inside the
    loop, an API deprecated in pandas 1.4 and removed in pandas 2.0.
    """
    weights = list(range(0, 102, 2))
    scores = [roc_auc_score(evals_y, (i * y1 + (100 - i) * y2) / 100)
              for i in weights]
    return pd.Series(scores, index=weights)
# Rank the model columns by validation AUC (scaled to int so it can serve as
# a sortable index key), best model first.
ind = []
for co in evals_ypre.columns:
    ind.append(int(roc_auc_score(evals_y,evals_ypre[co].values)*1000000))
col_sort_descend = list(pd.Series(evals_ypre.columns,index=ind).sort_index(ascending=False).values)
# Seed the running AUC history with the best single model's score.
auc = [(pd.Series(evals_ypre.columns,index=ind).sort_index(ascending=False).index[0]).astype(int)/1000000]
import time
num = 0
# Greedy forward ensembling: fold each next-best model into the running
# blend using the weight that maximises validation AUC (via searchBest);
# the same weight is applied to the test-set predictions.
for co in col_sort_descend:
    if num==0:
        # First (best) model starts the blend as-is.
        evals_ypre_ronghe = evals_ypre[co].values
        ytest_ronghe = ytest[co].values
        print(num+1,auc[0])
        print('\n')
        del ytest[co]
        del evals_ypre[co]
    else:
        s = time.time()
        print(num+1)
        se = searchBest(evals_ypre_ronghe,evals_ypre[co].values)
        print(se.sort_values(ascending=False).head(1))
        auc.append(se.sort_values(ascending=False).values[0])
        # k = best weight (percent) for the current blend vs the new model.
        k = se.sort_values(ascending=False).index[0]
        evals_ypre_ronghe = (evals_ypre_ronghe*k+evals_ypre[co].values*(100-k))/100
        ytest_ronghe = (ytest_ronghe*k+ytest[co].values*(100-k))/100
        print(evals_ypre_ronghe.mean())
        print(ytest_ronghe.mean())
        print(roc_auc_score(evals_y,evals_ypre_ronghe))
        print(int(time.time()-s),"s")
        print('\n')
        del ytest[co]
        del evals_ypre[co]
    num+=1
# Persist the blended validation predictions and the final submission file.
pd.Series(evals_ypre_ronghe).to_csv('data_preprocessing/evals_ypre_p.csv',index=False)
sub = pd.read_csv('data_preprocessing/aid_uid_test2_p.csv')
print(sub.head())
sub['score'] = round(pd.Series(ytest_ronghe),6).values
print(sub['score'].describe())
sub.to_csv('data_preprocessing/submission2_p.csv',index=False)
print(sub.head())
# AUC history per ensembling step (1-based index for readability).
auc = pd.Series(auc)
auc.index = auc.index+1
print(auc)
"327643958@qq.com"
] | 327643958@qq.com |
70341145fe084a8b9c89b778e44d080d3d3d3a69 | dbbdf35bff726681ae34ad08eeda5f30929e2ae9 | /supervised_learning/0x01-multiclass_classification/0-main.py | a35603a1520673a30e039e11a57e311dc3627467 | [] | no_license | jorgezafra94/holbertonschool-machine_learning | 0b7f61c954e5d64b1f91ec14c261527712243e98 | 8ad4c2594ff78b345dbd92e9d54d2a143ac4071a | refs/heads/master | 2023-02-03T20:19:36.544390 | 2020-12-21T21:49:10 | 2020-12-21T21:49:10 | 255,323,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | #!/usr/bin/env python3
import numpy as np
# __import__ is required because the module name "0-one_hot_encode" starts
# with a digit and contains a dash, so a plain import statement cannot name it.
oh_encode = __import__('0-one_hot_encode').one_hot_encode
lib = np.load('./data/MNIST.npz')
# Demo: one-hot encode the first 10 training labels into 10 classes.
Y = lib['Y_train'][:10]
print(Y)
Y_one_hot = oh_encode(Y, 10)
print(Y_one_hot)
| [
"947@holbertonschool.com"
] | 947@holbertonschool.com |
69bcab88916383854717fab301464b88af93fec8 | 7f20b1bddf9f48108a43a9922433b141fac66a6d | /csplugins/trunk/ucsd/rsaito/rs_Progs/rs_Python/rs_Python_Pack/trunk/CellCyc_Packages/ANALYSIS_SCR_CC1/path_exp_detail2_cgi1.py | 49e7fc575e838a83933ac7afa41270fea16b49d4 | [] | no_license | ahdahddl/cytoscape | bf783d44cddda313a5b3563ea746b07f38173022 | a3df8f63dba4ec49942027c91ecac6efa920c195 | refs/heads/master | 2020-06-26T16:48:19.791722 | 2013-08-28T04:08:31 | 2013-08-28T04:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,794 | py | #!/usr/bin/env python
import os, sys
import datetime
import os
import socket
# NOTE: this is a Python 2 CGI script (print statement, backquote repr).
# Per-process stamp (host, pid, timestamp); only used by the commented-out
# debug print at the bottom.
hostname = socket.gethostname()
curr = datetime.datetime.today()
t_stamp = curr.strftime("%Y%m%d%H%M%S")
pid = os.getpid()
fstamp = "%s_%s-%s" % (hostname, pid, t_stamp)
""" This is machine-specific settings. """
add_path = [
    "/Users/rsaito/UNIX/Work/Research/rs_Progs/rs_Python",
    "/Users/rsaito/UNIX/Work/Research/rs_Progs/rs_Python/rs_Python_Pack",
    "/Users/rsaito/UNIX/Work/Research/rs_Progs/rs_Python/rs_Python_Pack/General_Packages"
    ]
""" ---------------------------------- """
# Make the project packages importable and point the config env vars at the
# local checkout before importing anything project-specific.
for path in add_path:
    if path not in sys.path:
        sys.path.append(path)
if 'PYTHON_RS_CONFIG' not in os.environ:
    os.environ['PYTHON_RS_CONFIG'] = '/Users/rsaito/UNIX/Work/Research/rs_Progs/rs_Python/rs_Python_Config'
os.environ['home'] = "/Users/rsaito"
os.environ['HOME'] = "/Users/rsaito"
from WEB.CGI_BasicI import cgi_html_out
import Usefuls.rsConfig
rsc_cellcyc = Usefuls.rsConfig.RSC_II("rsCellCyc_Config")
import CellCyc_Packages.CellCyc_Path.Path_Expr_detail2 as Path_expr_detail
Node_None_Mark = '-'
import cgi
form = cgi.FieldStorage()
# Each CGI field falls back to a default when absent, empty or whitespace;
# spaces are stripped from submitted values.
if ("Start_Node" in form
    and form["Start_Node"].value != ""
    and (not form["Start_Node"].value.isspace())):
    start_node_name = form["Start_Node"].value.replace(" ", "")
else:
    start_node_name = "CyclinD"
if ("Goal_Node" in form
    and form["Goal_Node"].value != ""
    and (not form["Goal_Node"].value.isspace())):
    goal_node_name = form['Goal_Node'].value.replace(" ", "")
else:
    goal_node_name = "E2F1_DP"
if ("Mediate_Node" in form
    and form["Mediate_Node"].value != ""
    and (not form["Mediate_Node"].value.isspace())):
    mediate_node_name = form['Mediate_Node'].value.replace(" ", "")
else:
    mediate_node_name = Node_None_Mark
if ("Extra_Steps" in form
    and form["Extra_Steps"].value != ""
    and (not form["Extra_Steps"].value.isspace())):
    # Internally one step more than what the user asked for; the +1 is
    # undone for display further below.
    extra_steps = int(form['Extra_Steps'].value) + 1
else:
    extra_steps = 3
if ("Expr_Data" in form
    and form["Expr_Data"].value != ""
    and (not form["Expr_Data"].value.isspace())):
    expr_data = form['Expr_Data'].value.replace(" ", "")
else:
    expr_data = "Simons"
if ("Judge_On" in form
    and form["Judge_On"].value != ""
    and (not form["Judge_On"].value.isspace())):
    judge_on_formula_str = form['Judge_On'].value.replace(" ", "")
else:
    judge_on_formula_str = None
if ("TS_Path_Count" in form
    and form[ "TS_Path_Count" ].value == "ON"):
    ts_path_count = True
else:
    ts_path_count = False
# The '-' marker means "no mediating node requested".
if mediate_node_name == Node_None_Mark:
    via_node_name = None
else:
    via_node_name = mediate_node_name
judge_on_formula_str = judge_on_formula_str
if judge_on_formula_str:
    judge_on_formula_o = judge_on_formula_str
else:
    judge_on_formula_o = "(Default)"
# Pick the expression data set: Botstein or (default) Simons.
if expr_data == "Botstein":
    path_detail = Path_expr_detail.Path_Expr_detail_Botstein(start_node_name,
                                                             goal_node_name,
                                                             extra_steps,
                                                             via_node_name,
                                                             judge_on_formula_str)
else:
    path_detail = Path_expr_detail.Path_Expr_detail_Simons(start_node_name,
                                                           goal_node_name,
                                                           extra_steps,
                                                           via_node_name,
                                                           judge_on_formula_str)
exp_path = path_detail.output_exp_path() # Pathway calculation with all node on.
path_detail_all_on = path_detail.get_path_search()
goal_path_info = path_detail_all_on.get_info_goal_path()
goal_path_each_info = path_detail_all_on.get_info_I()
if ts_path_count:
    count_path = path_detail.output_count_paths_each_node() # Previous pathway calculations stored in path_detail will be invalid.
else:
    count_path = "Calculation disabled."
# Write the three result artefacts to the configured output locations.
fw = open(rsc_cellcyc.Cell_cyc_path_analysis_result1, "w")
fw.write(goal_path_each_info)
fw.close()
fw = open(rsc_cellcyc.Cell_cyc_path_analysis_result2, "w")
fw.write(count_path)
fw.close()
fw = open(rsc_cellcyc.Cell_cyc_path_analysis_result3, "w")
fw.write(exp_path)
fw.close()
# Fill the result-page template placeholders and emit the HTML response.
html = open(rsc_cellcyc.Cell_cyc_path_html_analysis_result).read()
html = html.replace("[[StartNode]]", start_node_name)
html = html.replace("[[GoalNode]]", goal_node_name)
html = html.replace("[[MediateNode]]", mediate_node_name)
html = html.replace("[[ExtraSteps]]", `extra_steps - 1`)
html = html.replace("[[ExprData]]", expr_data)
html = html.replace("[[JudgeOnFormula]]", judge_on_formula_o)
print cgi_html_out(html)
# print fstamp
"rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5"
] | rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5 |
1028f56bd45a08a52c24ac51d50329d99495df77 | 1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c | /paws/bin/euca-delete-customer-gateway | e6416d738232615592568debc50ca946b75bdde1 | [
"MIT"
] | permissive | cirobessa/receitas-aws | c21cc5aa95f3e8befb95e49028bf3ffab666015c | b4f496050f951c6ae0c5fa12e132c39315deb493 | refs/heads/master | 2021-05-18T06:50:34.798771 | 2020-03-31T02:59:47 | 2020-03-31T02:59:47 | 251,164,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | #!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
# EASY-INSTALL-SCRIPT: 'euca2ools===3.4.1-2-g6b3f62f2','euca-delete-customer-gateway'
# Generated setuptools wrapper: pin the euca2ools distribution and run its
# "euca-delete-customer-gateway" entry script via pkg_resources.
__requires__ = 'euca2ools===3.4.1-2-g6b3f62f2'
__import__('pkg_resources').run_script('euca2ools===3.4.1-2-g6b3f62f2', 'euca-delete-customer-gateway')
| [
"cirobessa@yahoo.com"
] | cirobessa@yahoo.com | |
e5cea628d06e21a644c0264a8a4ac0906fc0c88a | 267c3a8cec5b3c265386195117f845670101129a | /migrations/versions/8eabbe1a6efe_.py | b62a110df8d0d60d6a22f2c4bb203eabb0d35a32 | [
"MIT"
] | permissive | Maxcutex/pm_api | a0dee8795d6457aaf88f868c5d4179f78ef18df3 | de1e1a07feecd1be7bb86a87b4ffed012a05aec0 | refs/heads/development | 2023-02-25T00:43:48.545547 | 2021-02-03T12:46:07 | 2021-02-03T12:46:07 | 323,669,431 | 0 | 0 | MIT | 2021-01-31T18:45:08 | 2020-12-22T15:52:24 | Python | UTF-8 | Python | false | false | 1,688 | py | """empty message
Revision ID: 8eabbe1a6efe
Revises: 8fd3f43355d4
Create Date: 2021-01-20 12:51:28.124210
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "8eabbe1a6efe"
down_revision = "8fd3f43355d4"
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision (8eabbe1a6efe).

    - Adds the nullable text column ``user_education.accomplishments``.
    - Relaxes four ``user_employments`` columns (accomplishments,
      institution_size, institution_url, work_summary) from NOT NULL
      to NULL.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "user_education", sa.Column("accomplishments", sa.Text(), nullable=True)
    )
    op.alter_column(
        "user_employments", "accomplishments", existing_type=sa.TEXT(), nullable=True
    )
    op.alter_column(
        "user_employments",
        "institution_size",
        existing_type=sa.VARCHAR(),
        nullable=True,
    )
    op.alter_column(
        "user_employments", "institution_url", existing_type=sa.VARCHAR(), nullable=True
    )
    op.alter_column(
        "user_employments", "work_summary", existing_type=sa.TEXT(), nullable=True
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision (back to 8fd3f43355d4).

    Restores NOT NULL on the four ``user_employments`` columns and drops
    ``user_education.accomplishments``. NOTE(review): re-adding NOT NULL
    will fail if NULL values were inserted after the upgrade — confirm
    data state before downgrading.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "user_employments", "work_summary", existing_type=sa.TEXT(), nullable=False
    )
    op.alter_column(
        "user_employments",
        "institution_url",
        existing_type=sa.VARCHAR(),
        nullable=False,
    )
    op.alter_column(
        "user_employments",
        "institution_size",
        existing_type=sa.VARCHAR(),
        nullable=False,
    )
    op.alter_column(
        "user_employments", "accomplishments", existing_type=sa.TEXT(), nullable=False
    )
    op.drop_column("user_education", "accomplishments")
    # ### end Alembic commands ###
| [
"ennyboy@gmail.com"
] | ennyboy@gmail.com |
2223827d386d006f82e5ad889a8e61d87c1bb666 | cbfd2f8cbc31573ab54455f3f5398dffcf13f2af | /SwiftBOT/swiftBot.py | f44a35554ac049bc03b3a11b2785fdcacabd092e | [] | no_license | Andreyglass1989/Swift_Avia_Group | ccaf9f1a9bdff8fd9837345ba45915814b17a2a4 | 2c89e170a0fe9a5401fd15552c85c5d73abe1c21 | refs/heads/master | 2022-07-27T09:42:29.863236 | 2019-07-18T04:17:54 | 2019-07-18T04:17:54 | 102,249,849 | 0 | 0 | null | 2022-07-15T20:30:50 | 2017-09-03T08:04:54 | HTML | UTF-8 | Python | false | false | 1,180 | py | # _*_ coding: utf-8 _*_
import telebot
import const
bot = telebot.TeleBot(const.token)
upd = bot.get_updates()
last_upd=upd[-1]
message_from_user = last_upd.message
print(bot.get_me())
def log(message, answer):
    """Print a timestamped console record of an incoming message and the reply.

    message: a telebot message object — reads .from_user (first_name,
             last_name, id) and .text.
    answer:  the reply text the bot sent back.
    Side effect only (prints to stdout); returns None.
    """
    print("\n -------------")
    # Local import keeps the dependency next to its single use.
    from datetime import datetime
    print(datetime.now())
    # Template is Russian: "Message from {first} {last}. (id = {id}) Text - {text}"
    print("Сообщение от {0} {1}. (id = {2}) \n Текст - {3}".format(message.from_user.first_name,
                                                                   message.from_user.last_name,
                                                                   str(message.from_user.id),
                                                                   message.text))
    print(answer)
@bot.message_handler(commands=['help'])
def handle_text(message):
    # /help command: reply with a short capabilities notice
    # (Russian: "My abilities are quite limited").
    bot.send_message(message.chat.id, """ Мои возможности весьма ограничены """)
@bot.message_handler(content_types=['text'])
def handler_text(message):
    """Tiny letter game: 'a' -> 'Б', 'b' -> 'A', anything else gets scolded."""
    # Default reply (Russian: "you don't know how to play").
    answer = u"ты не умеешь играть"
    if message.text == 'a':
        answer = u"Б"
    elif message.text == 'b':
        answer = u"A"
    # Every branch sends the chosen answer and logs it, so do it once here.
    bot.send_message(message.chat.id, answer)
    log(message, answer)
bot.polling(none_stop=True, interval=0) | [
"1989andreyglass@gmail.com"
] | 1989andreyglass@gmail.com |
337c74e511b8b9c443f887109392943d6df43ace | 5cc954e27fd924da0f6f44e7d58691d612a77f80 | /coremltools/converters/mil/mil/program.py | 0689566850dd6563a89f4bfaccb689a93b9e25de | [
"BSD-3-Clause"
] | permissive | 1duo/coremltools | e25f1a8423ec368bf1e7dabfaa36e77952578e79 | 37e619d99bf603d2cb9ea0839fa3ebe649996b0a | refs/heads/master | 2021-07-15T08:48:51.930217 | 2020-07-27T20:58:33 | 2020-07-27T20:58:33 | 203,466,876 | 2 | 0 | BSD-3-Clause | 2020-07-22T00:05:02 | 2019-08-20T22:59:50 | Python | UTF-8 | Python | false | false | 5,760 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import logging as _logging
import numpy as _np
import sympy as _sm
from . import types
from .block import Function
from .var import Var
from .types.symbolic import k_used_symbols, k_num_internal_syms
from coremltools.converters.mil.input_types import InputType
class Program(object):
    """A MIL program: a named collection of Function objects.

    Attributes:
        functions: dict mapping function name -> Function.
        main_input_types: tuple of InputType describing the "main"
            function's inputs (empty dict until set).
        parameters: reserved for program-level parameters (unsupported).
    """

    def __init__(self):
        self.main_input_types = {}
        self.functions = {}
        self.parameters = {}

    def add_function(self, name, ssa_func):
        """Register ``ssa_func`` under ``name``; only Function instances allowed."""
        if not isinstance(ssa_func, Function):
            raise ValueError("Only Function can be added to Program.")
        self.functions[name] = ssa_func

    def add_parameters(self, name, ssa_val):
        # Program-level parameters are not supported yet.
        raise NotImplementedError()

    def set_main_input_types(self, inputs):
        """Set the input specification of "main": a tuple of InputType."""
        if not isinstance(inputs, tuple):
            raise ValueError("main inputs should be tuple of TensorType or ImageType")
        if not all(isinstance(an_input, InputType) for an_input in inputs):
            raise ValueError("main inputs should be tuple of InputSpec")
        self.main_input_types = inputs

    def find_ops(self, prefix=None, op_type=None, exactly_one=False):
        """
        Collect ops across all functions whose name starts with ``prefix``
        and/or whose type equals ``op_type``. At least one of
        {prefix, op_type} must be specified.

        If ``exactly_one`` is True, raise ValueError unless exactly one op
        matches. Returns list[Operation]; empty list if nothing matches.
        """
        matches = []
        for func in self.functions.values():
            matches += func.find_ops(prefix=prefix, op_type=op_type)
        if exactly_one and len(matches) != 1:
            msg = "Found matching ops not exactly one. Found ops: {}"
            raise ValueError(msg.format(matches))
        return matches

    def validate(self):
        """Run each function's own validation."""
        for func in self.functions.values():
            func.validate()

    def __getitem__(self, func_name):
        """Look up a function by name, raising KeyError with context if absent."""
        known = self.functions
        if func_name not in known:
            msg = "Function {} not found in among functions {}."
            raise KeyError(msg.format(func_name, known.keys()))
        return known[func_name]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "".join(
            func.to_str(fname) for fname, func in self.functions.items()
        )
class Placeholder(object):
    # Class-level counter used to mint a globally unique name per instance.
    counter = 0

    def __init__(self, sym_shape, dtype=None, name=None):
        """
        sym_shape: () or [] for scalar. list, tuple, np.ndarray for tensor. May
           contain Symbol as symbolic shape (but not string).

        dtype: types.float or other scalar builtin types. Defaults to
           types.float when None.

        name: NOTE(review) — this parameter is currently ignored: the
           variable name is always auto-generated below ("placeholder_<N>")
           to guarantee global uniqueness. Use set_name() to rename.
        """
        if not isinstance(sym_shape, (list, tuple, _np.generic, _np.ndarray)):
            raise ValueError("Illegal shape for Placeholder: {}".format(sym_shape))
        self.sym_shape = sym_shape
        self.dtype = dtype
        if self.dtype is None:
            self.dtype = types.float
        sym_type = self.type_inference()

        # Globally unique var name for placeholders
        name = "placeholder_" + str(self.__class__.counter)
        self.__class__.counter += 1

        # List of output vars (consistent w/ other ops)
        self.outputs = [Var(name, sym_type)]

    def set_name(self, name):
        # Rename both the placeholder and its single output var.
        self.name = name
        self.outputs[0].name = name

    def type_inference(self):
        # Empty shape means a scalar of self.dtype; otherwise a tensor type.
        if len(self.sym_shape) == 0:
            return self.dtype
        return types.tensor(self.dtype, self.sym_shape)

    def __str__(self):
        return str(self.outputs[0])
def get_new_variadic_symbol():
    """Return a fresh variadic-rank symbol named '*is<N>' and bump the counter."""
    global k_num_internal_syms
    new_sym = Symbol("*is" + str(k_num_internal_syms))
    k_num_internal_syms += 1
    return new_sym
def get_new_symbol(name=None):
    """
    Returns a new symbol, optionally named.

    name: str (optional)
        Optional name that provides more readability. If the name specified is
        not available, an extra integer will be appended.
    """
    global k_used_symbols
    global k_num_internal_syms

    if name is not None:
        s = Symbol(name)
        if s in k_used_symbols:
            # Bug fix: ``name`` is a str and the counter an int, so the
            # original ``name + k_num_internal_syms`` raised TypeError;
            # the counter must be stringified first.
            # NOTE(review): Symbol.__init__ already raises ValueError for a
            # name in k_used_symbols, so this rename branch may be
            # unreachable in practice — confirm intended reuse semantics.
            new_name = name + str(k_num_internal_syms)
            msg = 'Symbol name "{}" already occupied. Renaming to {}'
            _logging.warning(msg.format(name, new_name))
            s = Symbol(new_name)
    else:
        # Anonymous symbols use the internal "is<N>" naming scheme.
        s = Symbol("is" + str(k_num_internal_syms))
    k_num_internal_syms += 1
    return s
class Symbol(_sm.Symbol):
    def __init__(self, sym_name):
        """
        Essentially sympy.Symbol representing an i32 value in shape.

        sym_name: str. If first character is *, then this symbol represents
        variadic rank. Otherwise the symbol name should start with a alpha
        character. `sym_name` must be unique if specified, or it'd be auto
        generated (to a non-variadic symbol). Furthermore, sym_name may not
        start with 'is' (internal symbol)

        Raises ValueError if the name is malformed or already registered.
        NOTE(review): sympy constructs Symbol instances in __new__ (and may
        cache by name); this __init__ only adds the validity checks and the
        module-level registry — confirm the interaction with sympy caching.
        """
        # Name must begin with a letter, or '*' for variadic-rank symbols.
        if not (sym_name[0].isalpha() or sym_name[0] == "*"):
            msg = "Symbol name must start with a letter or *. Got {}"
            raise ValueError(msg.format(sym_name))
        global k_used_symbols
        # Enforce global uniqueness via the shared registry.
        if sym_name in k_used_symbols:
            msg = "Symbol `{}` is used already."
            raise ValueError(msg.format(sym_name))
        k_used_symbols.add(sym_name)
        self.name = sym_name
| [
"noreply@github.com"
] | 1duo.noreply@github.com |
e91a69c4fe7c2e0648061c0b74a6ef9c6fb37d4a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s202062597.py | 91209fd2d4cb533f7a961da354921ab88942e8d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | n = input()
ans = 0
# Can the 4-digit string be read as MM/YY? (first pair a valid month 01-12)
if 1 <= int(n[0:2]) <= 12 and 0 <= int(n[2:4]) <= 99:
    ans += 1
# Can it be read as YY/MM? (second pair a valid month)
# Weights 1 and 3 keep the four outcomes distinct:
#   0 = neither, 1 = MMYY only, 3 = YYMM only, 4 = both (ambiguous).
if 0 <= int(n[0:2]) <= 99 and 1 <= int(n[2:4]) <= 12:
    ans += 3
if ans == 1:
    print('MMYY')
elif ans == 3:
    print('YYMM')
elif ans == 4:
    print('AMBIGUOUS')
else:
    print('NA')
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
01e1d2863a78b84b93853637110561ece1a937a5 | 0b8ae7a9bc185b76c64cb353808da7713c4b8ecd | /baekjoon/[DP]/기초/[DP]*연속합 2.py | 03d8671307af02ce94d965a225dff4d6d3ce8e09 | [] | no_license | snowedev/baekjoon-code.plus | b5f468b99fa70b00521657122f46cef575cabb9b | 2f983a9c559e803f6dcdeb549aa8d304ff5a664c | refs/heads/master | 2023-04-05T22:22:01.652744 | 2021-04-14T16:11:57 | 2021-04-14T16:11:57 | 357,951,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # 연속합 2 # B_1912
"""
# 연속합을 구하고 수 하나를 제거 할 수 있음 그때의 최댓값
# dl[i] = 왼쪽에서부터 구한 연속합 정답
# dr[i] = 오른쪽에서부터 구한 연속합 정답
# 각각의 제거할 수 k에 대해서 dl[k-1]+dr[k+1]의 최대값이 정답이 된다
"""
n = int(input())
a = list(map(int, input().split()))
d = [0]*n
dr = [0]*n
for i in range(n):
d[i] = a[i]
if i == 0:
continue
if d[i] < d[i-1] + a[i]:
d[i] = d[i-1] + a[i]
for i in range(n-1,-1,-1):
dr[i] = a[i]
if i == n-1:
continue
if dr[i] < dr[i+1] + a[i]:
dr[i] = dr[i+1] + a[i]
ans = max(d)
for i in range(1, n-1):
if ans < d[i-1] + dr[i+1]:
ans = d[i-1] + dr[i+1]
print(ans) | [
"wurijang@gmail.com"
] | wurijang@gmail.com |
60f9a6306c9eaed00e20d36667348e05e895812c | 721e7657258a30065067ddc5ca56480716853229 | /imitation/mario/imitation_model_darla.py | ee1695d1454d6bf88d448f5f56bbc9790cf6c8e3 | [] | no_license | buoyancy99/sap | c40f2a9dd81f23cc7f69cabccd374730a374c28a | 59293c28477350217c8b7967e5353bded81edc92 | refs/heads/master | 2023-05-05T01:09:58.824618 | 2021-05-02T21:42:46 | 2021-05-02T21:42:46 | 363,755,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | import tensorflow as tf
import os
from tensorflow.keras import Input, layers
from imitation.mario.dataloader_darla import imitation_dataset
from config.mario_config import config
import math
import numpy as np
from DARLA.mario.beta_vae.model import Model as Beta_VAE_Net
import torch
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
gpu_config.log_device_placement = True
sess = tf.Session(config=gpu_config)
action_space = len(config["movement"])
trained_on = config["trained_on"]
class imitation_model:
    """Keras MLP that imitates actions from DARLA beta-VAE encodings (128-d).

    mode='train': builds the model wired to the imitation dataset iterator.
    mode='eval' : builds a plain model, loads saved weights, and loads the
    frozen beta-VAE encoder used to embed raw observations in predict().
    """

    def __init__(self, model_path='imitation/mario/ckpts', mode='eval'):
        # Checkpoint path is suffixed with the training-level tag from config.
        self.model_path = os.path.join(model_path, 'imitation_{}_darla'.format(trained_on))
        self.mode = mode
        self.model = self.make_model()
        if mode == 'eval':
            print('loading trained model')
            self.model.load_weights(self.model_path)
            # Frozen beta-VAE encoder (PyTorch) used to embed observations.
            self.vae = Beta_VAE_Net().cuda()
            self.vae.load_state_dict(torch.load('DARLA/mario/beta_vae/ckpts/latest.model')['model_state_dict'])
            for param in self.vae.parameters():
                param.requires_grad = False
            print('model loaded')

    def make_model(self):
        """Build the 128 -> 128 -> 64 -> action_space softmax classifier."""
        if self.mode == 'train':
            # Training input comes straight off the dataset's tensor pipeline.
            dataset = imitation_dataset('{}/Datasets/MarioImitationDarla'.format(os.getenv('DATASET_ROOT')))
            obs_input, action_gt = dataset.get_next()
            obs_input = Input(tensor=obs_input)
        elif self.mode == 'eval':
            # Eval input is a plain 128-d VAE encoding fed via predict().
            obs_input = Input(shape=[128])
        obs_feature = layers.Dense(128, activation='relu')(obs_input)
        obs_feature = layers.Dense(64, activation='relu')(obs_feature)
        action_out = layers.Dense(action_space, activation='softmax')(obs_feature)
        model = tf.keras.Model(inputs=[obs_input], outputs=[action_out])
        if self.mode == 'train':
            # target_tensors wires labels from the dataset iterator directly.
            model.compile(optimizer='adam',
                          loss='sparse_categorical_crossentropy',
                          metrics=['accuracy'],
                          target_tensors=[action_gt]
                          )
        else:
            model.compile(optimizer='adam',
                          loss='sparse_categorical_crossentropy',
                          metrics=['accuracy']
                          )
        return model

    def train(self, epochs=2):
        """Fit on the dataset pipeline with a decaying LR, then save weights."""
        print(self.model.summary())

        def scheduler(epoch):
            # Constant LR for the first epoch, then exponential decay.
            if epoch < 1:
                return 0.0005
            else:
                return 0.0005 * math.exp(- 0.3 * epoch)

        callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
        self.model.fit(epochs=epochs, steps_per_epoch=100000, callbacks=[callback])
        self.model.save_weights(self.model_path)

    def predict(self, obs):
        """Return (argmax actions, action distributions) for a batch of frames.

        obs: numpy batch, channels-last; assumes the first channel is dropped
        deliberately (obs[..., 1:]) — TODO confirm channel layout upstream.
        """
        obs = torch.from_numpy(obs[:, :, :, 1:]).float().cuda()
        # NHWC -> NCHW for the PyTorch encoder, then resize to the VAE's 64x64.
        obs = obs.permute(0, 3, 1, 2)
        obs = torch.nn.functional.interpolate(obs, size=(64, 64), mode='bilinear')
        encoding = self.vae.encode(obs).detach().cpu().numpy()
        actions_dist = self.model.predict(encoding)
        actions = np.argmax(actions_dist, 1)
        return actions, actions_dist
if __name__ == "__main__":
model = imitation_model(model_path = 'imitation/mario/ckpts', mode = 'train')
model.train()
| [
"boyuanchen@berkeley.edu"
] | boyuanchen@berkeley.edu |
b568540a167c7650a7ce95de53e1f66cc2f28abf | d581b6a47d6323ce9dcabf17d40c8ac8acfd8cb9 | /lab_takip_programi/tponaybekleyenui.py | 9b93da5ad57e68e2c9a9af9d0857cd14c6211d70 | [] | no_license | sinanurun/Lab_Takip_Program-_PYQT5 | f9b6c8f0108750e939c2d66ac8ed41a30485fd8d | cf8abb968eed65a6559242e4980b1d963d39d9d0 | refs/heads/main | 2023-05-10T10:12:25.086705 | 2021-06-08T09:13:14 | 2021-06-08T09:13:14 | 326,639,874 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'tponaybekleyen.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Onaybekleyen(object):
    """pyuic5-generated UI for the "Pending Approval Requests" widget.

    Do not hand-edit logic here (regenerating from the .ui file overwrites
    it): an 8-column table of pending requests plus a title label.
    """

    def setupUi(self, Onaybekleyen):
        # Build widget hierarchy and fixed geometry (905x601 window).
        Onaybekleyen.setObjectName("Onaybekleyen")
        Onaybekleyen.resize(905, 601)
        self.tableWidget = QtWidgets.QTableWidget(Onaybekleyen)
        self.tableWidget.setGeometry(QtCore.QRect(30, 130, 821, 421))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(8)
        self.tableWidget.setRowCount(0)
        # One placeholder header item per column; texts set in retranslateUi.
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(4, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(5, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(6, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(7, item)
        # Centered 22pt title label above the table.
        self.label = QtWidgets.QLabel(Onaybekleyen)
        self.label.setGeometry(QtCore.QRect(130, 70, 601, 71))
        font = QtGui.QFont()
        font.setPointSize(22)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")

        self.retranslateUi(Onaybekleyen)
        QtCore.QMetaObject.connectSlotsByName(Onaybekleyen)

    def retranslateUi(self, Onaybekleyen):
        # Apply all user-visible (Turkish) strings via Qt's translate hook.
        _translate = QtCore.QCoreApplication.translate
        Onaybekleyen.setWindowTitle(_translate("Onaybekleyen", "Onay Bekleyen Talepler"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("Onaybekleyen", "Talep Id"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("Onaybekleyen", "Talep Adi"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("Onaybekleyen", "Açılış Tarihi"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("Onaybekleyen", "Talep Önemi"))
        item = self.tableWidget.horizontalHeaderItem(4)
        item.setText(_translate("Onaybekleyen", "Talep Tanımı"))
        item = self.tableWidget.horizontalHeaderItem(5)
        item.setText(_translate("Onaybekleyen", "Talep Notu"))
        item = self.tableWidget.horizontalHeaderItem(6)
        item.setText(_translate("Onaybekleyen", "Talep Güncelle"))
        item = self.tableWidget.horizontalHeaderItem(7)
        item.setText(_translate("Onaybekleyen", "Açığa Al"))
        self.label.setText(_translate("Onaybekleyen", "Onay Bekleyen Talepler"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Onaybekleyen = QtWidgets.QWidget()
ui = Ui_Onaybekleyen()
ui.setupUi(Onaybekleyen)
Onaybekleyen.show()
sys.exit(app.exec_())
| [
"sinanurun24@gmail.com"
] | sinanurun24@gmail.com |
dc30b3755bee1ccd1507fe3f36fa7d9d69806ea7 | bed77414283c5b51b0263103ec5055fa70e7ee3a | /accounts/migrations/0006_remove_user_change_username.py | afad0f761630e7d072c2175c69591eafc737dadb | [
"Apache-2.0"
] | permissive | UniversitaDellaCalabria/IdM | a24535156d8ee1f416aec0c0844fbc3e39e08280 | 0c80bc1a192e8f3075c941ca2d89773bca25e892 | refs/heads/master | 2020-12-20T09:09:12.320470 | 2020-07-23T03:43:48 | 2020-07-23T03:43:48 | 236,024,963 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Generated by Django 2.2.10 on 2020-02-19 12:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_user_change_username'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='change_username',
),
]
| [
"francesco.filicetti@unical.it"
] | francesco.filicetti@unical.it |
4cd5f82429b1ab17aeddeecb83cdfba47b20f6d8 | 95fcf7ebe0fa31d93a5792ec2970096840be2fa0 | /submit-blocking_cesar.py | f0be601f7d350cbc4f5c6d3aa5c713fd4f10cd82 | [] | no_license | alphaparrot/rossby-blocking | d40036ff46a590cff115bca5cd65dae20b3872ca | a04beb0ff406f92002cd572387bc1ab23ccf5c48 | refs/heads/master | 2020-03-20T02:28:15.805933 | 2018-07-10T21:29:32 | 2018-07-10T21:29:32 | 137,114,260 | 0 | 0 | null | 2018-06-15T20:32:15 | 2018-06-12T18:48:57 | Jupyter Notebook | UTF-8 | Python | false | false | 1,629 | py | import os
import numpy as np
_BATCHSCRIPT = ("#!/bin/bash \n"+
"#SBATCH --job-name=%s \n"+
"#SBATCH --output=%%j_%s.out \n"+
"#SBATCH --error=%%j_%s.err \n"+
"#SBATCH --ntasks=1 \n"+
"#SBATCH --mem-per-cpu=2000M \n"+
"#SBATCH --account=rossby \n"
"#SBATCH --partition=broadwl \n"+
"#SBATCH --time=16:00:00 \n"+
"module load python \n"+
"cd %s \n")
RUNSTRING = "python noisyblocking.py %d %d %f %f %s \n"
if __name__=="__main__":
niters = 96
nperjob = 16
ntasks = niters//nperjob
namps = 10
for cpeak in np.logspace(-1,np.log10(2.0),num=namps):
for i in range(0,ntasks):
ndir = "cstrength_%1.4f_%d"%(cpeak,i)
os.system("mkdir "+ndir)
f=open(ndir+"/runblocking","w")
name = ndir
txt = _BATCHSCRIPT%(name,name,name,os.getcwd()+"/"+ndir)
txt += RUNSTRING%(nperjob,2,cpeak,1.8,name)
f.write(txt)
f.close()
os.system("cp *.py "+ndir+"/")
os.system("cd "+ndir+" && sbatch runblocking && cd ../")
| [
"paradise@astro.utoronto.ca"
] | paradise@astro.utoronto.ca |
72cd54b8e63ed329a60e271ac6da1a22f15e97e7 | 0f599419483474add8e4fbb7b134708f321315f8 | /script/sitemap.py | 9ebac390103d83e8608ead370433e425bc2e49ba | [
"MIT"
] | permissive | xiaoxiae/Robotics-Simplified-Website | 32889bc7cb1fd04bc2be6d131e3e06a682646fa5 | ab6c683695ecb758f746923bee3c2355ffbc76b9 | refs/heads/master | 2022-10-31T18:24:37.696313 | 2020-09-05T08:12:35 | 2020-09-05T08:12:35 | 161,540,686 | 4 | 0 | MIT | 2022-10-06T03:23:02 | 2018-12-12T20:23:15 | CSS | UTF-8 | Python | false | false | 2,198 | py | """Generates a sitemap.xml file for the website."""
from regex import sub, search, MULTILINE
from datetime import datetime
from os import path, makedirs
from modules.structure import get_docs_structure
def writeURL(url, subpath, file, lastmod = None):
"""Writes information about a specific URL to a file."""
# converts the file subpath to a website path
converted_subpath = subpath.replace(".md", "/")[8:].replace("\\", "/")
# if it's the main topic article, remove the repeating name
converted_subpath = sub(r"(.+)\/\1\/$", "\g<1>/", converted_subpath)
# priority is the deeper the page is
priority = str(1.0 - converted_subpath.count("/") * 0.2)
# creates the url
address = url + "/" + converted_subpath
# if lastmod wasn't specified, generate it from the path
if lastmod == None:
lastmod = (datetime\
.utcfromtimestamp(path.getmtime(subpath))\
.strftime('%Y-%m-%dT%H:%M:%S+00:00'))
# the contents of the xml record, in a list
contents = ["<url>",
"\t<loc>" + address + "</loc>",
"\t<lastmod>" + lastmod + "</lastmod>",
"\t<priority>" + priority + "</priority>",
"</url>"]
for line in contents:
file.write(line + "\n")
# create folder where sitemap.xml will be
if not path.exists(path.join("..", "_site")):
makedirs(path.join("..", "_site"))
# open the file and write the beginning
sitemap = open(path.join("..", "_site", "sitemap.xml"), "w")
sitemap.write(open(path.join("genfiles", "beginning.xml"), "r").read())
# get the url of the website from _config.yml
url = search("^url: \"(.+)\".*\n",\
open(path.join("..", "_config.yml"), "r").read(), MULTILINE).group(1)
# write information about the main page
writeURL(url, "", sitemap,\
datetime.utcfromtimestamp(path.getmtime(path.join("..", "index.md")))\
.strftime('%Y-%m-%dT%H:%M:%S+00:00'))
# write each article in docs/
for subpath in get_docs_structure():
writeURL(url, path.join("..", "docs", subpath), sitemap)
# write the ending and close the file
sitemap.write(open(path.join("genfiles", "ending.xml"), "r").read())
sitemap.close()
| [
"tomas.slama.131@gmail.com"
] | tomas.slama.131@gmail.com |
76752e4b7300de984efc52757142849e05a8ce72 | e2e39726195c7bc075b9bd56e757acd136527d5c | /typings/vtkmodules/vtkInteractionWidgets/vtkResliceCursorThickLineRepresentation.pyi | 303864ac552c01b3a1d0033ab684e7de427a85ff | [
"BSD-3-Clause"
] | permissive | gen4438/vtk-python-stubs | a652272183d2d1ee48d4639e86bcffc1ac454af0 | c9abd76362adf387af64ce5ddbd04c5d3bebe9da | refs/heads/main | 2023-04-04T02:13:15.459241 | 2021-04-15T10:47:28 | 2021-04-15T10:53:59 | 358,224,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,052 | pyi | """
This type stub file was generated by pyright.
"""
from .vtkResliceCursorLineRepresentation import vtkResliceCursorLineRepresentation
class vtkResliceCursorThickLineRepresentation(vtkResliceCursorLineRepresentation):
"""
vtkResliceCursorThickLineRepresentation - represents a thick slab of
the reslice cursor widget
Superclass: vtkResliceCursorLineRepresentation
This class respresents a thick reslice cursor, that can be used to
perform interactive thick slab MPR's through data. The class
internally uses vtkImageSlabReslice to do its reslicing. The slab
thickness is set interactively from the widget. The slab resolution
(ie the number of blend points) is set as the minimum spacing along
any dimension from the dataset.
@sa
vtkImageSlabReslice vtkResliceCursorLineRepresentation
vtkResliceCursorWidget
"""
def CreateDefaultResliceAlgorithm(self):
"""
V.CreateDefaultResliceAlgorithm()
C++: void CreateDefaultResliceAlgorithm() override;
INTERNAL - Do not use Create the thick reformat class. This
overrides the superclass implementation and creates a
vtkImageSlabReslice instead of a vtkImageReslice.
"""
...
def GetNumberOfGenerationsFromBase(self, string):
"""
V.GetNumberOfGenerationsFromBase(string) -> int
C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
override;
Standard VTK methods.
"""
...
def GetNumberOfGenerationsFromBaseType(self, string):
"""
V.GetNumberOfGenerationsFromBaseType(string) -> int
C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
const char *type)
Standard VTK methods.
"""
...
def IsA(self, string):
"""
V.IsA(string) -> int
C++: vtkTypeBool IsA(const char *type) override;
Standard VTK methods.
"""
...
def IsTypeOf(self, string):
"""
V.IsTypeOf(string) -> int
C++: static vtkTypeBool IsTypeOf(const char *type)
Standard VTK methods.
"""
...
def NewInstance(self):
"""
V.NewInstance() -> vtkResliceCursorThickLineRepresentation
C++: vtkResliceCursorThickLineRepresentation *NewInstance()
Standard VTK methods.
"""
...
def SafeDownCast(self, vtkObjectBase):
"""
V.SafeDownCast(vtkObjectBase)
-> vtkResliceCursorThickLineRepresentation
C++: static vtkResliceCursorThickLineRepresentation *SafeDownCast(
vtkObjectBase *o)
Standard VTK methods.
"""
...
def SetResliceParameters(self, p_float, p_float_1, p_int, p_int_1):
"""
V.SetResliceParameters(float, float, int, int)
C++: void SetResliceParameters(double outputSpacingX,
double outputSpacingY, int extentX, int extentY) override;
INTERNAL - Do not use Reslice parameters which are set from
vtkResliceCursorWidget based on user interactions.
"""
...
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
...
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
...
def __init__(self, *args, **kwargs) -> None:
...
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
...
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
...
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
...
def __str__(self, *args, **kwargs) -> str:
""" Return str(self). """
...
__this__ = ...
__dict__ = ...
__vtkname__ = ...
| [
"g1e2n04@gmail.com"
] | g1e2n04@gmail.com |
dd1f4dfeee2260983bccb0b62a2571ac8b26595b | c772a7006b37787a343fa2811da975f5b6c5d5aa | /4-Behavioral Patterns/8-Chain of Responsibility Pattern/Usage 1-Only One Receiver Handles Request/Python/chain_of_responsibility_pattern_test.py | c6314ec584c6b15e15a84f97d86d02035832720d | [
"MIT"
] | permissive | Ziang-Lu/Design-Patterns | 99bcc16a724a08590de180dfceb78108c14e78ae | 7a8167a85456b481aba15d5eee5a64b116b00adc | refs/heads/master | 2021-06-10T12:36:09.383930 | 2021-03-25T07:58:14 | 2021-03-25T07:58:14 | 147,261,584 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | #!usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Application that actually uses Chain of Responsibility Pattern.
In this example, ONLY ONE receiver in the chain handles the request.
"""
__author__ = 'Ziang Lu'
from employee import (
HumanResource, ManagementEmployee, ProjectLeader, TeamLeader
)
def _get_chain_of_responsibility() -> ManagementEmployee:
    """
    Private helper function to get the chain of responsibility.

    Builds the chain TeamLeader -> ProjectLeader -> HumanResource and
    returns its head.

    :return: ManagementEmployee
    """
    team_leader = TeamLeader()

    project_leader = ProjectLeader()
    team_leader.set_supervisor(project_leader)

    hr = HumanResource()
    # Bug fix: HR must supervise the ProjectLeader, not the TeamLeader.
    # The original second call overwrote the TeamLeader's supervisor and
    # left the ProjectLeader out of the chain entirely (the stale sample
    # output at the bottom of this file shows requests jumping straight
    # from TeamLeader to HumanResource).
    project_leader.set_supervisor(hr)

    return team_leader
def main():
    """Drive the demo: submit four leave requests of increasing length."""
    approver = _get_chain_of_responsibility()

    requests = [('David', 9), ('John', 18), ('Steve', 30), ('Rohan', 50)]
    for idx, (dev, days) in enumerate(requests):
        # Blank line between request blocks (but not before the first).
        if idx:
            print()
        approver.handle_developer_leave_request(developer=dev,
                                                requested_leave_days=days)
if __name__ == '__main__':
main()
# Output:
# TeamLeader approves David for a 9-day leave.
#
# TeamLeader cannot approve a 18-day leave. The leave request is handled to HumanResource.
# HumanResource approves John for a 18-day leave.
#
# TeamLeader cannot approve a 30-day leave. The leave request is handled to HumanResource.
# HumanResource approves Steve for a 30-day leave.
#
# TeamLeader cannot approve a 50-day leave. The leave request is handled to HumanResource.
# HumanResource cannot approve a 50-day leave. No more supervisor. The leave request will be rejected.
| [
"ziangl@alumni.cmu.edu"
] | ziangl@alumni.cmu.edu |
3da17bdd0e7aee1fb6946aeaa4b3995fa9d9cdc3 | 3ff4934b3e3845eeb49110f7b73ed9e501e919ab | /try_myself/basic/try_cross_entropy.py | 724f000d2183150fa0a687ecc0d85f6690397a06 | [
"MIT"
] | permissive | giraffe-tree/play-tf | ef09e9b186874dfc99d44250358570227604b40a | 30f39f228d55fdeb35f1bd420b3bb29ecd3ade96 | refs/heads/master | 2020-04-01T13:17:11.818847 | 2018-10-31T05:14:31 | 2018-10-31T05:14:31 | 153,245,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | import tensorflow as tf
import numpy as np
batch_size = 8

# Two input nodes.
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
# One output node.
y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")

# Single-layer forward pass; w1 is the parameter to learn.
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)

# Asymmetric loss: under-prediction (y < y_) is penalized 10x more than
# over-prediction.
loss_less = 10
loss_more = 1
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
                              (y - y_) * loss_more,
                              (y_ - y) * loss_less))

# Exponentially decaying learning rate.
# learning_rate, global_step, decay_steps, decay_rate, staircase = False
# learning_rate * decay_rate ^ (global_step / decay_steps)
# lr = 0.1
# for i in range(100, 300):
#     print(lr)
#     lr = lr * 0.96 ** (i / 100)
global_step = tf.Variable(200)
learning_rate = tf.train.exponential_decay(0.1, global_step, 100, 0.96, staircase=True)

train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

# Generate a simulated dataset from fixed-seed random numbers; labels are
# x1 + x2 plus uniform noise in [-0.05, 0.05).
rdm = np.random.RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[x1 + x2 + rdm.rand() / 10.0 - 0.05] for (x1, x2) in X]
# print(X)
# print(Y)

# Training loop: cycle through the dataset in batches of 8.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    steps = 5000
    for i in range(steps):
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start: end]})
        if i % 100 == 0:
            print(sess.run(learning_rate))
            print(sess.run(global_step))
    print(sess.run(w1))
| [
"15355498770@163.com"
] | 15355498770@163.com |
b8c2ad85fabb9ffc0ff292144a15a3d30da2a70c | 46ef191ca0c170ca1d8afc5eb5134de52eba15f1 | /abc183/venv/Scripts/easy_install-script.py | ba8d6a9cbfec72a3a35c0fc9ac84a07b2f973249 | [] | no_license | anthonyouch/Competitive-Programming- | 9a84cd7ff4b816d2e7ece4e4d6438dbeb23f5795 | 39109a7be1cd007bd0080a9694ac256efc10eab9 | refs/heads/master | 2023-03-04T00:49:00.688118 | 2021-02-05T13:19:46 | 2021-02-05T13:19:46 | 334,131,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!"C:\Users\Anthony Ouch\PycharmProjects\atcoder183\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"anthonyouch.programmer@gmail.com"
] | anthonyouch.programmer@gmail.com |
f4b1e8a6ccd4c26a3ddd8289451e005c138ad0fb | 1a1e85633156d1079874fbc2b48d690635c622a6 | /game_collector_project/game_collector_project/wsgi.py | 7cc4100fe04bd267d7740efdc207641c6067738b | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-mini-project4-autumn-ricky | 5b67ed263f5fe6987bc0c9c93e077e07ea4610f5 | d4831e342b80c495a7fb3893f0cff7ccd1b72eb8 | refs/heads/master | 2020-04-27T01:36:52.427905 | 2019-03-12T14:36:06 | 2019-03-12T14:36:06 | 173,970,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for game_collector_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "game_collector_project.settings")
application = get_wsgi_application()
| [
"raglanda998@gmail.com"
] | raglanda998@gmail.com |
dfb9ffc6a2b60db025abac5e0c79b61054dd6cc5 | 88608583e66b2084a8fe010d18a4bc779a9ea8eb | /samples/senet/train.py | 58719276bb6f47505eee3049eff679e091a527d0 | [] | no_license | UdonDa/torcv | c4f1f1cac99d49a5fe0d3edef6293659d807f292 | 49e548d538933f5eb5a4ffe1cb529914b180dae2 | refs/heads/master | 2020-04-28T05:25:09.183032 | 2019-03-19T02:39:52 | 2019-03-19T02:39:52 | 175,019,302 | 1 | 0 | null | 2019-03-19T02:39:53 | 2019-03-11T14:37:18 | Python | UTF-8 | Python | false | false | 1,430 | py | import sys
sys.path.append('../../')
from torcv.links.model.senet.senet import senet154, se_resnet101, se_resnet50, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d
import torcv.utils.debug.save_image as save_image
from torcv.solver.cifar10.cifar10_solver import Ciffar10_Solver
from torcv.dataset.cifar10 import get_cifar10
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as T
import argparse
import matplotlib.pyplot as plt
import numpy as np
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def main(config):
transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
train_loader, test_loader = get_cifar10(download=False, transform=transform)
model = senet154()
solver = Ciffar10_Solver(config, train_loader=train_loader, test_loader=test_loader, model=model)
solver.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--optimizer', type=str, default='sgd')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--total_epochs', type=float, default=300)
parser.add_argument('--gpu_number', type=str, default='0')
config = parser.parse_args()
main(config) | [
"udoooon0727@gmail.com"
] | udoooon0727@gmail.com |
6b4808d6a072d256c8c5b4de951f541932e66557 | 9e3fd87c9959406b4b01bf2637c4abe9f61d6a11 | /learning/serializers.py | 3a176d09bd7cfce6b40ad398ca70a6df72b2d229 | [
"Apache-2.0"
] | permissive | CiganOliviu/MyWorkflow | 783cea33acf9599cc145249a1c317b3bff51befc | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | refs/heads/main | 2023-06-14T17:03:12.574165 | 2021-07-07T20:39:59 | 2021-07-07T20:39:59 | 382,697,387 | 0 | 0 | Apache-2.0 | 2021-07-03T19:43:59 | 2021-07-03T19:43:59 | null | UTF-8 | Python | false | false | 233 | py | from rest_framework import serializers
from learning.models import CurrentReadingBook
class CurrentReadingBooksSerializer(serializers.ModelSerializer):
class Meta:
model = CurrentReadingBook
fields = '__all__'
| [
"ciganoliviudavid@gmail.com"
] | ciganoliviudavid@gmail.com |
aeda01bf62b640143573e66fac836e7e1d3a0466 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/scl/SourceCodeLookupPlugin.pyi | f1764fd67471bfe00d55355c8c2f22be35b46071 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | pyi | from typing import List
import ghidra.app.plugin
import ghidra.framework.model
import ghidra.framework.options
import ghidra.framework.plugintool
import ghidra.framework.plugintool.util
import ghidra.program.model.listing
import ghidra.program.util
import java.lang
class SourceCodeLookupPlugin(ghidra.app.plugin.ProgramPlugin):
def __init__(self, __a0: ghidra.framework.plugintool.PluginTool): ...
def acceptData(self, __a0: List[ghidra.framework.model.DomainFile]) -> bool: ...
def dataStateRestoreCompleted(self) -> None: ...
def dependsUpon(self, __a0: ghidra.framework.plugintool.Plugin) -> bool: ...
def equals(self, __a0: object) -> bool: ...
def eventSent(self, __a0: ghidra.framework.plugintool.PluginEvent) -> None: ...
def firePluginEvent(self, __a0: ghidra.framework.plugintool.PluginEvent) -> None: ...
def getClass(self) -> java.lang.Class: ...
def getCurrentProgram(self) -> ghidra.program.model.listing.Program: ...
def getData(self) -> List[ghidra.framework.model.DomainFile]: ...
def getMissingRequiredServices(self) -> List[object]: ...
def getName(self) -> unicode: ...
def getPluginDescription(self) -> ghidra.framework.plugintool.util.PluginDescription: ...
@staticmethod
def getPluginName(__a0: java.lang.Class) -> unicode: ...
def getProgramHighlight(self) -> ghidra.program.util.ProgramSelection: ...
def getProgramLocation(self) -> ghidra.program.util.ProgramLocation: ...
def getProgramSelection(self) -> ghidra.program.util.ProgramSelection: ...
def getSupportedDataTypes(self) -> List[java.lang.Class]: ...
def getTool(self) -> ghidra.framework.plugintool.PluginTool: ...
def getTransientState(self) -> object: ...
def getUndoRedoState(self, __a0: ghidra.framework.model.DomainObject) -> object: ...
def hasMissingRequiredService(self) -> bool: ...
def hashCode(self) -> int: ...
def init(self) -> None: ...
def isDisposed(self) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def processEvent(self, __a0: ghidra.framework.plugintool.PluginEvent) -> None: ...
def readConfigState(self, __a0: ghidra.framework.options.SaveState) -> None: ...
def readDataState(self, __a0: ghidra.framework.options.SaveState) -> None: ...
def restoreTransientState(self, __a0: object) -> None: ...
def restoreUndoRedoState(self, __a0: ghidra.framework.model.DomainObject, __a1: object) -> None: ...
def serviceAdded(self, __a0: java.lang.Class, __a1: object) -> None: ...
def serviceRemoved(self, __a0: java.lang.Class, __a1: object) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def writeConfigState(self, __a0: ghidra.framework.options.SaveState) -> None: ...
def writeDataState(self, __a0: ghidra.framework.options.SaveState) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
96f423da48a50be8052166187c166b33ddadd3af | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/pycryptodomex-3.4.7/lib/Crypto/Hash/SHA512.py | 213d14c20385ad71338bcfcdbcd696feef823b4d | [
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Unlicense",
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 5,716 | py | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from Crypto.Util.py3compat import *
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_sha512_lib = load_pycryptodome_raw_lib("Crypto.Hash._SHA512",
"""
int SHA512_init(void **shaState);
int SHA512_destroy(void *shaState);
int SHA512_update(void *hs,
const uint8_t *buf,
size_t len);
int SHA512_digest(const void *shaState,
uint8_t digest[64]);
int SHA512_copy(const void *src, void *dst);
""")
class SHA512Hash(object):
"""A SHA-512 hash object.
Do not instantiate directly. Use the :func:`new` function.
:ivar oid: ASN.1 Object ID
:vartype oid: string
:ivar block_size: the size in bytes of the internal message block,
input to the compression function
:vartype block_size: integer
:ivar digest_size: the size in bytes of the resulting hash
:vartype digest_size: integer
"""
# The size of the resulting hash in bytes.
digest_size = 64
# The internal block size of the hash algorithm in bytes.
block_size = 128
# ASN.1 Object ID
oid = "2.16.840.1.101.3.4.2.3"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_sha512_lib.SHA512_init(state.address_of())
if result:
raise ValueError("Error %d while instantiating SHA512"
% result)
self._state = SmartPointer(state.get(),
_raw_sha512_lib.SHA512_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Args:
data (byte string): The next chunk of the message being hashed.
"""
expect_byte_string(data)
result = _raw_sha512_lib.SHA512_update(self._state.get(),
data,
c_size_t(len(data)))
if result:
raise ValueError("Error %d while instantiating SHA512"
% result)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Binary form.
:rtype: byte string
"""
bfr = create_string_buffer(self.digest_size)
result = _raw_sha512_lib.SHA512_digest(self._state.get(),
bfr)
if result:
raise ValueError("Error %d while instantiating SHA512"
% result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:return: A hash object of the same type
"""
clone = SHA512Hash()
result = _raw_sha512_lib.SHA512_copy(self._state.get(),
clone._state.get())
if result:
raise ValueError("Error %d while copying SHA512" % result)
return clone
def new(self, data=None):
"""Create a fresh SHA-512 hash object."""
return SHA512Hash(data)
def new(data=None):
"""Create a new hash object.
:parameter data:
Optional. The very first chunk of the message to hash.
It is equivalent to an early call to :meth:`SHA512Hash.update`.
:type data: byte string
:Return: A :class:`SHA512Hash` hash object
"""
return SHA512Hash().new(data)
# The size of the resulting hash in bytes.
digest_size = SHA512Hash.digest_size
# The internal block size of the hash algorithm in bytes.
block_size = SHA512Hash.block_size
| [
"yingchen@cloudera.com"
] | yingchen@cloudera.com |
7caf4d23a1c71b788a052223f9660404a10976d2 | 462aa17a08d3628c5ed9b87937a472ccf2128f2f | /setup.py | e44a71bb62d355b134aa9297c9764b5ae0e09bed | [
"Apache-2.0"
] | permissive | hsorby/pypipackagemanagement | 0ce3482b71bcc872d995a25b44125f06938a0e20 | 94696a97393d4f294740ffc5c7bb87db865994eb | refs/heads/master | 2020-05-04T05:04:25.268522 | 2019-04-04T01:33:05 | 2019-04-04T01:33:05 | 178,979,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import os
import re
import codecs
from setuptools import setup
# Single source the version
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='pypipackagemanagement',
version=find_version("src", "pypipackagemanagement", "__init__.py"),
packages=['pypipackagemanagement'],
package_dir={'': 'src'},
install_requires=['packaging'],
url='https://github.com/hsorby/pypipackagemanagement',
license='APACHE License',
author='Hugh Sorby',
author_email='h.sorby@auckland.ac.nz',
description='A package for managing and bumping versions of packages on PyPi.'
)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
af2a13a6039486cc408f02ae303461a685112a74 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-gke-multicloud/samples/generated_samples/gkemulticloud_v1_generated_aws_clusters_update_aws_cluster_sync.py | 3be6027a04dadaf6585708715308ec49341df032 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 2,972 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateAwsCluster
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-gke-multicloud
# [START gkemulticloud_v1_generated_AwsClusters_UpdateAwsCluster_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import gke_multicloud_v1
def sample_update_aws_cluster():
# Create a client
client = gke_multicloud_v1.AwsClustersClient()
# Initialize request argument(s)
aws_cluster = gke_multicloud_v1.AwsCluster()
aws_cluster.networking.vpc_id = "vpc_id_value"
aws_cluster.networking.pod_address_cidr_blocks = ['pod_address_cidr_blocks_value1', 'pod_address_cidr_blocks_value2']
aws_cluster.networking.service_address_cidr_blocks = ['service_address_cidr_blocks_value1', 'service_address_cidr_blocks_value2']
aws_cluster.aws_region = "aws_region_value"
aws_cluster.control_plane.version = "version_value"
aws_cluster.control_plane.subnet_ids = ['subnet_ids_value1', 'subnet_ids_value2']
aws_cluster.control_plane.iam_instance_profile = "iam_instance_profile_value"
aws_cluster.control_plane.database_encryption.kms_key_arn = "kms_key_arn_value"
aws_cluster.control_plane.aws_services_authentication.role_arn = "role_arn_value"
aws_cluster.control_plane.config_encryption.kms_key_arn = "kms_key_arn_value"
aws_cluster.authorization.admin_users.username = "username_value"
aws_cluster.fleet.project = "project_value"
request = gke_multicloud_v1.UpdateAwsClusterRequest(
aws_cluster=aws_cluster,
)
# Make the request
operation = client.update_aws_cluster(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END gkemulticloud_v1_generated_AwsClusters_UpdateAwsCluster_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
cd4c49ba831bd4142c3a648b806b4466aad16070 | b8ea631aae5d132c7b0236684d5f7c12d3c222be | /Library/Graph/SCC.py | 66cf48f32094baf8a1254b8b76e011f3baf42d9a | [] | no_license | Ryushi-tech/card3 | 68c429313142e58d4722a1cd5a4acc4ab39ca41f | 883636b2f518e38343a12816c5c641b60a87c098 | refs/heads/master | 2021-07-05T22:46:33.089945 | 2020-12-12T15:31:00 | 2020-12-12T15:31:00 | 209,176,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | import sys
input = lambda: sys.stdin.readline()
def SCC_Tarjan(g):
n = len(g)
order = [-1] * n # 負なら未処理、[0,n) ならpre-order, n ならvisited
low = [0] * n
ord_now = 0
parent = [-1] * n
gp = [0] * n
gp_num = 0
S = []
q = []
for i in range(n):
if order[i] == -1:
q.append(i)
while q:
v = q.pop()
if v >= 0:
if order[v] != -1: continue
order[v] = low[v] = ord_now
ord_now += 1
S.append(v)
q.append(~v)
for c in g[v]:
if order[c] == -1:
q.append(c)
parent[c] = v
else:
low[v] = min(low[v], order[c])
else:
v = ~v
if parent[v] != -1:
low[parent[v]] = min(low[parent[v]], low[v])
if low[v] == order[v]:
while True:
w = S.pop()
order[w] = n
gp[w] = gp_num
if w == v: break
gp_num += 1
rec = [[] for _ in range(gp_num)]
for i in range(n):
gp[i] = gp_num - gp[i] - 1
rec[gp[i]].append(i)
return gp_num, rec
n, m = map(int, input().split())
G = [[] for _ in range(n)]
for _ in range(m):
a, b = map(int, input().split())
G[a].append(b)
label, GP = SCC_Tarjan(G)
print(label)
for gp in GP:
print(len(gp), *gp[::-1])
| [
"mryrys@gmail.com"
] | mryrys@gmail.com |
cbd48a405bdd6a3b7d34e001fd9570ea0e4a39b5 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/appconfiguration/azure-appconfiguration-provider/samples/key_vault_reference_provided_clients_sample.py | 2373222fe8b13b9fb77baca8e59046d0a1114073 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 1,210 | py | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from azure.appconfiguration.provider import (
load_provider,
AzureAppConfigurationKeyVaultOptions,
SettingSelector
)
from azure.keyvault.secrets import SecretClient
import os
from sample_utilities import get_authority, get_audience, get_credential
endpoint = os.environ.get("AZURE_APPCONFIG_ENDPOINT")
key_vault_uri = os.environ.get("AZURE_KEYVAULT_URI")
authority = get_authority(endpoint)
audience = get_audience(authority)
credential = get_credential(authority)
# Connection to Azure App Configuration using AAD with Provided Client
secret_client = SecretClient(vault_url=key_vault_uri, credential=credential)
selects = {SettingSelector("*", "prod")}
key_vault_options = AzureAppConfigurationKeyVaultOptions(secret_clients=[secret_client])
config = load_provider(endpoint=endpoint, credential=credential, key_vault_options=key_vault_options, selects=selects)
print(config["secret"])
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
2d9174ae0ba34039ccecf9d5c0c206dd6834c68d | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/tutorial/gui/scaleform/battle/layout.py | 596afb933d94e298926ee5db764b14987fb778c8 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 6,910 | py | # 2015.11.10 21:31:10 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/tutorial/gui/Scaleform/battle/layout.py
import weakref
from account_helpers.AccountSettings import AccountSettings
from gui import DEPTH_OF_Aim
from gui.Scaleform.Flash import Flash
from gui.app_loader import g_appLoader
from gui.battle_control import g_sessionProvider
from helpers import i18n
from helpers.aop import Aspect
from tutorial.control.battle.functional import IDirectionIndicator
from tutorial.control import g_tutorialWeaver
from tutorial.gui.Scaleform.battle.legacy import ScaleformLayout
from tutorial.logger import LOG_CURRENT_EXCEPTION, LOG_MEMORY, LOG_ERROR
class _DirectionIndicator(Flash, IDirectionIndicator):
__SWF_FILE_NAME = 'DirectionIndicator.swf'
__FLASH_CLASS = 'WGDirectionIndicatorFlash'
__FLASH_MC_NAME = 'directionalIndicatorMc'
__FLASH_SIZE = (680, 680)
def __init__(self):
Flash.__init__(self, self.__SWF_FILE_NAME, self.__FLASH_CLASS, [self.__FLASH_MC_NAME])
self.component.wg_inputKeyMode = 2
self.component.position.z = DEPTH_OF_Aim
self.movie.backgroundAlpha = 0.0
self.movie.scaleMode = 'NoScale'
self.component.focus = False
self.component.moveFocus = False
self.component.heightMode = 'PIXEL'
self.component.widthMode = 'PIXEL'
self.flashSize = self.__FLASH_SIZE
self.component.relativeRadius = 0.5
self.__dObject = getattr(self.movie, self.__FLASH_MC_NAME, None)
return
def __del__(self):
LOG_MEMORY('DirectionIndicator deleted')
def setShape(self, shape):
if self.__dObject:
self.__dObject.setShape(shape)
def setDistance(self, distance):
if self.__dObject:
self.__dObject.setDistance(distance)
def setPosition(self, position):
self.component.position3D = position
def track(self, position):
self.active(True)
self.component.visible = True
self.component.position3D = position
def remove(self):
self.__dObject = None
self.close()
return
class ShowBattleAspect(Aspect):
def __init__(self):
super(ShowBattleAspect, self).__init__()
self.__skipFirstInvoke = True
def atCall(self, cd):
if self.__skipFirstInvoke:
self.__skipFirstInvoke = False
cd.avoid()
class MinimapDefaultSizeAspect(Aspect):
def __init__(self, uiHolder):
super(MinimapDefaultSizeAspect, self).__init__()
minimap = getattr(uiHolder, 'minimap', None)
if minimap:
setSize = getattr(minimap, 'onSetSize', None)
if setSize and callable(setSize):
try:
setSize(0, AccountSettings.getSettingsDefault('minimapSize'))
except TypeError:
LOG_CURRENT_EXCEPTION()
else:
LOG_ERROR('Minimap method "onSetSize" is not found', minimap)
return
def atCall(self, cd):
if cd.function.__name__ == 'storeMinimapSize':
cd.avoid()
def atReturn(self, cd):
result = cd.returned
if cd.function.__name__ == 'getStoredMinimapSize':
result = AccountSettings.getSettingsDefault('minimapSize')
cd.change()
return result
def normalizePlayerName(pName):
if pName.startswith('#battle_tutorial:'):
pName = i18n.makeString(pName)
return pName
class BattleLayout(ScaleformLayout):
def __init__(self, swf):
super(BattleLayout, self).__init__(swf)
self.__dispatcher = None
return
def _resolveGuiRoot(self):
proxy = None
try:
app = g_appLoader.getDefBattleApp()
if not app:
return
proxy = weakref.proxy(app)
self._guiRef = weakref.ref(app)
dispatcher = self.getDispatcher()
if dispatcher is not None and proxy is not None:
dispatcher.populateUI(proxy)
except AttributeError:
LOG_CURRENT_EXCEPTION()
return proxy
def _setMovieView(self, movie):
dispatcher = self.getDispatcher()
if dispatcher is not None:
dispatcher.findGUI(root=movie)
super(BattleLayout, self)._setMovieView(movie)
return
def _getDirectionIndicator(self):
indicator = None
try:
indicator = _DirectionIndicator()
except AttributeError:
LOG_CURRENT_EXCEPTION()
return indicator
def init(self):
result = super(BattleLayout, self).init()
if result:
g_sessionProvider.getCtx().setNormalizePlayerName(normalizePlayerName)
g_tutorialWeaver.weave('gui.app_loader', '_AppLoader', '^showBattle$', aspects=(ShowBattleAspect,))
g_tutorialWeaver.weave('gui.Scaleform.Minimap', 'Minimap', '^getStoredMinimapSize|storeMinimapSize$', aspects=(MinimapDefaultSizeAspect(self.uiHolder),))
return result
def show(self):
g_appLoader.showBattle()
def clear(self):
if self._guiRef is not None and self._guiRef() is not None:
if self._movieView is not None:
self._movieView.clearStage()
return
def fini(self, isItemsRevert = True):
g_sessionProvider.getCtx().resetNormalizePlayerName()
dispatcher = self.getDispatcher()
if dispatcher is not None:
dispatcher.dispossessUI()
dispatcher.clearGUI()
super(BattleLayout, self).fini(isItemsRevert=isItemsRevert)
return
def getSceneID(self):
return 'Battle'
def showMessage(self, text, lookupType = None):
self.uiHolder.call('battle.VehicleMessagesPanel.ShowMessage', [lookupType, text, 'green'])
def getGuiRoot(self):
try:
root = g_appLoader.getDefBattleApp()
except AttributeError:
LOG_CURRENT_EXCEPTION()
root = None
return root
def setDispatcher(self, dispatcher):
self.__dispatcher = dispatcher
def getDispatcher(self):
return self.__dispatcher
def setTrainingPeriod(self, currentIdx, total):
if self._movieView is not None:
self._movieView.populateProgressBar(currentIdx, total)
return
def setTrainingProgress(self, mask):
if self._movieView is not None:
self._movieView.setTrainingProgressBar(mask)
return
def setChapterProgress(self, total, mask):
if self._movieView is not None:
self._movieView.setChapterProgressBar(total, mask)
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tutorial\gui\scaleform\battle\layout.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:31:11 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
fd34e7360d93af75a21ed765af280a6d3f6b6297 | 17ca5bae91148b5e155e18e6d758f77ab402046d | /analysis_Astrodrz/CDFS-1/Reff_lowlimt0.2_analysis/4_analysis.py | cf5a9c58090c59fbb213e98af440ceaaaa71e402 | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,570 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 20:17:41 2018
@author: Dartoon
On the analysis of xxx
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pylab as plt
import glob
import sys
sys.path.insert(0,'../../../py_tools')
from psfs_average import psf_ave
from flux_profile import QSO_psfs_compare, profiles_compare
from matplotlib.colors import LogNorm
import os
path = os.getcwd()
ID = path.split('/')[-2]
from filter_info import filt_info
filt = filt_info[ID]
del filt_info['CID255']
# =============================================================================
# Read PSF and QSO image
# =============================================================================
QSO_bkg_value= 0.
QSO_im = pyfits.getdata('{0}_cutout.fits'.format(ID)) - QSO_bkg_value
QSO_msk = pyfits.getdata('{0}_msk.fits'.format(ID))
frame_size = 61
#frame = '{0}'.format(frame_size)
QSO_fm = len(QSO_im)
ct = (QSO_fm-frame_size)/2 # If want to cut to 61, i.e. (121-61)/2=30
import pickle
PSFs_dict = {}
QSOs_dict = {}
for key in filt_info.keys():
PSFs, QSOs=pickle.load(open('../../{0}/analysis/{0}_PSFs_QSO'.format(key),'rb'))
PSFs_dict.update({'{0}'.format(key):PSFs})
QSOs_dict.update({'{0}'.format(key):QSOs})
PSF_list = []
PSF_id = []
filter_list = []
PSF_keys = PSFs_dict.keys()
index1 = PSF_keys.index('CID1281')
del PSF_keys[index1]
index2 = PSF_keys.index('CID597')
del PSF_keys[index2]
PSF_keys.append('CID1281')
PSF_keys.append('CID597')
for key in PSF_keys:
if filt_info[key] == filt:
psfs_dict = PSFs_dict[key]
psfs = [psfs_dict[i] for i in range(len(psfs_dict))]
PSF_list += psfs
name_id = [key+"_"+str(i) for i in range(len(psfs_dict))]
PSF_id = PSF_id + name_id
filt_ = [filt_info[key]]
filter_list += filt_ * len(PSFs_dict[key])
if len(PSF_list) != len(PSF_id):
raise ValueError("The PSF_list is not consistent with PSF_id")
psf_list = [PSF_list[i][0] for i in range(len(PSF_list))]
PSF_mask_img_list = [PSF_list[i][3] for i in range(len(PSF_list))]
psf_name_list = PSF_id
# =============================================================================
# Doing the fitting
# =============================================================================
from fit_qso import fit_qso
from transfer_to_result import transfer_to_result
#from flux_profile import cr_mask_img
QSO_outer = pyfits.getdata('{0}_cutout_outer.fits'.format(ID))
from photutils import make_source_mask
mask = make_source_mask(QSO_outer, snr=2, npixels=5, dilate_size=11)
plt.imshow(QSO_outer* (1-mask*1), origin='low')
plt.close()
background_rms = np.std(QSO_outer* (1-mask*1))
print "background_rms: ", background_rms
QSO_msk = QSO_msk[ct:-ct,ct:-ct]
QSO_im = QSO_im[ct:-ct,ct:-ct]
QSO_msk = QSO_msk*0 +1 # This means no mask is added
QSO_std = pyfits.getdata('wht_err.fits')[ct:-ct,ct:-ct]
pix_s = 0.0642
##############################Fit
fixed_source = []
kwargs_source_init = []
kwargs_source_sigma = []
kwargs_lower_source = []
kwargs_upper_source = []
fixed_source.append({})
kwargs_source_init.append({'R_sersic': 0.3, 'n_sersic': 2., 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0.})
kwargs_source_sigma.append({'n_sersic': 0.5, 'R_sersic': 0.5, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1})
kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.2, 'n_sersic': 0.3, 'center_x': -0.5, 'center_y': -0.5})
kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 3., 'n_sersic': 7., 'center_x': 0.5, 'center_y': 0.5})
source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
if os.path.exists('fit_result_each')==False:
os.mkdir('fit_result_each')
import time
t1 = time.time()
fixcenter = False
filename = 'fit_result_each/each_PSF_fit_qso.txt'
if_file = glob.glob(filename)
if if_file == []:
fit_result = open(filename,'w')
elif if_file is not []:
fit_result = open(filename,"r+")
fit_result.read()
count = 0
for i in np.array(range(len(psf_name_list))):
print "by PSF: {0}".format(psf_name_list[i])
tag = 'fit_result_each/qso_fit_PSF{0}'.format(i)
psf_i = psf_list[i] * PSF_mask_img_list[i]
psf_i = psf_i[ct:-ct,ct:-ct]
source_result, ps_result, image_ps, image_host, error_map=fit_qso(QSO_im, psf_ave=psf_i, psf_std = None,
background_rms=background_rms,
source_params=source_params, QSO_msk = QSO_msk, fixcenter=fixcenter,
pix_sz = pix_s, no_MCMC =True,
QSO_std =QSO_std, tag=tag)
result = transfer_to_result(data=QSO_im, pix_sz = 'drz06',
source_result=source_result, ps_result=ps_result, image_ps=image_ps, image_host=image_host, error_map=error_map,
filt=filt, fixcenter=fixcenter,ID=ID,QSO_msk =QSO_msk, tag=tag)
if count == 0:
fit_result.write("#QSO_img intensity: {0} \n".format(round(np.sum(QSO_im*QSO_msk),2)))
fit_result.write("#fit by PSF{0}: \n".format(psf_name_list[i]))
fit_result.write('PSF_intensity:{0} \n'.format(round(np.sum(psf_i),2)))
fit_result.write(repr(result) + "\n")
count += 1
fit_result.close()
t2 = time.time()
t_tot= (t2-t1)/60
print "total time:", t_tot, "mins"
import os
os.system('say "your program has finished"')
| [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
e606f02c529f9a0f38e0995f590051af9b872232 | 25040bd4e02ff9e4fbafffee0c6df158a62f0d31 | /www/htdocs/wt/lapnw/data/item_70_6.tmpl.py | e17b54b72fd5f31f1772425a94076f55a01b542a | [] | no_license | erochest/atlas | 107a14e715a058d7add1b45922b0f8d03bd2afef | ea66b80c449e5b1141e5eddc4a5995d27c2a94ee | refs/heads/master | 2021-05-16T00:45:47.585627 | 2017-10-09T10:12:03 | 2017-10-09T10:12:03 | 104,338,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
title = 'Page.Item: 70.6'
project = 'lapnw'
class page(SubtemplateCode):
pass
| [
"eric@eric-desktop"
] | eric@eric-desktop |
529e71838f247d9b8231ad8f457090ee1d7404e4 | 82c8ea097f8e92806b7a217a31a5efba701c6873 | /pickup_reply/mecab.py | cf4c994a69deb18239555f88db1c37f28293e351 | [
"MIT"
] | permissive | showyou/hama_db | c690cbbf00f87024dfaae3c54bd55d542fdf3644 | 2f85e20b014c6b9457f944fc40d2fb0a54ab7668 | refs/heads/master | 2021-01-01T05:46:58.059412 | 2010-11-29T21:21:09 | 2010-11-29T21:21:09 | 192,887 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | # -*- coding: utf-8 -*-
from ctypes import *
def sparse_all(s, mecabpath):
    """Run MeCab morphological analysis on *s* and return the result string.

    s         -- text to analyse (passed straight to libmecab; NOTE(review):
                 the c_char_p argv below expects Python 2 str semantics —
                 confirm before running under Python 3).
    mecabpath -- path to the libmecab shared library to load.
    """
    # Load the MeCab shared library via ctypes.
    lib = cdll.LoadLibrary(mecabpath)
    # Analyser init arguments (no second argument = plain analysis mode).
    argc = c_int(2)
    argv = (c_char_p * 2)("mecab", "")
    # Create the analyser (tagger) object.
    tagger = lib.mecab_new(argc, argv)
    try:
        parsed = lib.mecab_sparse_tostr(tagger, s)
        ret = c_char_p(parsed).value
    finally:
        # BUG FIX: always destroy the tagger, even if parsing raises,
        # so the native resource is never leaked.
        lib.mecab_destroy(tagger)
    return ret
"""
テスト内容
sparse_all("本日は晴天なり","/usr/hoge/libmecab.so")
>> 本日 は 晴天 なり
"""
| [
"showyou41@gmail.com"
] | showyou41@gmail.com |
320d40d2f98c8bac89dc765a6f3310cf45b56da9 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201029155943.py | 4c1ff6afab2ac16764ab9e7e27ebfa2dd75ca3e6 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,703 | py | from django import forms
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
from wagtail.contrib.table_block.blocks import TableBlock
#Walidacja problemu
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
class TitleBlock(blocks.StructBlock):
    """Centered page-title text block."""

    text = blocks.CharBlock(
        required = True,
        # BUG FIX: keyword was misspelled ``elp_text``, which is not the
        # CharBlock ``help_text`` kwarg, so the intended editor help text
        # never reached the admin form.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Adds URL-resolution logic to Link block values."""

    def url(self) -> str:
        # An internal page takes precedence over an external link;
        # with neither set, resolve to the empty string.
        page = self.get('internal_page')
        if page:
            return page.url
        external = self.get('external_link')
        if external:
            return external
        return ''
class Link(blocks.StructBlock):
    """Link sub-block: exactly one of an internal page or an external URL."""

    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )

    class Meta:
        value_class = LinkValue

    def clean(self, value):
        """Validate that exactly one of the two link fields is filled in."""
        internal_page = value.get('internal_page')
        external_link = value.get('external_link')
        errors = {}
        if internal_page and external_link:
            errors['internal_page'] = ErrorList(['Nie można wybrać obu pól jednocześnie. Proszę wybrać jedną opcję.'])
            errors['external_link'] = ErrorList(['Nie można wybrać obu pól jednocześnie. Proszę wybrać jedną opcję.'])
        elif not internal_page and not external_link:
            # BUG FIX: this branch showed a truncated message ('... LUB wpo')
            # and a copy-pasted "both selected" message; both now ask the
            # editor to pick exactly one of the two options.
            errors['internal_page'] = ErrorList(['Proszę wybrać stronę wewnętrzną LUB podać link zewnętrzny.'])
            errors['external_link'] = ErrorList(['Proszę wybrać stronę wewnętrzną LUB podać link zewnętrzny.'])
        if errors:
            raise ValidationError('Błąd weryfikacji w Twoim linku', params=errors)
        return super().clean(value)
class Card(blocks.StructBlock):
    """A single card: bold title, optional text, cropped image and a link."""

    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    text = blocks.TextBlock(
        max_length=255,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    # BUG FIX: user-facing help text had a typo ('Wwybierz' -> 'Wybierz').
    link = Link(help_text = 'Wybierz link')
class CardsBlock(blocks.StructBlock):
    """A repeatable list of standard Card blocks rendered together."""
    cards = blocks.ListBlock(
        Card()
    )
    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
class RadioSelectBlock(blocks.ChoiceBlock):
    """ChoiceBlock rendered with radio buttons instead of a <select> widget."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the default select widget for radio buttons, reusing the
        # choices the ChoiceBlock already built.
        self.field.widget = forms.RadioSelect(
            choices=self.field.widget.choices
        )
class ImageAndTextBlock(blocks.StructBlock):
    """Side-by-side image and text section with selectable image alignment."""

    image = ImageChooserBlock(help_text='Obraz automatycznie przycięty do rozmiaru 786 na 552 px.')
    image_alignment = RadioSelectBlock(
        choices = (
            # BUG FIX: label typo 'Opraz' -> 'Obraz' (choice value unchanged).
            ('left','Obraz po lewej stronie'),
            ('right', 'Obraz po prawej stronie'),
        ),
        default = 'left',
        help_text = 'Obraz po lewej stronie, tekst po prawej lub obraz po prawej stronie tekst po lewej.'
    )
    title = blocks.CharBlock(
        max_length=60,
        help_text='Maksymalna długość 60 znaków.'
    )
    text = blocks.CharBlock(
        max_length = 140,
        required = False,
    )
    link = Link()

    class Meta:
        template = 'streams/image_and_text_block.html'
        icon = 'image'
        label = 'Obraz & Tekst'
class CallToActionBlock(blocks.StructBlock):
    """Call-to-action section: a title plus a Link sub-block."""
    title =blocks.CharBlock(
        max_length = 200,
        help_text = 'Maksymalnie 200 znaków.'
    )
    link = Link()
    class Meta:
        template = 'streams/call_to_action_block.html'
        icon = 'plus'
        label = 'Wezwanie do działania'
class PricingTableBlock(TableBlock):
    """Pricing table block (blok tabeli cen)."""
    class Meta:
        template = 'streams/pricing_table_block.html'
        label = 'Tabela cen'
        icon = 'table'
        help_text = 'Twoje tabele z cenami powinny zawierać zawsze 4 kolumny.'
'''
class RichTextWithTitleBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=50)
context = blocks.RichTextBlock(features=[])
class Meta:
template = 'streams/simple_richtext_block.html'
''' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
60f8fa22445c9d5796880e6d432e9e5de7063514 | cd77df7e0adf69f78cd86e0b09ffeba7acd7e1ba | /utilspy/uvmot.py | 6b9d2186ac07f295e6ae2387c93a8fdd69aba8ed | [] | no_license | huletlab/apparatus3-seq | 963b6ded59f53ae6ad714569559fe6f2cd08b610 | 3520f243c1d6d47dcf126cd4aa9e0f6cdaf2fb9a | refs/heads/master | 2021-01-25T04:53:00.095687 | 2014-02-21T04:42:41 | 2014-02-21T04:42:41 | 3,621,818 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | """Constructs the ramps for doing UV cooling and fluorescence imaging
"""
import wfm, gen, math, cnc
# Parsed experiment report, shared by the module-level helpers below.
report=gen.getreport()
def f(sec, key):
    """Look up report[sec][key] and return it coerced to float."""
    global report
    raw = report[sec][key]
    return float(raw)
#GET SECTION CONTENTS
# Cached 'UV' section of the report, used for attribute-style access below.
uvsec = gen.getsection('UV')
def uvRamps(motpow, bfield, ENDCNC):
    """Append the UV-MOT loading ramps to the CNC waveforms.

    motpow/bfield are waveform objects from cnc.cncRamps(); ENDCNC is the
    time at which the CNC stage ends.  Returns
    (uvpow2, uvpow, motpow, bfield, ENDUVMOT) where ENDUVMOT is the end
    of the longest ramp plus the UV hold time.
    """
    ss=f('CNC','cncstepsize')
    uvdt = f('UV','dt')
    #---Ramp down red power
    motpow.linear( 0.002, uvdt) #0.002 is max attenuation before RF switch
    #---Bfield ramp
    dtload = f('UV','dtload_bfield')
    dtcnc = f('UV','dtcnc_bfield')
    uvhold = f('UV','uvhold')
    #OBSOLETE
    #bfield.linear( f('UV','uvbfield'), uvdt)
    #bfield.appendhold(dtload)
    #bfield.linear( f('UV','uvbfieldf'), dtcnc)
    bfield.linear( uvsec.uvbfield, uvsec.dt)
    bfield.appendhold( uvsec.dtload_bfield)
    bfield.linear( uvsec.uvbfieldf, uvsec.dtcnc_bfield)
    #OBSOLETE
    #bfield.appendhold(uvhold)
    ENDUVMOT = max( motpow.dt(), bfield.dt() )
    #---UVPOW ramps
    #OBSOLETE
    #dtload_uvpow = f('UV','dtload_uvpow')
    #dtcnc_uvpow = f('UV','dtcnc_uvpow')
    #
    uvpow= wfm.wave('uvpow', f('UV','uvpow'),ss)
    uvpow2= wfm.wave('uvpow2',f('UV','uvpow2'),ss)
    #
    uvpow.extend( ENDCNC + uvsec.dt + uvsec.dtload_uvpow)
    uvpow2.extend(ENDCNC + uvsec.dt + uvsec.dtload_uvpow)
    #
    uvpow.linear( f('UV','uvpowf'), uvsec.dtcnc_uvpow)
    uvpow2.linear( f('UV','uvpow2f') , uvsec.dtcnc_uvpow)
    #---ENDUVMOT is defined as the point where the longest of bfield
    #---or uvpow ramps ends
    maxramp = max( motpow.dt(), bfield.dt(), uvpow.dt(), uvpow2.dt() )
    ENDUVMOT = maxramp + uvhold
    bfield.extend(ENDUVMOT)
    uvpow.extend(ENDUVMOT)
    uvpow2.extend(ENDUVMOT)
    motpow.extend(ENDUVMOT)
    # NOTE(review): the three extend() calls below repeat the ones just
    # above (same endpoint); presumably harmless, but confirm the
    # duplication is intentional.
    bfield.extend(ENDUVMOT)
    uvpow.extend(ENDUVMOT)
    uvpow2.extend(ENDUVMOT)
    return uvpow2, uvpow, motpow, bfield, ENDUVMOT
def run(s,camera):
    """Append the full UV-MOT sequence (CNC load, UV stage, imaging) to *s*.

    s is the sequence object being built; camera selects the imaging
    configuration passed through to cnc.imagingRamps().  Returns
    (s, ENDUVMOT), where ENDUVMOT is the MOT release time rounded UP to
    a whole analog step.
    """
    global report
    ss=f('CNC','cncstepsize')
    # Cool and Compress MOT
    # DURATION is defined as the time up to release from the MOT
    motpow, repdet, trapdet, reppow, trappow, bfield, ENDCNC = cnc.cncRamps()
    # Load UVMOT from CNCMOT
    uvpow2, uvpow, motpow, bfield, ENDUVMOT = uvRamps(motpow, bfield, ENDCNC)
    repdet.extend(ENDUVMOT)
    trapdet.extend(ENDUVMOT)
    reppow.extend(ENDUVMOT)
    trappow.extend(ENDUVMOT)
    # Imaging
    motpow, repdet, trapdet, reppow, trappow, bfield, maxDT = cnc.imagingRamps(motpow, repdet, trapdet, reppow, trappow, bfield,camera)
    uvpow.extend(maxDT)
    uvpow2.extend(maxDT)
    #---Add waveforms to sequence
    s.analogwfm_add(ss,[ motpow, repdet, trapdet, bfield, reppow, trappow, uvpow, uvpow2])
    #wait normally rounds down using floor, here the duration is changed before so that
    #the wait is rounded up
    ENDUVMOT = ss*math.ceil(ENDUVMOT/ss)
    #---Insert QUICK pulse for fast ramping of the field gradient during CNC
    s.wait(-10.0)
    quickval = 1 if gen.bstr('CNC',report) == True else 0
    s.digichg('quick',quickval)
    s.wait(10.0)
    s.wait(ENDCNC)
    #s.digichg('quick',0)
    #---Go back in time, shut down the UVAOM's and open the shutter
    s.wait(-50.0)
    s.digichg('uvaom1',0)
    s.digichg('uvaom2',0)
    s.digichg('uvshutter',1)
    s.wait(50.0)
    #---Turn OFF red light
    delay_red = float(report['UV']['delay_red'])
    s.wait(delay_red)
    s.digichg('motswitch',0)
    #s.digichg('motshutter',1)
    s.wait(-delay_red)
    #---Turn ON UVAOM's
    delay_uv = float(report['UV']['delay_uv'])
    s.wait(delay_uv)
    s.digichg('uvaom1',1)
    s.digichg('uvaom2',1)
    s.wait(-delay_uv)
    s.wait(-ENDCNC)
    #---Go to MOT release time and set QUICK back to low, turn off UV
    s.wait(ENDUVMOT)
    s.digichg('quick',0)
    s.digichg('uvaom1',0)
    s.digichg('uvaom2',0)
    #---Turn red light back on for imaging.
    s.digichg('motswitch',1)
    #print s.tcur
    return s, ENDUVMOT
return s, ENDUVMOT
| [
"pmd323@gmail.com"
] | pmd323@gmail.com |
0e9f2a4bca05c6bd1b662f61f917684cdc36083a | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/urllib/request.py | a39421cce560c62be423975ed1461c6490561f92 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:074ef4302fbdb2e3b03a987c564227ccd5c4d0dc27a56fb24beb709814e73e11
size 102738
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
c1c22d688967c45465165811cbbbe95342e6414c | 1c390cd4fd3605046914767485b49a929198b470 | /leetcode/lonely-pixel-i.py | 5f3ced4fc5e5f457681a751ac05c931059e801c4 | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | class Solution:
def findLonelyPixel(self, a: List[List[str]]) -> int:
n = len(a)
m = len(a[0])
r = [0] * n
c = [0] * m
for i in range(n):
for j in range(m):
if a[i][j] == 'B':
r[i] += 1
c[j] += 1
z = 0
for i in range(n):
for j in range(m):
if a[i][j] == 'B' and r[i] == 1 and c[j] == 1:
z += 1
return z | [
"wwwwodddd@gmail.com"
] | wwwwodddd@gmail.com |
ea4b72f3d8b2d4d50e61c03ff85880fb06f0c6c0 | 1608dfc2ddbccd9587de08bb0e5d41f33d729ec9 | /validation/.svn/pristine/ea/ea4b72f3d8b2d4d50e61c03ff85880fb06f0c6c0.svn-base | 1268252388847b9711043d30aca9b75ce3b0d382 | [] | no_license | dkhaldi/PIPS_TaskParallelization | d623d3ae334d0e1c63772bae86bb0c115e928708 | f324f3374cc8cbb58498b9dde172b1e7596abe1c | refs/heads/master | 2020-07-03T14:16:30.427670 | 2016-11-18T21:44:34 | 2016-11-18T21:44:34 | 74,167,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | from validation import vworkspace
import pypsex
# Open a PIPS validation workspace, keep read-read dependences, select the
# SRU dependence-graph format, then dump the chains for every function.
with vworkspace() as w:
    w.props.keep_read_read_dependence=True
    w.props.print_dependence_graph_using_sru_format=True
    w.all_functions.dump_chains_or_dg("chains")
| [
"dounia@eureka.cs.uh.edu"
] | dounia@eureka.cs.uh.edu | |
c71784fd27a5edea121a2017128cfe622e3eef4b | b125b852ada9a22a908fac379f9bee4be9eb050a | /Jump_to_python_BuiltInfunction/map/map_2.py | bfe509db5d0d29d3e2a326cd8b65d99927687055 | [] | no_license | rheehot/Python_Master | 73f32a83e9f2ea469eb6ac558a95cbf5b7c436d9 | 0c009f2bcf8810dd1b7826dc34ff58afb996dc86 | refs/heads/main | 2023-08-25T13:13:45.428375 | 2021-10-19T14:12:28 | 2021-10-19T14:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # map 으로 바꿔서 푸는 예제
def two_times(x):
    """Return *x* doubled."""
    return x * 2


# map(f, iterable) applies two_times to every element.
print(list(map(two_times, [1, 2, 3, 4])))  # [2, 4, 6, 8]
# 예제와 map으로 바꾼 예제의 차이가 무엇인가?
# map(f, iterable)이었던 것을 생각하면 map의 앞에 함수만 넣고, 뒤에는 반복 가능한 객체를 넣었다.
# 그리고 two_times라는 함수를 통해 객체는 2를 곱한 값이 출력된다.
# 이것을 lambda로 만들어본다면?
| [
"suyeon.chaa@gmail.com"
] | suyeon.chaa@gmail.com |
7c3ced0e9c09897cd66dc5183e129adb68fbad98 | aa1e637de90f69f9ae742d42d5b777421617d10c | /nitro/resource/base/base_response.py | b4f378117b73fa8714ec16e2163ef6423bdf26e6 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | km0420j/nitro-python | db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | refs/heads/master | 2021-10-21T18:12:50.218465 | 2019-03-05T14:00:15 | 2019-03-05T15:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py |
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class base_response:
    """base_response is a abstract base class for all the netscaler config/stat response classes."""

    def __init__(self):
        # Start from a "no error" state with empty response metadata.
        self.errorcode = 0
        for attr in ("message", "sessionid", "severity"):
            setattr(self, attr, "")
| [
"lennart.weller@hansemerkur.de"
] | lennart.weller@hansemerkur.de |
741c3d7c6f3f8d79345b4443251fe1fc50817e2c | 9a6c50efcc21d6201e7406008f447347c5677852 | /solutions/070.climbing-stairs/climbing-stairs.py | a50d47133c175ef65e952417cec953675b2fde1e | [] | no_license | 343829084/leetcode-2 | 29ec91364cb89230699f135a2dd6ff33f0066c5f | 0a992ec41b0a138134cfbbad01b9fad3450cf9f9 | refs/heads/master | 2020-04-21T17:15:39.201420 | 2018-10-12T13:27:47 | 2018-10-12T13:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
a, b = 1, 2
if n == 1:
return 1
if n == 2:
return 2
i = 3
result = 0
while i <= n:
result = a + b
a, b = b, result
i += 1
return result
| [
"jachin1992@hotmail.com"
] | jachin1992@hotmail.com |
9bbc0bc0f0569808cb961f68600af0f2026840c2 | 6b05bddf2e294c8e1b39846aecadfa06b4ff805d | /test/test_v1_virtual_machine_instance_phase_transition_timestamp.py | 18e54500812ddd2443f2ff643e1469a059357cad | [
"Apache-2.0"
] | permissive | kubevirt/client-python | 5ca82fe55d48c07f62796d2bed3605a7c189922c | 235fe17f58d41165010be7e4122cb67bdc866fe7 | refs/heads/master | 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 | Apache-2.0 | 2022-10-20T13:52:10 | 2017-09-27T12:51:32 | Python | UTF-8 | Python | false | false | 1,173 | py | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_virtual_machine_instance_phase_transition_timestamp import V1VirtualMachineInstancePhaseTransitionTimestamp
class TestV1VirtualMachineInstancePhaseTransitionTimestamp(unittest.TestCase):
    """ V1VirtualMachineInstancePhaseTransitionTimestamp unit test stubs """
    def setUp(self):
        # Swagger-generated stub: no fixtures required yet.
        pass
    def tearDown(self):
        # Swagger-generated stub: nothing to clean up.
        pass
    def testV1VirtualMachineInstancePhaseTransitionTimestamp(self):
        """
        Test V1VirtualMachineInstancePhaseTransitionTimestamp
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubevirt.models.v1_virtual_machine_instance_phase_transition_timestamp.V1VirtualMachineInstancePhaseTransitionTimestamp()
        pass
pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"kubevirt-bot"
] | kubevirt-bot |
ebc1535424c18c950bf0e834993daae89163a701 | 26efa024b1e7c3ff0be58bfc249039a89a2c4293 | /trainer/task.py | 5fa4d0ed2b9ed46ba7ce8c234c5f30d488c05c52 | [
"Apache-2.0"
] | permissive | tikyau/distributed_tensorflow | a91a6efa12aea328c393b3559d778961b3d651f8 | ffe893973e51dff4ee790cb19cc8c3d9df75e834 | refs/heads/master | 2021-08-27T20:09:06.449977 | 2017-11-28T06:48:29 | 2017-11-28T06:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,657 | py | #!/usr/bin/env python
"""
Usage:
TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"index": 0, "type": "ps"}}' python -m trainer.task
TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"index": 0, "type": "worker"}}' python -m trainer.task
TF_CONFIG='{"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"index": 1, "type": "worker"}}' python -m trainer.task
"""
import datetime
import json
import os
import numpy as np
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch_number", 10, "Number of steps to run trainer")
flags.DEFINE_string("checkpoint_dir", "./checkpoint/",
"The checkpoint directory")
flags.DEFINE_float("learning_rate", 0.01, "Initial learning rate")
FLAGS = flags.FLAGS
def main():
  """Fit y = w*x + b by gradient descent.

  Runs standalone when the TF_CONFIG environment variable is empty;
  otherwise joins a between-graph distributed cluster as the ps or
  worker task described by TF_CONFIG (see the module docstring).
  """
  # Create train dataset: y = 2*x + noise + 10
  train_X = np.linspace(-1, 1, 100).reshape((100, 1))
  train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10
  optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
  start_training_time = datetime.datetime.now()
  # Run standalone training
  if os.environ.get('TF_CONFIG', "") == "":
    X_placeholder = tf.placeholder("float", shape=[None, 1])
    Y_placeholder = tf.placeholder("float", shape=[None, 1])
    w = tf.get_variable("w", [1], initializer=tf.random_normal_initializer())
    b = tf.get_variable("b", [1], initializer=tf.random_normal_initializer())
    loss = tf.reduce_sum(tf.square(Y_placeholder - X_placeholder * w - b))
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
      sess.run(init_op)
      for epoch_index in range(FLAGS.epoch_number):
        _, loss_value = sess.run(
            [train_op, loss],
            feed_dict={X_placeholder: train_X,
                       Y_placeholder: train_Y})
        if epoch_index % 1 == 0:
          print("Epoch: {}, loss: {}".format(epoch_index, loss_value))
      w_value, b_value = sess.run([w, b])
      end_training_time = datetime.datetime.now()
      print("[{}] End of standalone training, w: {}, b:{}".format(
          end_training_time - start_training_time, w_value, b_value))
  # Run distributed training
  else:
    # Example: {"cluster": {"ps": ["127.0.0.1:3001"], "worker": ["127.0.0.1:3002", "127.0.0.1:3003"]}, "task": {"index": 0, "type": "worker"}}
    tf_config_env = json.loads(os.environ.get("TF_CONFIG"))
    cluster_spec = tf_config_env.get("cluster")
    task_data = tf_config_env.get("task")
    task_type = task_data.get("type")
    task_index = task_data.get("index")
    cluster = tf.train.ClusterSpec(cluster_spec)
    server = tf.train.Server(
        cluster, job_name=task_type, task_index=task_index)
    if task_type == "ps":
      # Parameter servers just serve variables until killed.
      server.join()
    elif task_type == "worker":
      with tf.device(
          tf.train.replica_device_setter(
              worker_device="/job:worker/task:{}".format(task_index),
              cluster=cluster)):
        X_placeholder = tf.placeholder("float", shape=[None, 1])
        Y_placeholder = tf.placeholder("float", shape=[None, 1])
        w = tf.get_variable(
            "w", [1], initializer=tf.random_normal_initializer())
        b = tf.get_variable(
            "b", [1], initializer=tf.random_normal_initializer())
        loss = tf.reduce_sum(tf.square(Y_placeholder - X_placeholder * w - b))
        global_step = tf.contrib.framework.get_or_create_global_step()
        train_op = optimizer.minimize(loss, global_step=global_step)
      # hooks=[tf.train.StopAtStepHook(last_step=100)]
      is_chief = task_index == 0
      with tf.train.MonitoredTrainingSession(
          master=server.target,
          is_chief=is_chief,
          checkpoint_dir=FLAGS.checkpoint_dir) as sess:
        # NOTE(review): nothing here ever requests a stop, so this loop
        # re-runs the epochs indefinitely — confirm whether the commented
        # StopAtStepHook above was meant to bound it.
        while not sess.should_stop():
          for epoch_index in range(FLAGS.epoch_number):
            _, loss_value = sess.run(
                [train_op, loss],
                feed_dict={X_placeholder: train_X,
                           Y_placeholder: train_Y})
            if epoch_index % 1 == 0:
              print("Epoch: {}, loss: {}".format(epoch_index, loss_value))
          w_value, b_value = sess.run([w, b])
          end_training_time = datetime.datetime.now()
          # BUG FIX: this branch logged "End of standalone training" even
          # though it is the distributed path.
          print("[{}] End of distributed training, w: {}, b:{}".format(
              end_training_time - start_training_time, w_value, b_value))
  return
# Script entry point (see module docstring for TF_CONFIG usage examples).
if __name__ == "__main__":
  main()
| [
"tobeg3oogle@gmail.com"
] | tobeg3oogle@gmail.com |
5ca9378761342bf1e4411f7baa9887be64960548 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /ui/ozone/platform/wayland/DEPS | 44ef7beabfd129c5475f711e54b6052821058cf5 | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 473 | include_rules = [
"+ui/base/buildflags.h", # Doesn't bring in all of ui/base.
"+ui/base/hit_test.h", # UI hit test doesn't bring in all of ui/base.
"+ui/base/ui_base_features.h",
"+ui/gtk/gtk_ui_delegate.h",
"+mojo/public",
"+ui/base/clipboard/clipboard_constants.h",
"+ui/base/dragdrop/drag_drop_types.h",
"+ui/base/dragdrop/file_info/file_info.h",
"+ui/base/dragdrop/os_exchange_data.h",
"+ui/base/dragdrop/os_exchange_data_provider_non_backed.h",
]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
c021711cb5b476acb904884dbe3148fb30a598be | 7c8e59d8424b51422fdc7809e2e58ee5a732d01e | /pack.py | af98619093f574cfa3b4ae8d0a0db7c92672ccf9 | [] | no_license | amnh-digital/hope-climate-ia | 530b91f48c99acc5d1952e8d14ee7c0f80fb3deb | ab479c893628541b062d1e998a9ed222b5f69eaf | refs/heads/master | 2022-11-09T01:23:58.672888 | 2022-10-25T13:10:57 | 2022-10-25T13:10:57 | 114,143,596 | 4 | 2 | null | 2023-09-06T20:47:30 | 2017-12-13T16:26:16 | HTML | UTF-8 | Python | false | false | 7,594 | py | # -*- coding: utf-8 -*-
# This is a script that will attempt to package one of the web apps into a single html file plus a folder of non-text assets.
import argparse
from bs4 import BeautifulSoup
import csv
import glob
import os
from pprint import pprint
import re
import shutil
import sys
# Command-line options: which page to package, where assets will be hosted,
# and where the packaged output goes.
parser = argparse.ArgumentParser()
parser.add_argument('-app', dest="APP", default="temperature-timescales/embed.html", help="Path to webpage.")
parser.add_argument('-aurl', dest="ASSET_URL", default="https://amnh.org/assets/", help="Base url where the assets will be hosted")
parser.add_argument('-ap', dest="ASSET_PREFIX", default="", help="Adds a prefix to all asset files")
parser.add_argument('-ad', dest="ASSET_DIRS", default="temperature-timescales/img/,temperature-timescales/data/*.json,temperature-timescales/config/*.json,temperature-timescales/content/*.json,shared/audio/key.mp3", help="Comma-separated list of directories of assets")
parser.add_argument('-am', dest="ASSET_MAP", default="packages/temperature-timescales-assets.csv", help="CSV file with mapping from filename to url or javascript variable")
parser.add_argument('-out', dest="OUTPUT_DIR", default="packages/temperature-timescales/", help="Output directory")
args = parser.parse_args()

APP = args.APP
ASSET_URL = args.ASSET_URL.strip()
ASSET_PREFIX = args.ASSET_PREFIX.strip()
ASSET_DIRS = args.ASSET_DIRS.strip().split(",")
OUTPUT_DIR = args.OUTPUT_DIR.strip()
ASSET_MAP = args.ASSET_MAP.strip()
ASSET_DIR = OUTPUT_DIR + "assets/"

# Known vendor libraries: local files matching 'match' are replaced with a
# CDN link; the captured group is the version substituted into 'replace'.
vendorCdns = [
    {"match": ".*shared/css/vendor/normalize\-([0-9\.]+)\.min\.css", "replace": "https://cdnjs.cloudflare.com/ajax/libs/normalize/%s/normalize.min.css"},
    {"match": ".*shared/css/vendor/plyr\-([0-9\.]+)\.css", "replace": "https://cdnjs.cloudflare.com/ajax/libs/plyr/%s/plyr.css"},
    {"match": ".*shared/js/vendor/jquery\-([0-9\.]+)\.min\.js", "replace": "https://code.jquery.com/jquery-%s.min.js"},
    {"match": ".*shared/js/vendor/underscore\-([0-9\.]+)\.min\.js", "replace": "https://cdnjs.cloudflare.com/ajax/libs/underscore.js/%s/underscore-min.js"},
    {"match": ".*shared/js/vendor/pixi\-([0-9\.]+)\.min\.js", "replace": "https://cdnjs.cloudflare.com/ajax/libs/pixi.js/%s/pixi.js"},
    {"match": ".*shared/js/vendor/three\-([0-9\.]+)\.min\.js", "replace": "https://cdnjs.cloudflare.com/ajax/libs/three.js/%s/three.min.js"},
    {"match": ".*shared/js/vendor/howler\-([0-9\.]+)\.min\.js", "replace": "https://cdnjs.cloudflare.com/ajax/libs/howler/%s/howler.min.js"},
    {"match": ".*shared/js/vendor/plyr\-([0-9\.]+)\.min\.js", "replace": "https://cdnjs.cloudflare.com/ajax/libs/plyr/%s/plyr.min.js"}
]

inputDir = os.path.dirname(APP)
# Matches url(<dir><file>) references in CSS: group 1 is the directory
# part, group 2 the bare file name.
cssAssetPattern = re.compile("url\(\"?\'?([a-zA-Z\/\.]*\/)([a-zA-Z0-9\-\_]+\.[a-z]+)\"?\'?\)")
assetMap = {}
assetVars = []
assets = []

# Make output directory
assetDir = os.path.dirname(ASSET_DIR)
if not os.path.exists(assetDir):
    os.makedirs(assetDir)

# Read asset map file if exists
if len(ASSET_MAP) and os.path.isfile(ASSET_MAP):
    with open(ASSET_MAP, 'rb') as f:
        lines = list(f)
        reader = csv.DictReader(lines, skipinitialspace=True)
        rows = list(reader)
        # Rows with a 'filename' map files to hosted URLs; rows with a
        # 'var' become javascript variables injected into the page.
        mapRows = [r for r in rows if "filename" in r and len(r["filename"]) > 0]
        varRows = [r for r in rows if "var" in r and len(r["var"]) > 0]
        assetMap = dict([(r["filename"], r["url"]) for r in mapRows])
        assetVars = [(r["var"], r["url"]) for r in varRows]

# Parse the html
soup = None
with open(APP) as f:
    soup = BeautifulSoup(f, 'html.parser')
def cssFileToString(filename):
    """Return the CSS file contents with relative url(...) asset references
    rewritten to their hosted URLs.

    Side effect: appends every referenced asset path to the module-level
    ``assets`` list so it gets copied into the package later.
    """
    global assets
    global cssAssetPattern
    global inputDir
    global assetMap
    fileDir = os.path.dirname(filename)
    cssStr = ""
    with open(filename, 'r') as f:
        cssStr = f.read()
    for match in cssAssetPattern.finditer(cssStr):
        matchDir, matchFile = match.groups()
        path = os.path.relpath(fileDir + "/" + matchDir + matchFile)
        assets.append(path)
        # print("%s + %s = %s" % (matchDir, matchFile, path))
        # Replace relative urls absolute urls
        assetUrl = assetMap[matchFile] if matchFile in assetMap else ASSET_URL+ASSET_PREFIX+matchFile
        # BUG FIX: escape the matched path before interpolating it into the
        # substitution regex — '.' and '-' in file names are regex
        # metacharacters and could previously match unintended text.
        cssStr = re.sub(r'url\(\"?\'?' + re.escape(matchDir + matchFile) + r'\"?\'?\)', "url("+assetUrl+")", cssStr)
    return cssStr
def jsFileToString(filename):
    """Read a JavaScript file and return its contents as a string."""
    with open(filename, 'r') as handle:
        return handle.read()
def matchCdn(path, cdns):
    """Return the CDN URL for *path* if any cdn pattern matches, else None.

    Each cdn dict has a 'match' regex whose first group captures the
    version number and a 'replace' template with a single %s slot.
    """
    for cdn in cdns:
        hit = re.match(cdn["match"], path)
        if hit:
            return cdn["replace"] % hit.group(1)
    return None
# Parse stylesheets
links = soup.find_all("link", rel="stylesheet")
newTags = []
for i, link in enumerate(links):
    path = os.path.relpath(inputDir + "/" + link.get('href')) # gets file path relative to script
    # check for common vendors and link to cdn
    cdnMatch = matchCdn(path, vendorCdns)
    cssStr = cssFileToString(path) if not cdnMatch else ""
    newTag = soup.new_tag("style")
    if cdnMatch:
        newTag = soup.new_tag("link")
        newTag["href"] = cdnMatch
        newTag["rel"] = "stylesheet"
        newTag["type"] = "text/css"
        newTag["crossorigin"] = "anonymous"
    newTag.string = cssStr
    # link.replace_with(newTag)
    link.decompose()
    newTags.insert(0, newTag)
# Prepend style tags to the body tag
body = soup.find('body')
for newTag in newTags:
    body.insert(0, newTag)
# Parse javascript
scripts = soup.find_all("script", src=True)
varsAdded = False
for i, script in enumerate(scripts):
    path = os.path.relpath(inputDir + "/" + script.get('src'))
    # check for common vendors and link to cdn
    cdnMatch = matchCdn(path, vendorCdns)
    jsStr = ""
    # if we're at the first non-vendor tag, prepend variables that we want to insert in the javascript
    if not varsAdded and not cdnMatch:
        for name, value in assetVars:
            jsStr += "var %s = \"%s\";\n" % (name, value)
        varsAdded = True
    jsStr = jsStr + jsFileToString(path) if not cdnMatch else jsStr
    # replace the tag with a new one
    newTag = soup.new_tag("script")
    if cdnMatch:
        newTag["src"] = cdnMatch
        newTag["crossorigin"] = "anonymous"
    newTag.string = jsStr
    script.replace_with(newTag)
# Write HTML file
outputStr = soup.prettify()
# NOTE(review): writing encoded bytes to a text-mode handle (and feeding
# csv a 'rb' handle above) only works on Python 2 — this script appears
# to target Python 2. Confirm before running under Python 3.
with open(OUTPUT_DIR + "index.html", "w") as f:
    f.write(outputStr.encode('utf-8'))
# Retrieve assets
for dir in ASSET_DIRS:
    files = []
    if "*" in dir:
        files = glob.glob(dir)
    elif os.path.isfile(dir):
        files = [dir]
    else:
        files = [os.path.join(dir, f) for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
    assets += files
# Deduplicate asset paths collected here and by cssFileToString().
assets = list(set(assets))
# Empty asset folder
for f in os.listdir(assetDir):
    path = os.path.join(assetDir, f)
    if os.path.isfile(path):
        os.unlink(path)
# Write assets
for asset in assets:
    filename = os.path.basename(asset)
    dest = ASSET_DIR + ASSET_PREFIX + filename
    extension = os.path.splitext(filename)[1]
    shutil.copyfile(asset, dest)
    # if json, find-replace in file
    if extension == ".json":
        fileStr = ""
        with open(dest, 'r') as f :
            fileStr = f.read()
        # NOTE(review): this loop reuses (clobbers) the outer ``filename``
        # variable; harmless here since it is reassigned each iteration
        # above, but easy to trip over when editing.
        for filename in assetMap:
            pattern = "\"[^\"]*"+filename+"\""
            fileStr = re.sub(pattern, "\""+assetMap[filename]+"\"", fileStr)
        with open(dest, 'w') as f:
            f.write(fileStr)
print("Done.")
| [
"brian@youaremyjoy.org"
] | brian@youaremyjoy.org |
b5be513c0f4c18afef9d6bfe13ca2adcc51d71d8 | 49f9be10fe253316abd23df97e43081819e540e9 | /HW_3_Afanaseva_Maria/server.py | a6e58f906ef93c39b044dc18f6795aabbefc3be5 | [] | no_license | MariaAfanaseva/app | 0288bde181c58ba5bbc0046824a0244a14c700d2 | dc8fcd0a2634d6795e4ef10459585ac7d6698ef3 | refs/heads/master | 2020-07-25T10:54:09.892424 | 2019-09-16T19:46:20 | 2019-09-16T19:46:20 | 208,265,281 | 0 | 0 | null | 2019-09-16T19:46:21 | 2019-09-13T12:59:25 | Python | UTF-8 | Python | false | false | 1,845 | py | import socket
import sys
import re
from common.variables import *
from common.utils import *
def get_addr_port():
    """Parse ``-a <address>`` and ``-p <port>`` from sys.argv.

    Returns (address, port).  address defaults to '' (listen on all
    interfaces) and port to DEFAULT_PORT.  Prints a message and exits
    with status 1 on any invalid input.
    """
    args = sys.argv
    try:
        if '-a' in args:
            address = args[args.index('-a') + 1]
        else:
            address = ''
    except IndexError:
        print('После параметра \'a\'- необходимо указать адрес, который будет слушать сервер.')
        exit(1)
    try:
        if '-p' in args:
            port = int(args[args.index('-p') + 1])
        else:
            port = DEFAULT_PORT
    except (IndexError, ValueError):
        # BUG FIX: a non-numeric port value used to escape as an unhandled
        # ValueError from int(); report it like a missing port instead.
        print('После параметра -\'p\' необходимо указать номер порта.')
        exit(1)
    try:
        # Validate the address as dotted-quad IPv4 (when given) and the
        # port as a non-privileged TCP port.
        if address and not re.match(r'^(([1-9]?[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.)'
                                    r'{3}([1-9]?[0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$', address) or not 1024 < port < 65535:
            raise ValueError
    except ValueError:
        print('Неверные значения port (-p) или ip (-a)')
        exit(1)
    return address, port
def valid_client_msg(message):
    """Check a client PRESENCE message and build the protocol response.

    Returns {RESPONSE: 200} for a well-formed presence message, otherwise
    a 400 'Bad Request' response dict.
    """
    is_presence = (
        ACTION in message
        and TIME in message
        and USER in message
        and message[ACTION] == PRESENCE
    )
    if is_presence:
        return {RESPONSE: 200}
    return {
        RESPONSE: 400,
        ERROR: 'Bad Request'
    }
def main():
    """Accept clients in a loop: read one message, validate it, reply, close."""
    contact = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # prepare the socket
    contact.bind(get_addr_port())
    contact.listen(MAX_CONNECTIONS) # listen on the port
    # Sequentially serve one client at a time, one message per connection.
    while True:
        client, client_address = contact.accept()
        message = get_msg(client)
        print(message)
        response = valid_client_msg(message)
        send_msg(client, response)
        client.close()
if __name__ == '__main__':
main()
| [
"mashenkachukina@gmail.com"
] | mashenkachukina@gmail.com |
719df14aac850bc0a93a316b6a465cafdbca7fc8 | 05d8f3771e81fd2d87c0dff05deb67a3455794f3 | /autocall/urls.py | 55bf55e1792cc604bf056ec13ea9f75ea24e5171 | [
"MIT"
] | permissive | shashank-sharma/mythical-feedback | 8626d8e88cfdc7b3461bdc5fc413ad99b98bb74f | 4ec08f7590c5a9b9455b20243b21e4ac5e884c55 | refs/heads/master | 2022-12-14T11:58:45.069998 | 2019-07-09T15:28:53 | 2019-07-09T15:28:53 | 175,766,244 | 3 | 1 | MIT | 2022-12-08T04:58:33 | 2019-03-15T06:59:32 | JavaScript | UTF-8 | Python | false | false | 670 | py | from django.conf.urls import url
from .api.views import MakeCallAPIView,\
AnswerCallAPIView, NextCallAPIView, SaveCallAPIView, UpdateStatusCallAPIView, SurveyResponseAPIView
# Route table for the autocall API; all route names use the
# ``autocall-<action>`` prefix.
urlpatterns = [
    url(r'^call/$', MakeCallAPIView.as_view(), name='autocall-call'),
    url(r'^answer/', AnswerCallAPIView.as_view(), name='autocall-answer'),
    url(r'^next/$', NextCallAPIView.as_view(), name='autocall-next'),
    url(r'^save/', SaveCallAPIView.as_view(), name='autocall-save'),
    url(r'^status/', UpdateStatusCallAPIView.as_view(), name='autocall-status'),
    url(r'^survey/responses/(?P<pk>\d+)', SurveyResponseAPIView.as_view(), name='autocall-survey-response'),
] | [
"shashank.sharma98@gmail.com"
] | shashank.sharma98@gmail.com |
d320f6d6ee50773dcf8e1f946aacc325846d087e | 8f8e378c0ce4224244582c506c268edda3cc3b30 | /DL_Practice/Day3/DLday03_02.py | 5ced3a1fc9ea626c45b148be724e13b1efb4b080 | [] | no_license | srsapireddy/Diploma-in-AI_NIELIT_Files | 223318319b2d4b8647d77b99d1ba03f0d6e15cf6 | 9e2ed78fbe03369ebef1aa81f3417fc21bdd4107 | refs/heads/master | 2021-05-17T14:28:00.059617 | 2020-03-29T09:28:04 | 2020-03-29T09:28:04 | 250,820,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import fashion_mnist
(X_train,y_train),(X_test,y_test)=fashion_mnist.load_data()
print(X_train.shape)
from keras.utils import to_categorical
classes=np.unique(y_train)
nClasses=len(classes)
print(nClasses)
print(classes)
plt.figure(figsize=[10,5])
plt.subplot(121)
plt.imshow(X_train[0,:,:],cmap='gray')
plt.title(y_train[0])
#plt.show()
dimData=np.prod(X_train.shape[1:])
print(dimData)
print(X_train[0])
X_train=X_train.reshape(X_train.shape[0],1,28,28)
X_test=X_test.reshape(X_test.shape[0],1,28,28)
X_train=X_train.astype('float32')
y_train=y_train.astype('float32')
X_train/=255
X_test=X_test/255
print(X_test)
y_train1=to_categorical(y_train)
y_test1=to_categorical(y_test)
print(y_train)
print(y_train1[1:10])
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras import backend as K
K.set_image_dim_ordering('th')
def base_model():
    """Build and compile the small CNN used for Fashion-MNIST.

    Two conv/pool stages, dropout, then three dense layers ending in a
    10-way softmax.  Input is a single-channel 28x28 image in
    channels-first layout (matches K.set_image_dim_ordering('th')
    earlier in this script).
    """
    stack = [
        Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(15, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.2),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(50, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    cnn = Sequential()
    for layer in stack:
        cnn.add(layer)
    cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
model=base_model()
print(model.summary())
#model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
history=model.fit(X_train,y_train1,batch_size=256,epochs=2,verbose=1,validation_data=(X_test,y_test1))
[test_loss,test_acc]=model.evaluate(X_test,y_test1)
print(test_loss,test_acc)
| [
"sapireddyrahul@gmail.com"
] | sapireddyrahul@gmail.com |
fefab262486144747a8e0d278c7d3a03347762cf | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /XTXZRmvXbhmhSfiPf_3.py | 85a8644f2b6410a31135bfa3a270eb2cf45019db | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | """
The function is given a list of numbers where each number appears three times
except for one which appears only one time. Find the single number and return
it.
### Examples
single_number([2, 2, 3, 2]) ➞ 3
single_number([0, 1, 0, 1, 0, 1, 99]) ➞ 99
single_number([-1, 2, -4, 20, -1, 2, -4, -4, 2, -1]) ➞ 20
### Notes
To run under 12 seconds the function needs to be efficient.
"""
def single_number(r):
    """Return the one value in *r* that appears exactly once.

    Every other value in *r* appears exactly three times.  Uses a
    single counting pass (O(n)) instead of the original sort
    (O(n log n)), which matters for the "needs to be efficient" note
    in the problem statement.

    Args:
        r: list of integers; exactly one element occurs once, all
           others occur three times.

    Returns:
        The element of *r* that occurs only once.
    """
    from collections import Counter
    counts = Counter(r)
    # Exactly one key has count 1 by the problem's precondition.
    return next(value for value, count in counts.items() if count == 1)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7a889dea2800df0da37526db5e54bd0ad918b872 | ff1146409edb8fb5423b0713a63dc08d70329423 | /tools/posixusers_baseline.py | c45e54f1a4754bc1e466482fcb721cb072f89888 | [
"mpich2",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | welex91/bcfg2 | 378b93fa8b3d3616e370230e0454c6b9314291f7 | 35851347089db1a092ec715cb183aec19f19e983 | refs/heads/master | 2021-01-15T21:20:47.264768 | 2013-07-27T22:52:33 | 2013-07-27T22:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | #!/usr/bin/env python
import grp
import sys
import logging
import lxml.etree
import Bcfg2.Logger
from Bcfg2.Client.Tools.POSIXUsers import POSIXUsers
from Bcfg2.Options import OptionParser, Option, get_bool, CLIENT_COMMON_OPTIONS
def get_setup():
    """Parse command-line options for this tool.

    Extends Bcfg2's common client options with --no-uids / --no-gids
    flags, rejects positional arguments, and configures logging from
    the --verbose/--debug flags.

    Returns:
        OptionParser: the parsed option set.

    Raises:
        SystemExit: if any positional arguments are supplied.
    """
    optinfo = CLIENT_COMMON_OPTIONS
    optinfo['nouids'] = Option("Do not include UID numbers for users",
                               default=False,
                               cmd='--no-uids',
                               long_arg=True,
                               cook=get_bool)
    optinfo['nogids'] = Option("Do not include GID numbers for groups",
                               default=False,
                               cmd='--no-gids',
                               long_arg=True,
                               cook=get_bool)
    setup = OptionParser(optinfo)
    setup.parse(sys.argv[1:])
    if setup['args']:
        # Options only -- bail out on stray arguments.
        # NOTE(review): the "posixuser_[" below looks like a typo in the
        # original message; left untouched here.
        print("posixuser_[baseline.py takes no arguments, only options")
        print(setup.buildHelpMessage())
        raise SystemExit(1)
    # Verbosity ladder: 30 (WARNING) by default, 20 (INFO) with
    # --verbose, 0 (everything) with --debug.
    level = 30
    if setup['verbose']:
        level = 20
    if setup['debug']:
        level = 0
    Bcfg2.Logger.setup_logging('posixusers_baseline.py',
                               to_syslog=False,
                               level=level,
                               to_file=setup['logging'])
    return setup
def main():
    """Print a <Bundle> of Bound POSIXUser/POSIXGroup entries to stdout.

    Uses the POSIXUsers tool's FindExtra() to enumerate users/groups
    present on this system (optionally seeded from a config file given
    with -f) and emits them as a baseline bundle.
    """
    setup = get_setup()
    if setup['file']:
        config = lxml.etree.parse(setup['file']).getroot()
    else:
        config = lxml.etree.Element("Configuration")
    users = POSIXUsers(logging.getLogger('posixusers_baseline.py'),
                       setup, config)
    baseline = lxml.etree.Element("Bundle", name="posixusers_baseline")
    for entry in users.FindExtra():
        data = users.existing[entry.tag][entry.get("name")]
        for attr, idx in users.attr_mapping[entry.tag].items():
            # Skip attributes that are already set, or that the caller
            # asked to omit (--no-uids / --no-gids).
            if (entry.get(attr) or
                (attr == 'uid' and setup['nouids']) or
                (attr == 'gid' and setup['nogids'])):
                continue
            entry.set(attr, str(data[idx]))
        if entry.tag == 'POSIXUser':
            # data[3] holds a GID (it is passed to grp.getgrgid);
            # record its group name plus supplementary memberships.
            entry.set("group", grp.getgrgid(data[3])[0])
            for group in users.user_supplementary_groups(entry):
                memberof = lxml.etree.SubElement(entry, "MemberOf",
                                                 group=group[0])
        entry.tag = "Bound" + entry.tag
        baseline.append(entry)
    print(lxml.etree.tostring(baseline, pretty_print=True))
if __name__ == "__main__":
sys.exit(main())
| [
"chris.a.st.pierre@gmail.com"
] | chris.a.st.pierre@gmail.com |
dec9df7b0db35af6fd70ebdc5cdacaaa3fd4267a | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtGui/QHelpEvent.py | 069b42cfa6133d6f0f6067febbc11072be04944b | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # encoding: utf-8
# module PySide2.QtGui
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QHelpEvent(__PySide2_QtCore.QEvent):
    # Auto-generated binding stub for Qt's QHelpEvent.  The bodies are
    # placeholders; the real implementations live in the compiled
    # PySide2 extension module.

    def globalPos(self, *args, **kwargs): # real signature unknown
        pass

    def globalX(self, *args, **kwargs): # real signature unknown
        pass

    def globalY(self, *args, **kwargs): # real signature unknown
        pass

    def pos(self, *args, **kwargs): # real signature unknown
        pass

    def x(self, *args, **kwargs): # real signature unknown
        pass

    def y(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
| [
"noreply@github.com"
] | cundesi.noreply@github.com |
01fc4cccfef4705e3ea21f0c23aebf953ce14bd7 | c6ed9aa97166d4778b89321b580af80c543bacc9 | /hackerrank/epiccode/linesegments.py | 0ad146b45b4645690b07b6464178c2ed0b38f9b2 | [] | no_license | bradyz/sandbox | 381bcaf2f3719dee142a00858f7062aeff98d1ab | ff90335b918886d5b5956c6c6546dbfde5e7f5b3 | refs/heads/master | 2021-01-23T09:03:54.697325 | 2018-02-27T20:47:48 | 2018-02-27T20:47:48 | 21,292,856 | 10 | 0 | null | 2015-09-03T16:53:15 | 2014-06-28T00:29:18 | Python | UTF-8 | Python | false | false | 415 | py | n = int(input())
c = [list(map(int, input().split())) for _ in range(n)]
c.sort(key=lambda x: (x[0], x[1]-x[0]))
r = [c[0]]
for i in range(1, len(c)):
if not c[i][0] == r[-1][0] and c[i][1] > r[-1][1]:
r.append(c[i])
c.sort(key=lambda x: (x[1], x[1]-x[0]))
s = [c[-1]]
for i in range(n-2, -1, -1):
if not c[i][1] == s[-1][1] and c[i][0] < s[-1][0]:
s.append(c[i])
print(max(len(r), len(s)))
| [
"brady.zhou@utexas.edu"
] | brady.zhou@utexas.edu |
c7bd258a9bc0e2eaf28121031868245c0b6c64ae | abedd50e90c4083d809168776e15ea8496ba9c7e | /Amazon/572. Subtree of Another Tree.py | 45d1a8b26a354cbe8935f83236269946a8bf9a8d | [] | no_license | Iansdfg/Interview_prep_2019Fall | 46bfe75d7dd78ca83390319c0eeebbe763bbc909 | d85e5b3cd9318b7f08908c26177ae872b4f5324d | refs/heads/master | 2022-06-07T23:01:13.770986 | 2020-04-30T17:39:50 | 2020-04-30T17:39:50 | 216,441,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Decide whether binary tree *t* is a subtree of binary tree *s*.

    A subtree of s is some node of s together with all of that node's
    descendants, matching t exactly in structure and values.
    """

    # Per-call result flag.  The original never reset it, so a second
    # isSubtree() call on the same instance after a positive match
    # always returned True; it is now reset on every call.
    found = False

    def isSubtree(self, s, t):
        """
        :type s: TreeNode
        :type t: TreeNode
        :rtype: bool
        """
        self.found = False  # reset so the instance is reusable
        self.helper(s, t)
        return self.found

    def helper(self, s, t):
        # Visit every node of s; mark found when the subtree rooted at
        # the current node matches t exactly.
        if not s:
            return None
        if self.isSameTree(s, t):
            self.found = True
        self.helper(s.left, t)
        self.helper(s.right, t)

    def isSameTree(self, s, t):
        # Structural and value equality of two trees.
        if not s and not t:
            return True
        if not s or not t:
            return False
        return s.val == t.val and self.isSameTree(s.left, t.left) and self.isSameTree(s.right, t.right)
| [
"noreply@github.com"
] | Iansdfg.noreply@github.com |
29228281397ab0bf3b9daeebe90890fd1a7fae89 | 55a273347cb103fe2b2704cb9653956956d0dd34 | /code/tmp_rtrip/test/test_imghdr.py | ba6b4479483d4ef12c4372a5ab1b978943aab0ea | [
"MIT"
] | permissive | emilyemorehouse/ast-and-me | 4af1bc74fc967ea69ac1aed92664f6428acabe6a | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | refs/heads/master | 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 | MIT | 2022-11-04T11:36:43 | 2017-12-21T18:27:19 | Python | UTF-8 | Python | false | false | 4,629 | py | import imghdr
import io
import os
import pathlib
import unittest
import warnings
from test.support import findfile, TESTFN, unlink
TEST_FILES = ('python.png', 'png'), ('python.gif', 'gif'), ('python.bmp', 'bmp'
), ('python.ppm', 'ppm'), ('python.pgm', 'pgm'), ('python.pbm', 'pbm'), (
'python.jpg', 'jpeg'), ('python.ras', 'rast'), ('python.sgi', 'rgb'), (
'python.tiff', 'tiff'), ('python.xbm', 'xbm'), ('python.webp', 'webp'), (
'python.exr', 'exr')
class UnseekableIO(io.FileIO):
    """A FileIO that refuses to report or change its position.

    Used to exercise imghdr.what() against streams that cannot seek.
    """

    def tell(self):
        """Always fail: this stream exposes no usable position."""
        raise io.UnsupportedOperation()

    def seek(self, *args, **kwargs):
        """Always fail: this stream cannot be repositioned."""
        raise io.UnsupportedOperation()
class TestImghdr(unittest.TestCase):
    """Tests for the stdlib imghdr image-type detection module.

    Relies on the CPython test-data images under 'imghdrdata' and the
    TESTFN scratch file provided by test.support.
    """

    @classmethod
    def setUpClass(cls):
        # Cache one known-good PNG path plus its raw bytes for reuse.
        cls.testfile = findfile('python.png', subdir='imghdrdata')
        with open(cls.testfile, 'rb') as stream:
            cls.testdata = stream.read()

    def tearDown(self):
        unlink(TESTFN)

    def test_data(self):
        # Every supported format detected via path, open stream, bytes
        # and bytearray.
        for filename, expected in TEST_FILES:
            filename = findfile(filename, subdir='imghdrdata')
            self.assertEqual(imghdr.what(filename), expected)
            with open(filename, 'rb') as stream:
                self.assertEqual(imghdr.what(stream), expected)
            with open(filename, 'rb') as stream:
                data = stream.read()
            self.assertEqual(imghdr.what(None, data), expected)
            self.assertEqual(imghdr.what(None, bytearray(data)), expected)

    def test_pathlike_filename(self):
        # os.PathLike objects are accepted in place of str paths.
        for filename, expected in TEST_FILES:
            with self.subTest(filename=filename):
                filename = findfile(filename, subdir='imghdrdata')
                self.assertEqual(imghdr.what(pathlib.Path(filename)), expected)

    def test_register_test(self):
        # User-registered detectors in imghdr.tests are consulted.
        def test_jumbo(h, file):
            if h.startswith(b'eggs'):
                return 'ham'
        imghdr.tests.append(test_jumbo)
        self.addCleanup(imghdr.tests.pop)
        self.assertEqual(imghdr.what(None, b'eggs'), 'ham')

    def test_file_pos(self):
        # what() must restore the stream position it was given.
        with open(TESTFN, 'wb') as stream:
            stream.write(b'ababagalamaga')
            pos = stream.tell()
            stream.write(self.testdata)
        with open(TESTFN, 'rb') as stream:
            stream.seek(pos)
            self.assertEqual(imghdr.what(stream), 'png')
            self.assertEqual(stream.tell(), pos)

    def test_bad_args(self):
        # Wrong argument types/arities raise rather than misdetect.
        with self.assertRaises(TypeError):
            imghdr.what()
        with self.assertRaises(AttributeError):
            imghdr.what(None)
        with self.assertRaises(TypeError):
            imghdr.what(self.testfile, 1)
        with self.assertRaises(AttributeError):
            imghdr.what(os.fsencode(self.testfile))
        with open(self.testfile, 'rb') as f:
            with self.assertRaises(AttributeError):
                imghdr.what(f.fileno())

    def test_invalid_headers(self):
        # Near-miss magic numbers must not be recognized.
        for header in (b'\x89PN\r\n', b'\x01\xd9', b'Y\xa6', b'cutecat',
                       b'000000JFI', b'GIF80'):
            self.assertIsNone(imghdr.what(None, header))

    def test_string_data(self):
        # str data (rather than bytes) is rejected with TypeError.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', BytesWarning)
            for filename, _ in TEST_FILES:
                filename = findfile(filename, subdir='imghdrdata')
                with open(filename, 'rb') as stream:
                    data = stream.read().decode('latin1')
                with self.assertRaises(TypeError):
                    imghdr.what(io.StringIO(data))
                with self.assertRaises(TypeError):
                    imghdr.what(None, data)

    def test_missing_file(self):
        with self.assertRaises(FileNotFoundError):
            imghdr.what('missing')

    def test_closed_file(self):
        # Closed file objects and closed BytesIO both raise ValueError.
        stream = open(self.testfile, 'rb')
        stream.close()
        with self.assertRaises(ValueError) as cm:
            imghdr.what(stream)
        stream = io.BytesIO(self.testdata)
        stream.close()
        with self.assertRaises(ValueError) as cm:
            imghdr.what(stream)

    def test_unseekable(self):
        # Streams that cannot seek propagate UnsupportedOperation.
        with open(TESTFN, 'wb') as stream:
            stream.write(self.testdata)
        with UnseekableIO(TESTFN, 'rb') as stream:
            with self.assertRaises(io.UnsupportedOperation):
                imghdr.what(stream)

    def test_output_stream(self):
        # A write-only stream is rejected with an OSError on read.
        with open(TESTFN, 'wb') as stream:
            stream.write(self.testdata)
            stream.seek(0)
            with self.assertRaises(OSError) as cm:
                imghdr.what(stream)
if __name__ == '__main__':
unittest.main()
| [
"emily@cuttlesoft.com"
] | emily@cuttlesoft.com |
4df9f56fbde1e62d2124e3ad7417697759b2045d | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /0.24/_downloads/9d439f87e921721175185f535b2ec58f/70_eeg_mri_coords.py | fb58242389733df3704e596fc7bfea93041b1803 | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 6,643 | py | # -*- coding: utf-8 -*-
"""
.. _tut-eeg-mri-coords:
EEG source localization given electrode locations on an MRI
===========================================================
This tutorial explains how to compute the forward operator from EEG data
when the electrodes are in MRI voxel coordinates.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
import os.path as op
import nibabel
from nilearn.plotting import plot_glass_brain
import numpy as np
import mne
from mne.channels import compute_native_head_t, read_custom_montage
from mne.viz import plot_alignment
##############################################################################
# Prerequisites
# -------------
# For this we will assume that you have:
#
# - raw EEG data
# - your subject's MRI reconstrcted using FreeSurfer
# - an appropriate boundary element model (BEM)
# - an appropriate source space (src)
# - your EEG electrodes in Freesurfer surface RAS coordinates, stored
# in one of the formats :func:`mne.channels.read_custom_montage` supports
#
# Let's set the paths to these files for the ``sample`` dataset, including
# a modified ``sample`` MRI showing the electrode locations plus a ``.elc``
# file corresponding to the points in MRI coords (these were `synthesized
# <https://gist.github.com/larsoner/0ac6fad57e31cb2d9caa77350a9ff366>`__,
# and thus are stored as part of the ``misc`` dataset).
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
bem_dir = op.join(subjects_dir, 'sample', 'bem')
fname_bem = op.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')
fname_src = op.join(bem_dir, 'sample-oct-6-src.fif')
misc_path = mne.datasets.misc.data_path()
fname_T1_electrodes = op.join(misc_path, 'sample_eeg_mri', 'T1_electrodes.mgz')
fname_mon = op.join(misc_path, 'sample_eeg_mri', 'sample_mri_montage.elc')
##############################################################################
# Visualizing the MRI
# -------------------
# Let's take our MRI-with-eeg-locations and adjust the affine to put the data
# in MNI space, and plot using :func:`nilearn.plotting.plot_glass_brain`,
# which does a maximum intensity projection (easy to see the fake electrodes).
# This plotting function requires data to be in MNI space.
# Because ``img.affine`` gives the voxel-to-world (RAS) mapping, if we apply a
# RAS-to-MNI transform to it, it becomes the voxel-to-MNI transformation we
# need. Thus we create a "new" MRI image in MNI coordinates and plot it as:
img = nibabel.load(fname_T1_electrodes) # original subject MRI w/EEG
ras_mni_t = mne.transforms.read_ras_mni_t('sample', subjects_dir) # from FS
mni_affine = np.dot(ras_mni_t['trans'], img.affine) # vox->ras->MNI
img_mni = nibabel.Nifti1Image(img.dataobj, mni_affine) # now in MNI coords!
plot_glass_brain(img_mni, cmap='hot_black_bone', threshold=0., black_bg=True,
resampling_interpolation='nearest', colorbar=True)
##########################################################################
# Getting our MRI voxel EEG locations to head (and MRI surface RAS) coords
# ------------------------------------------------------------------------
# Let's load our :class:`~mne.channels.DigMontage` using
# :func:`mne.channels.read_custom_montage`, making note of the fact that
# we stored our locations in Freesurfer surface RAS (MRI) coordinates.
#
# .. collapse:: |question| What if my electrodes are in MRI voxels?
# :class: info
#
# If you have voxel coordinates in MRI voxels, you can transform these to
# FreeSurfer surface RAS (called "mri" in MNE) coordinates using the
# transformations that FreeSurfer computes during reconstruction.
# ``nibabel`` calls this transformation the ``vox2ras_tkr`` transform
# and operates in millimeters, so we can load it, convert it to meters,
# and then apply it::
#
# >>> pos_vox = ... # loaded from a file somehow
# >>> img = nibabel.load(fname_T1)
# >>> vox2mri_t = img.header.get_vox2ras_tkr() # voxel -> mri trans
# >>> pos_mri = mne.transforms.apply_trans(vox2mri_t, pos_vox)
# >>> pos_mri /= 1000. # mm -> m
#
# You can also verify that these are correct (or manually convert voxels
# to MRI coords) by looking at the points in Freeview or tkmedit.
dig_montage = read_custom_montage(fname_mon, head_size=None, coord_frame='mri')
dig_montage.plot()
##############################################################################
# We can then get our transformation from the MRI coordinate frame (where our
# points are defined) to the head coordinate frame from the object.
trans = compute_native_head_t(dig_montage)
print(trans) # should be mri->head, as the "native" space here is MRI
##############################################################################
# Let's apply this digitization to our dataset, and in the process
# automatically convert our locations to the head coordinate frame, as
# shown by :meth:`~mne.io.Raw.plot_sensors`.
raw = mne.io.read_raw_fif(fname_raw)
raw.pick_types(meg=False, eeg=True, stim=True, exclude=()).load_data()
raw.set_montage(dig_montage)
raw.plot_sensors(show_names=True)
##############################################################################
# Now we can do standard sensor-space operations like make joint plots of
# evoked data.
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events)
cov = mne.compute_covariance(epochs, tmax=0.)
evoked = epochs['1'].average() # trigger 1 in auditory/left
evoked.plot_joint()
##############################################################################
# Getting a source estimate
# -------------------------
# New we have all of the components we need to compute a forward solution,
# but first we should sanity check that everything is well aligned:
fig = plot_alignment(
evoked.info, trans=trans, show_axes=True, surfaces='head-dense',
subject='sample', subjects_dir=subjects_dir)
##############################################################################
# Now we can actually compute the forward:
fwd = mne.make_forward_solution(
evoked.info, trans=trans, src=fname_src, bem=fname_bem, verbose=True)
##############################################################################
# Finally let's compute the inverse and apply it:
inv = mne.minimum_norm.make_inverse_operator(
evoked.info, fwd, cov, verbose=True)
stc = mne.minimum_norm.apply_inverse(evoked, inv)
brain = stc.plot(subjects_dir=subjects_dir, initial_time=0.1)
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
3aa9696f3620ecd552b6b77a018f7d1531e3d273 | 119a85a388fe436361530fbb47932e704d749557 | /PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/web/tests/test_resources.py | f6913098fa3d88ac835cf930480c39f61c6e7a52 | [
"Python-2.0"
] | permissive | chrisrgunn/cs156project | 014d5b05c6bf0e08ab8bd0dea525057d0e65b9a7 | e5414a37f9793c8b0674695b948482b559b18ea6 | refs/heads/master | 2021-01-19T14:09:49.046539 | 2017-05-24T02:10:29 | 2017-05-24T02:10:29 | 88,128,762 | 0 | 2 | null | 2017-05-04T23:49:09 | 2017-04-13T05:36:10 | Python | UTF-8 | Python | false | false | 7,737 | py | from unittest import TestCase, makeSuite, TestSuite
from peak.api import *
from peak.tests import testRoot
from test_templates import TestApp, BasicTest
class ResourceApp1(TestApp):
# This makes all 'peak.*' package resources available for testing;
# Ordinarily, you'd do this via a config file, but this is quick and easy
__makePkgAvailable = binding.Make(lambda: True,
offerAs = ['peak.web.resource_packages.peak.*']
)
show = web.bindResource('template1')
class MethodTest1(BasicTest):
appClass = ResourceApp1
def setUp(self):
r = testRoot()
self.policy = web.TestPolicy(self.appClass(r))
class ResourceApp2(ResourceApp1):
show = web.bindResource('template2')
xml = web.bindResource(
'/peak.running/EventDriven', metadata=[security.Anybody]
)
class MethodTest2(MethodTest1):
appClass = ResourceApp2
rendered = """<body xmlns:this="mid:pwt-this@peak-dev.org">
<h1>The title (with <xml/> & such in it)</h1>
<ul><li>1</li><li>2</li><li>3</li></ul>
<a href="++resources++/peak.running/EventDriven.xml">
The EventDriven.xml file, found at
http://127.0.0.1/++resources++/peak.running/EventDriven.xml
</a>
</body>
"""
class LocationTests(TestCase):
def setUp(self):
self.root = web.Location(testRoot())
self.policy = web.TestPolicy(self.root)
self.ctx = self.policy.newContext()
def testBasics(self):
self.failUnless(web.IConfigurableLocation(self.root) is self.root)
self.assertEqual(self.root.place_url, '')
def testContainers(self):
c1 = {'bar':'baz'}; c2={'foo':'bar'}
self.assertEqual(self.ctx.traverseName('foo',None), None)
self.assertEqual(self.ctx.traverseName('bar',None), None)
self.root.addContainer(c1,security.Nobody)
self.assertEqual(self.ctx.traverseName('foo',None), None)
self.assertEqual(self.ctx.traverseName('bar',None), None)
self.root.addContainer(c2)
self.assertEqual(self.ctx.traverseName('foo',None).current, 'bar')
self.assertEqual(self.ctx.traverseName('bar',None), None)
self.root.addContainer(c1,security.Anybody)
self.assertEqual(self.ctx.traverseName('foo',None).current, 'bar')
self.assertEqual(self.ctx.traverseName('bar',None).current, 'baz')
self.failUnless(
web.TraversalPath('bar/..').traverse(self.ctx).current is self.root
)
def testOffers(self):
self.root.addContainer({'bar':'baz'})
self.root.registerLocation('test.1','bar')
self.assertEqual(
self.ctx.traverseName('++id++test.1',None).current,'baz'
)
self.root.registerLocation('test2','.')
self.failUnless(self.ctx.traverseName('++id++test2') is self.ctx)
def testAppViews(self):
self.checkView(self.root,int,123)
self.checkView(self.root,protocols.IOpenProtocol,web.IWebTraversable)
def checkView(self,loc,tgt,src):
bar_handler = lambda ctx,o,ns,nm,qn,d: ctx.childContext(qn,"baz")
subLoc = web.Location(loc)
loc.addContainer({'spam':subLoc})
subLoc.registerView(tgt,'bar',bar_handler)
subLoc.addContainer({'foo':src})
ctx = web.TraversalPath('spam/foo/@@bar').traverse(
self.ctx.clone(current=loc)
)
self.assertEqual(ctx.current,'baz')
def testNestedViews(self):
loc = web.Location(self.root)
self.checkView(loc,int,123)
loc = web.Location(self.root)
self.checkView(loc,protocols.IOpenProtocol,web.IWebTraversable)
def testLocationView(self):
loc = web.Location(self.root)
bar_handler = lambda ctx,o,ns,nm,qn,d: ctx.childContext(qn,"baz")
loc.registerView(None,'bar',bar_handler)
loc.addContainer({'foo':loc})
ctx = web.TraversalPath('foo/@@bar').traverse(
self.ctx.clone(current=loc)
)
self.assertEqual(ctx.current,'baz')
def testContainerSequence(self):
c1 = {'foo':'baz'}; c2={'foo':'bar'}
self.assertEqual(self.ctx.traverseName('foo',None), None)
self.root.addContainer(c1)
self.assertEqual(self.ctx.traverseName('foo',None).current, 'baz')
self.root.addContainer(c2)
self.assertEqual(self.ctx.traverseName('foo',None).current, 'bar')
class ResourceTests(TestCase):
def testSubObjectRejection(self):
# Files and templates shouldn't allow subitems in PATH_INFO
paths = 'peak.web/resource_defaults.ini', 'peak.web.tests/template1'
policy = web.TestPolicy(ResourceApp1(testRoot()))
for path in paths:
try:
policy.simpleTraverse('/++resources++/%s/subitem' % path, True)
except web.NotFound,v:
self.assertEqual(v.args[0], "subitem")
else:
raise AssertionError("Should have raised NotFound:", path)
def testURLcalculations(self):
# Root locations: direct children of a non-IPlace parent
r=web.Resource(testRoot())
self.assertEqual(r.place_url,'')
r=web.Resource(testRoot(),'foo')
self.assertEqual(r.place_url,'')
# Skin path is resource prefix
policy = web.TestPolicy(ResourceApp1(testRoot()))
ctx = policy.simpleTraverse('/++resources++', False)
self.assertEqual(ctx.current.place_url, '++resources++')
# Skin children should include resource prefix
ctx2 = ctx.traverseName('peak.web')
self.assertEqual(ctx2.current.place_url, '++resources++/peak.web')
# check absolute ("mount point") URL
r=web.Resource(testRoot(),'foo',place_url="http://example.com/foo")
ctx = policy.newContext()
ctx = ctx.childContext('foo',r)
self.assertEqual(ctx.absoluteURL, ctx.current.place_url)
class IntegrationTests(TestCase):
def setUp(self):
self.policy = web.TestPolicy(
testRoot().lookupComponent(
'ref:sitemap@pkgfile:peak.web.tests/test-sitemap.xml'
)
); self.traverse = self.policy.simpleTraverse
def testIntView(self):
self.assertEqual(self.traverse('123/index_html'),
"<html>\n" " <head>\n" " <title>Python Object 123</title>\n"
" </head>\n"
" <body>\n"
"<h1>Python Object 123</h1>\n" "<hr />\n"
"My URL is http://127.0.0.1/123.\n"
"The server name as a property: "
"<span>PropertyName('127.0.0.1')</span>\n"
"<hr />\n" "</body></html>\n"
)
def testListView(self):
self.assertEqual(self.traverse('both/index_html'),
"<html>\n" " <head>\n"
" <title>Python Object [123, 'abc']</title>\n"
" </head>\n"
" <body>\n" "<h1>Python Object [123, 'abc']</h1>\n" "<hr />\n"
"<table><tr>\n" "<td>Name</td><td>Last modified</td><td>Size</td>"
"<td>Description</td>\n" "</tr><tr>\n"
"<td><a href=\"../123\">123</a></td>\n" "<td /><td /><td />\n"
"</tr><tr>\n" "<td><a href=\"../abc\">'abc'</a></td>\n"
"<td /><td /><td />\n" "</tr></table>\n" "<hr />\n"
"</body></html>\n"
)
def testViewDef(self):
self.assertEqual(self.traverse('/@@index_html', False).current, 1)
def testLayout(self):
self.assertEqual(self.traverse('123/layout-test'),
"<html><div>123</div></html>\n")
TestClasses = (
LocationTests, MethodTest1, MethodTest2, ResourceTests, IntegrationTests
)
def test_suite():
    """Collect every 'test'-prefixed case from TestClasses into one suite."""
    suite = TestSuite()
    for case in TestClasses:
        suite.addTest(makeSuite(case, 'test'))
    return suite
| [
"chrisrgunn@gmail.com"
] | chrisrgunn@gmail.com |
17a71d2d718371ab5a42362bd4621ac7f2623db6 | e910f6f0ffd8c25e8aeb63d36dd1d899bf61c862 | /pyEX/stats.py | 12275339d638a6e2c571e70fe7b7ed5f962cd85d | [
"Apache-2.0"
] | permissive | knightsUCF/pyEX | 2891415738ae9c0888f45102973a7baa5ffb39c7 | a6e190c0fffe5757436e4a94f729e6e232cfe66b | refs/heads/master | 2020-04-19T13:20:32.206133 | 2019-01-24T21:59:01 | 2019-01-24T21:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | import pandas as pd
from datetime import datetime
from .common import _getJson, PyEXception, _strOrDate, _reindex, _toDatetime
def stats(token='', version=''):
    '''Intraday stats as raw JSON.

    https://iextrading.com/developer/docs/#intraday
    '''
    endpoint = 'stats/intraday'
    return _getJson(endpoint, token, version)
def statsDF(token='', version=''):
    '''DataFrame form of stats(), with date columns parsed.

    https://iextrading.com/developer/docs/#intraday
    '''
    frame = pd.DataFrame(stats(token, version))
    _toDatetime(frame)
    return frame
def recent(token='', version=''):
    '''Recent stats as raw JSON.

    https://iextrading.com/developer/docs/#recent
    '''
    endpoint = 'stats/recent'
    return _getJson(endpoint, token, version)
def recentDF(token='', version=''):
    '''DataFrame form of recent(), indexed by date.

    https://iextrading.com/developer/docs/#recent
    '''
    frame = pd.DataFrame(recent(token, version))
    _toDatetime(frame)
    _reindex(frame, 'date')
    return frame
def records(token='', version=''):
    '''Records stats as raw JSON.

    https://iextrading.com/developer/docs/#records
    '''
    endpoint = 'stats/records'
    return _getJson(endpoint, token, version)
def recordsDF(token='', version=''):
    '''DataFrame form of records(), with date columns parsed.

    https://iextrading.com/developer/docs/#records
    '''
    frame = pd.DataFrame(records(token, version))
    _toDatetime(frame)
    return frame
def summary(date=None, token='', version=''):
    '''Historical summary stats, optionally for a given month.

    https://iextrading.com/developer/docs/#historical-summary

    Args:
        date: month to fetch, either a 'YYYYMM' string or a datetime
            (formatted with %Y%m -- this endpoint is month-granular).
        token, version: passed through to _getJson.

    Raises:
        PyEXception: if *date* is neither a str nor a datetime.
    '''
    if date:
        if isinstance(date, str):
            return _getJson('stats/historical?date=' + date, token, version)
        elif isinstance(date, datetime):
            return _getJson('stats/historical?date=' + date.strftime('%Y%m'), token, version)
        else:
            # Bug fix: token/version were previously passed as extra
            # positional arguments to the exception constructor.
            raise PyEXception("Can't handle type : %s" % str(type(date)))
    return _getJson('stats/historical', token, version)
def summaryDF(date=None, token='', version=''):
    '''DataFrame form of summary(), with date columns parsed.

    https://iextrading.com/developer/docs/#historical-summary
    '''
    frame = pd.DataFrame(summary(date, token, version))
    _toDatetime(frame)
    return frame
def daily(date=None, last='', token='', version=''):
    '''Historical daily stats.

    https://iextrading.com/developer/docs/#historical-daily

    Fetches either a specific *date* (str or datetime, normalized via
    _strOrDate) or the *last* N days; with neither argument, returns
    the endpoint's default window.  (The original carried a second,
    dead docstring statement pointing at #historical-summary; removed.)
    '''
    if date:
        date = _strOrDate(date)
        return _getJson('stats/historical/daily?date=' + date, token, version)
    elif last:
        # str() so callers may pass an int as well as a string.
        return _getJson('stats/historical/daily?last=' + str(last), token, version)
    return _getJson('stats/historical/daily', token, version)
def dailyDF(date=None, last='', token='', version=''):
    '''DataFrame form of daily(), with date columns parsed.

    https://iextrading.com/developer/docs/#historical-daily

    (The original carried a second, dead docstring statement pointing
    at #historical-summary; removed.)
    '''
    df = pd.DataFrame(daily(date, last, token, version))
    _toDatetime(df)
    return df
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
e452d7cac2546a0648c54b57dbea9aa760d975b1 | 98b8aeac8dea6bb470fbd2a27df10f8692d1bc9a | /mysite/product/migrations/0010_auto_20140907_0418.py | af901d1e46500f42696c371161b8a6e9a6975d7c | [] | no_license | grantnicholas/django_store | 4031d3c95c9801192b90704d61a9a888d4273ffc | 13df2b2ad7a4c87ba16740ffe2671fad49431518 | refs/heads/master | 2021-01-10T22:03:55.772219 | 2014-09-15T17:36:48 | 2014-09-15T17:36:48 | 24,066,144 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: makes the product image field
    # nullable/blank with a default placeholder image path.

    dependencies = [
        ('product', '0009_auto_20140907_0345'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(default=b'pic_folder/no-img.png', upload_to=b'pic_folder/', null=True, verbose_name=b'Image', blank=True),
        ),
    ]
| [
"grantnicholas2015@u.northwestern.edu"
] | grantnicholas2015@u.northwestern.edu |
e7ecd3bf9c6117e3bf3d3b1325f4bd8856214ace | 22e9f383a50c66c48349ce7102fdc67f06ff7da4 | /intrest.py | 702c913d5475b3e67c97e1e0ac6ea46bf083d2d5 | [] | no_license | jikka/pythong | 2873c4a5ef99fd85fd04c122a90a108cb67b2755 | d5357bf672e98b51378a7534223f86e59fa768fc | refs/heads/master | 2020-04-24T07:37:32.670541 | 2018-08-15T01:17:23 | 2018-08-15T01:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | a=int(input("\nenter the principal amount"))
b=int(input("\nenter the time"))
c=int(input("\nenter the rate"))
d=(a*b*c)/100
print("\n",int(d))
| [
"noreply@github.com"
] | jikka.noreply@github.com |
c37832646a2a4d9694c531342d06ce7ab3610db9 | 7d42ce4acfe80bc3fa9e2fcdb28be62b4310aab8 | /utils/identity_initializer.py | 9a2df6a8b34265926104e694d93dfd5cd06342e3 | [] | no_license | tomarraj008/AQUAA | ed19a0b59a7d67ef9db85e474e902390440e165c | d0022051b7055b175626aefe808cc6c474c58232 | refs/heads/master | 2020-05-04T15:43:43.049417 | 2018-12-20T17:39:09 | 2018-12-20T17:39:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops.init_ops import Initializer
class identity_initializer(Initializer):
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return linalg_ops.eye(shape[0], shape[1], dtype=dtype)
def get_config(self):
return {"dtype": self.dtype.name} | [
"shubham.ddash@gmail.com"
] | shubham.ddash@gmail.com |
61045aa44e926ec7065591922b56df967aee9881 | 5add80be09ee754fced03e512a9acc214971cddf | /python-code/dlib-learning/face-recognition-resnet.py | b1cc0de60705de3aae2a390b8546c637fe12845a | [
"Apache-2.0"
] | permissive | juxiangwu/image-processing | f774a9164de9c57e88742e6185ac3b28320eae69 | c644ef3386973b2b983c6b6b08f15dc8d52cd39f | refs/heads/master | 2021-06-24T15:13:08.900960 | 2019-04-03T10:28:44 | 2019-04-03T10:28:44 | 134,564,878 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py |
import dlib
import numpy as np
import cv2
import os
import json
import matplotlib.pyplot as plt
# --------------加载数据----------------#
detector = dlib.cnn_face_detection_model_v1('../resources/models/dlib/mmod_human_face_detector.dat')
#detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('../resources/models/dlib//shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('../resources/models/dlib//dlib_face_recognition_resnet_model_v1.dat')
# -------------------------------------------
imagePath = 'temp/faces/'
pnum=np.size(os.listdir(imagePath)); #图像的目录
data = np.zeros((1,128)) #定义一个128维的空向量data
label = []
IMAGEs=list(np.zeros((pnum,1)));
LABELs=list(np.zeros((pnum,1))); #定义空的list存放人脸的标签
index=0;
Face_Vector=np.zeros((128,pnum))
for file in os.listdir(imagePath): #开始一张一张索引目录中的图像
if '.jpg' in file or '.png' in file:
fileName = file
labelName = file.split('_')[0] #获取标签名
print('current image: ', file)
#print('current label: ', labelName)
img = cv2.imread(imagePath + file) #使用opencv读取图像数据
if img.shape[0]*img.shape[1] > 500000: #如果图太大的话需要压缩,这里像素的阈值可以自己设置
img = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
dets = detector(img, 1) #使用检测算子检测人脸,返回的是所有的检测到的人脸区域
for k, d in enumerate(dets):
rec = dlib.rectangle(d.rect.left(),d.rect.top(),d.rect.right(),d.rect.bottom())
shape = sp(img, rec) #获取landmark
face_descriptor = facerec.compute_face_descriptor(img, shape) #使用resNet获取128维的人脸特征向量
Face_Vector[:,index]=face_descriptor #记录所有脸特征
faceArray = np.array(face_descriptor).reshape((1, 128)) #转换成numpy中的数据结构
data = np.concatenate((data, faceArray))
#显示人脸区域 #拼接到事先准备好的data当中去
label.append(labelName) #保存标签
cv2.rectangle(img, (rec.left(), rec.top()), (rec.right(), rec.bottom()), (0, 255, 0), 2)
#cv2.waitKey(2)
#cv2.imshow('image', img)
IMAGEs[index]=img;
LABELs[index]=file
index+=1
data = data[1:, :] #因为data的第一行是空的128维向量,所以实际存储的时候从第二行开始
np.savetxt('faceData.txt', data, fmt='%f') #保存人脸特征向量合成的矩阵到本地
#labelFile=open('label.txt','w')
#json.dump(label, labelFile) #使用json保存list到本地
#labelFile.close()
cv2.destroyAllWindows()
Face_dist=np.zeros((pnum,pnum))
for i in range(pnum):
for j in range(pnum):
Face_dist[i,j]=np.linalg.norm(Face_Vector[:,i]-Face_Vector[:,j])
#for k in range(pnum):
# for s in range(blcoksize):
# plt.imshow(IMAGEs[k*blcoksize+s])
# plt.show()
import copy
Knn=5 #K最近邻
record=np.zeros((pnum,Knn))
blcoksize=3
ss=int(Knn/blcoksize)+1
for i in range(pnum):
plt.figure(figsize=(blcoksize*4,ss*4),dpi=80)
plt.subplot(ss,blcoksize,1)
plt.imshow(IMAGEs[i])
u=copy.copy(Face_dist[:,i])
u[i]=1
for k in range(Knn):
j=np.argmin(u)
u[j]=k+10
record[i,k]=j
plt.subplot(ss,blcoksize,k+2)
plt.imshow(IMAGEs[j])
plt.title(LABELs[j][:-4])
plt.show() | [
"kkoolerter@gmail.com"
] | kkoolerter@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.