blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e94da9aee7f4ec4c88a7830ea7b688d7c02f3c68 | 8ac3fcdd9647b8898d1f7cf941465e906853b299 | /calc/coulomb_short/phase_shift/dvr/e05/plot_r0.py | 73943a5d28bcf251fa3a06f80c1f38ab797bbacf | [] | no_license | ReiMatsuzaki/rescol | a8589eb624782d081fa8f3d884d8ddee2993ed80 | 46c205a3228423f5be97eeaa68bcb7a4b0f31888 | refs/heads/master | 2021-05-04T10:36:11.090876 | 2016-11-01T11:12:04 | 2016-11-01T11:12:04 | 43,137,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import matplotlib.pyplot as plt
import numpy as np

# Load the two-column data file: column 0 = r0, column 1 = phase shift.
data = np.loadtxt("r0_phase.dat")
plt.plot(data.T[0], data.T[1], "o")
# Echo both columns for quick inspection.
# BUG FIX: the original used Python 2 print statements ("print data.T[0]"),
# which are a SyntaxError under Python 3; print() works on both versions.
print(data.T[0])
print(data.T[1])
plt.savefig("r0_phase.png")
| [
"matsuzaki.rei@gmail.com"
] | matsuzaki.rei@gmail.com |
bf676fd203fb557be112829a2870713943ec00ef | b385fc2f18bbb43ec1bca1606b62ae83f33dcb2d | /Python_OOP/Polymorphism_and_Magic_Methods/account.py | fd2b8dd878864a03c63b1f47773ec814a67ff7f5 | [] | no_license | rishinkaku/Software-University---Software-Engineering | d9bee36de12affc9aed7fcc0b8b6616768340e51 | b798a0c6927ef461491c8327451dd00561d836e4 | refs/heads/master | 2023-06-10T19:52:51.016630 | 2021-07-08T00:45:06 | 2021-07-08T00:45:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | class Account:
    def __init__(self, owner, amount=0):
        """Create an account for *owner* with an optional opening amount."""
        self.owner = owner
        self.amount = amount
        # Signed deltas applied on top of the opening amount (see `balance`).
        self._transactions = []
def __add__(self, other):
account = Account(f'{self.owner}&{other.owner}', self.amount + other.amount)
account._transactions = self._transactions + other._transactions
return account
def __str__(self):
return f"{__class__.__name__} of {self.owner} with starting amount: {self.amount}"
def __repr__(self):
return f"{__class__.__name__}({self.owner}, {self.amount})"
    def __len__(self):
        # Number of recorded transactions, so len(account) works.
        return len(self._transactions)
    def __getitem__(self, index):
        # Index into the transaction history; this also makes the account
        # iterable via the sequence protocol (used by the demo's for-loop).
        return self._transactions[index]
    def __reversed__(self):
        # Iterate transactions from newest to oldest.
        return reversed(self._transactions)
    def __eq__(self, other):
        # Accounts compare equal when their computed balances match.
        # NOTE(review): defining __eq__ without __hash__ makes Account
        # unhashable (unusable in sets / as dict keys) — confirm intended.
        return self.balance == other.balance
    def __gt__(self, other):
        # Order accounts by computed balance; Python derives `<` for the
        # demo's comparisons by reflecting this operator.
        return self.balance > other.balance
    def __ge__(self, other):
        # >= by computed balance (`<=` is derived via reflection).
        return self.balance >= other.balance
def add_transaction(self, amount):
if not isinstance(amount, int):
raise ValueError('please use int for amount')
self._transactions.append(amount)
    @property
    def balance(self):
        # Opening amount plus the sum of all recorded transactions.
        return self.amount + sum(self._transactions)
@staticmethod
def validate_transaction(account, amount_to_add):
if account.amount + amount_to_add < 0:
raise ValueError("sorry cannot go in debt!")
account._transactions.append(amount_to_add)
return f"New balance: {account.balance}"
# --- Demo / smoke test exercising the Account magic methods ---
acc = Account('bob', 10)
acc2 = Account('john')
print(acc) # Account of bob with starting amount: 10
print(repr(acc)) # Account(bob, 10)
acc.add_transaction(20)
acc.add_transaction(-20)
acc.add_transaction(30)
print(acc.balance) # 40
print(len(acc)) # 3
for transaction in acc:
    print(transaction) # 20, -20, 30
print(acc[1]) # -20
print(list(reversed(acc))) # [30, -20, 20]
acc2.add_transaction(10)
acc2.add_transaction(60)
print(acc > acc2) # F
print(acc >= acc2) # F
print(acc < acc2) # T
print(acc <= acc2) # T
print(acc == acc2) # F
print(acc != acc2) # T
acc3 = acc + acc2
print(acc3) # Account of bob&john with starting amount: 10
print(acc3._transactions) # [20, -20, 30, 10, 60]
| [
"66394357+DimAntDim@users.noreply.github.com"
] | 66394357+DimAntDim@users.noreply.github.com |
f8ddbafef1773f98c0b8602eb3236035455186d4 | 975ecceab70eb4d75b1fe1b61a14c8375c291adb | /3.2_introduction_to_python_II/in_class/05-Ins_BasicRead/readFile.py | 201d1a6792fb077315b8defad7a519207985bb6c | [] | no_license | carlabeltran/data_analytics_visualization | 82306b44570ba89ef298d8cf07f9151e0d7cb031 | 7985138ff3fbbdcf077c08aaea6dcb64f12b9a22 | refs/heads/master | 2020-12-09T17:38:34.458647 | 2020-01-12T03:03:35 | 2020-01-12T03:03:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # relative path to file:
# Path of the file to display, relative to the working directory.
# BUG FIX (idiom): the original named this variable `file`, shadowing the
# builtin of the same name.
input_path = "Resources/input.txt"

# `with` guarantees the handle is closed even if reading raises.
with open(input_path, 'r') as text:
    contents = text.read()
    print(contents)
| [
"philipstubbs13@gmail.com"
] | philipstubbs13@gmail.com |
c6671ddad1c1408fa6e63bb336ac6a38fbfebca5 | b22e46198a95af0ce1db91a213fbc14ae00f47fc | /src/components/panel.py | bb6eef62b0e582a40c31491ed2c284a29effc3c6 | [
"MIT"
] | permissive | JacobLondon/pyngine | 5ec081e353782a348ddad3160ac52670e16fa20a | a37bea65fdd15642753dd8f8bcaf8899a03f4e1d | refs/heads/master | 2023-06-09T10:05:24.228411 | 2023-06-02T17:25:32 | 2023-06-02T17:25:32 | 164,152,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from .component import Component
class Panel(Component):
    """@brief A component whose only job is to render a colored rectangle.
    """
    def __init__(self, controller, parent: Component=None, z: int=0):
        Component.__init__(self, controller, parent, z)
        self.text = 'Panel'

    def load(self):
        """@brief Anchor the panel so drawing uses the correct location.
        """
        self.set_anchor()

    def draw(self):
        """@brief Paint the panel's full rectangle in its background color.
        """
        left = self.anchored_loc[0]
        top = self.anchored_loc[1]
        self.controller.painter.fill_rect(
            left, top, self.width, self.height, self.background)
| [
"jelondon12@gmail.com"
] | jelondon12@gmail.com |
9459d1e86280da943d06e10f518ad4725c6afbf0 | aa49120740b051eed9b7199340b371a9831c3050 | /islands_1.py | 67e0da76de1096b354edc4f7f43090becf714bf7 | [] | no_license | ashutosh-narkar/LeetCode | cd8d75389e1ab730b34ecd860b317b331b1dfa97 | b62862b90886f85c33271b881ac1365871731dcc | refs/heads/master | 2021-05-07T08:37:42.536436 | 2017-11-22T05:18:23 | 2017-11-22T05:18:23 | 109,366,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | #!/usr/bin/env python
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands.
An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
You may assume all four edges of the grid are all surrounded by water.
Example 1:
11110
11010
11000
00000
Answer: 1
Example 2:
11000
11000
00100
00011
Answer: 3
'''
# Solution 1: Using DFS. The basic idea of the following solution is merging adjacent lands,
# and the merging should be done recursively.
# Each element is visited once only. So time is O(m * n).
'''
Solution Explanation:
This is an variation of the standard problem:
“Counting number of connected components in a undirected graph”.
A connected component of an undirected graph is a subgraph in which every two vertices are
connected to each other by a path(s), and which is connected to no other vertices outside the subgraph.
A graph where all vertices are connected with each other, has exactly one connected component,
consisting of the whole graph. Such graph with only one connected component is called as Strongly Connected Graph.
The problem can be easily solved by applying DFS() on each component.
In each DFS() call, a component or a sub-graph is visited. We will call DFS on the next un-visited component.
The number of calls to DFS() gives the number of connected components. BFS can also be used.
'''
def numIslands(grid):
    """Count islands of "1" cells connected horizontally/vertically (DFS).

    :type grid: List[List[str]]
    :rtype: int

    Visited land cells are marked "#" in place, so the grid is mutated.
    Improvement: the flood-fill helper is nested instead of relying on the
    loose module-global ``dfs`` (this file later redefines ``numIslands``
    too), and the grid dimensions are computed once.
    """
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])

    def _sink(r, c):
        # Flood-fill one island, replacing its "1" cells with "#".
        if 0 <= r < rows and 0 <= c < cols and grid[r][c] == "1":
            grid[r][c] = "#"
            _sink(r - 1, c)
            _sink(r + 1, c)
            _sink(r, c - 1)
            _sink(r, c + 1)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == "1":
                _sink(i, j)
                count += 1
    return count
def dfs(grid, i, j):
    """Flood-fill from (i, j): mark this cell and all connected "1" cells "#".

    Out-of-range coordinates and non-"1" cells are ignored, so callers may
    probe neighbors without bounds checks.
    """
    rows = len(grid)
    cols = len(grid[0])
    if not (0 <= i < rows and 0 <= j < cols):
        return
    if grid[i][j] != "1":
        return
    grid[i][j] = "#"
    for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        dfs(grid, i + di, j + dj)
# Solution 2: BFS
from collections import deque
def numIslands(grid):
    """Count islands of '1' cells using BFS over a set of land coordinates.

    All land positions are collected into a set; each BFS exhausts one
    connected component, removing its cells from the set as they are seen.
    """
    if not grid:
        return 0

    # Every (row, col) holding land; membership tests are O(1).
    land = {(r, c)
            for r, row in enumerate(grid)
            for c, cell in enumerate(row)
            if cell == '1'}

    count = 0
    while land:
        # Any remaining coordinate starts a new island.
        count += 1
        frontier = deque([land.pop()])

        # Expand the island breadth-first until no neighbors remain.
        while frontier:
            r, c = frontier.popleft()
            for neighbor in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if neighbor in land:
                    land.remove(neighbor)
                    frontier.append(neighbor)

    return count
| [
"ashutosh.narkar@one.verizon.com"
] | ashutosh.narkar@one.verizon.com |
2571ba54d5df7e4aaba53e2be4dbca35119a8bb9 | 34031146608b92d4c8a5b9d36c36cb2ba2752acb | /python/bs4/scraper_projects.py | f662645c1dcd4a48de416200148254a0ed37aa78 | [
"MIT"
] | permissive | zkan/100DaysOfCode | a2f98f32771321f024e65d6f763b46b4db47a40c | 3c713ead94a9928e2d0f8d794e49ec202dc64ba3 | refs/heads/master | 2023-01-24T01:14:37.798862 | 2020-04-11T09:21:09 | 2020-04-11T09:21:09 | 159,088,331 | 3 | 0 | MIT | 2023-01-04T01:31:36 | 2018-11-26T00:28:10 | JavaScript | UTF-8 | Python | false | false | 813 | py | import bs4
import requests
BASE_URL = 'https://www.prontotools.io/'
PROJECTS_URL = f'{BASE_URL}projects/'
def pull_site():
    """GET the projects page and return the response.

    Raises requests.HTTPError (via raise_for_status) on any 4xx/5xx status.
    """
    response = requests.get(PROJECTS_URL)
    response.raise_for_status()
    return response
def scrape(site):
    """Extract (name, absolute URL) pairs for every project link on *site*.

    *site* is a requests response; links are the anchors inside <h2> tags.
    Relative hrefs are resolved against BASE_URL.
    """
    soup = bs4.BeautifulSoup(site.text, 'html.parser')
    project_list = []
    for anchor in soup.select('h2 a'):
        href = anchor['href']
        # Original heuristic: treat any href lacking "http" as site-relative.
        if 'http' not in href:
            href = f"{BASE_URL}{href}"
        project_list.append((anchor.getText(), href))
    return project_list
# Fetch the projects page and print its links as a Markdown bullet list.
if __name__ == '__main__':
    site = pull_site()
    print()
    print('### Projects')
    project_list = scrape(site)
    for name, url in project_list:
        print(f'* {name} - {url}')
| [
"kan@prontomarketing.com"
] | kan@prontomarketing.com |
1d198bc94bcb5339e093384625828c4160e44524 | ac227cc22d5f5364e5d029a2cef83816a6954590 | /applications/physbam/physbam-lib/Scripts/Archives/render/prepare_render | d595184ba70aaf3645a8425e37a21c1f137070a6 | [
"BSD-3-Clause"
] | permissive | schinmayee/nimbus | 597185bc8bac91a2480466cebc8b337f5d96bd2e | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | refs/heads/master | 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 | BSD-3-Clause | 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null | UTF-8 | Python | false | false | 525 | #!/usr/bin/python
# usage: prepare_render data_source render_directory
import os
import re
import shutil
import sys
# source of data first argument, render directory second argument
src,render=sys.argv[1:]
abs_src=os.path.abspath(src)
os.mkdir(render)
os.chdir(render)
os.symlink(abs_src,"Input")
os.mkdir("Output")
os.chmod("Output",0775)
os.mkdir("Common")
framedep=re.compile(r'.+\.-?[0-9]+(\.gz)?')
map(lambda x: os.symlink("../Input/"+x,"Common/"+x),
filter(lambda x:not framedep.match(x),os.listdir("Input")))
| [
"quhang@stanford.edu"
] | quhang@stanford.edu | |
554142b656d8d5deec0f4d124eee6bcdead0ba1c | 4b89a7de426fb53b999b5f3834404215a90817df | /pyobjc-framework-CoreWLAN/setup.py | db8e3f9acc3da261aabacbd644018c0cb483c4cc | [] | no_license | peeyush-tm/pyobjc | a1f3ec167482566ddc7c895cfa2aca436109cf66 | da488946f6cc67a83dcc26c04484ca4f10fabc82 | refs/heads/master | 2021-01-20T19:26:06.015044 | 2016-05-22T14:53:37 | 2016-05-22T14:53:37 | 60,502,688 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | '''
Wrappers for the "CoreWLAN" framework on MacOS X.
These wrappers don't include documentation, please check Apple's documention
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
'''
import os
from pyobjc_setup import setup, Extension
VERSION="3.2a1"
setup(
name='pyobjc-framework-CoreWLAN',
version=VERSION,
description = "Wrappers for the framework CoreWLAN on Mac OS X",
long_description=__doc__,
packages = [ "CoreWLAN" ],
setup_requires = [
'pyobjc-core>=' + VERSION,
],
install_requires = [
'pyobjc-core>=' + VERSION,
'pyobjc-framework-Cocoa>=' + VERSION,
],
ext_modules = [
Extension("CoreWLAN._CoreWLAN",
[ "Modules/_CoreWLAN.m" ],
extra_link_args=["-framework", "CoreWLAN"],
depends=[
os.path.join('Modules', fn)
for fn in os.listdir('Modules')
if fn.startswith('_CoreWLAN')
]
),
],
min_os_level="10.6",
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
4927f0567834e6f5706c9a64a772fcd7bd34f7b1 | bd0cabc8f76d6861e6cc13c72eb178d3e9b697b7 | /adwords_python3_examples_10.1.0/v201710/basic_operations/update_campaign.py | d2404134ccd5503d6b237c63298f3e9146d0a651 | [
"MIT"
] | permissive | xyla-io/hazel | 4d8334766b6176bda21c99fefd4cfb64f05ffc5d | 260ce906761d8b808c21ca61b44cc71ca3329e8c | refs/heads/main | 2022-12-27T13:57:07.421062 | 2020-10-14T19:01:16 | 2020-10-14T19:01:16 | 304,109,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates a campaign by setting its status to PAUSED.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
    """Pause the AdWords campaign identified by *campaign_id*.

    *client* is an initialized adwords.AdWordsClient instance.
    """
    # Initialize appropriate service.
    campaign_service = client.GetService('CampaignService', version='v201710')

    # A single SET operation flips the campaign's status to PAUSED.
    operation = {
        'operator': 'SET',
        'operand': {
            'id': campaign_id,
            'status': 'PAUSED'
        }
    }
    campaigns = campaign_service.mutate([operation])

    # Display results.
    for campaign in campaigns['value']:
        print(('Campaign with name "%s" and id "%s" was updated.'
               % (campaign['name'], campaign['id'])))
if __name__ == '__main__':
    # Initialize client object.
    # Credentials come from the googleads.yaml file in the home directory.
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client, CAMPAIGN_ID)
| [
"gregory@incipia.co"
] | gregory@incipia.co |
533f0946b24008dd61bcc13df296b0ba6c623059 | b85de5b2a65baa6b67666c65b4e059bf6caaaf6a | /CodeKata/BetweenRange.py | 11e7a801b3d26fb459047a4e980df8ef473e2dd4 | [] | no_license | ar95314/Guvi-1 | 319189d4317aa0c2075e02ca71777c8587eb847b | ef6da1cfd18714adc3acf023314e481ebe326d4e | refs/heads/master | 2020-07-07T02:35:49.526732 | 2019-07-17T18:03:44 | 2019-07-17T18:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | num=int(input())
sr, er = map(int, input().split())
# Decide whether num lies strictly between sr and er (exclusive bounds,
# matching the original range(sr+1, er) membership test).
# BUG FIX: in the original the `else` was bound to the `if` inside the loop,
# so 'no' was printed once per non-matching iteration, and nothing at all
# was printed when the range was empty. A direct comparison prints exactly
# one answer.
if sr < num < er:
    print('yes')
else:
    print('no')
| [
"noreply@github.com"
] | ar95314.noreply@github.com |
eab6dc3cbf21c0c830a8a59d2b7a7cec4fc073f4 | 003372621424577306aff35de88f7366fcc4baa0 | /sa_tools_core/libs/qcloud/qcloudsdkbmlb/DescribeBmForwardRulesRequest.py | 8f21d2009e66002bf33ae7d5ef23a2bc5d1b1ae4 | [] | no_license | stoensin/sa-tools-core | ab28ca5f7a8d5703952db9e6554b104682507964 | c0faeef4de8ba677817384d88cb107ad2308c03e | refs/heads/master | 2020-07-11T06:59:33.224305 | 2019-08-22T07:36:46 | 2019-08-22T07:36:46 | 204,472,284 | 1 | 0 | null | 2019-08-26T12:36:18 | 2019-08-26T12:36:18 | null | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
from ..qcloudsdkcore.request import Request
class DescribeBmForwardRulesRequest(Request):
    """Request wrapper for the bmlb ``DescribeBmForwardRules`` API action.

    Each get_*/set_* pair below mirrors one request parameter; values are
    stored in the params dict managed by the ``Request`` base class.
    """
    def __init__(self):
        # Arguments: module, CLI version tag, action name, endpoint host.
        super(DescribeBmForwardRulesRequest, self).__init__(
            'bmlb', 'qcloudcliV1', 'DescribeBmForwardRules', 'bmlb.api.qcloud.com')

    # Parameter: domainIds
    def get_domainIds(self):
        return self.get_params().get('domainIds')

    def set_domainIds(self, domainIds):
        self.add_param('domainIds', domainIds)

    # Parameter: listenerId
    def get_listenerId(self):
        return self.get_params().get('listenerId')

    def set_listenerId(self, listenerId):
        self.add_param('listenerId', listenerId)

    # Parameter: loadBalancerId
    def get_loadBalancerId(self):
        return self.get_params().get('loadBalancerId')

    def set_loadBalancerId(self, loadBalancerId):
        self.add_param('loadBalancerId', loadBalancerId)
| [
"tclh123@gmail.com"
] | tclh123@gmail.com |
cf2d19b4928717ef00cf356b37153917f6e9fbcf | 621a40fa363dc0c32c96a4c8fdfe9142877e2ff1 | /ietf/wsgi.py | d82a32446861073c43c2f4d1a7d2e60d4fd0ab64 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | omunroe-com/ietfdb2 | d9c40bebe4b25059f810c70dd1370cca30cb3c36 | aeaae292fbd55aca1b6043227ec105e67d73367f | refs/heads/master | 2020-04-04T21:05:56.067430 | 2018-11-05T09:08:27 | 2018-11-05T09:08:27 | 156,273,382 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | """
WSGI configuration for the datatracker.
The following apache datatracker configuration has been used together with a
datatracker checkout of trunk@ under /srv/www/ietfdb/ to run this on a development
server using mod_wsgi under apache. For a production server, additional access
restrictions are needed for the secretariat tools.
----
# This directive must be set globally, not inside <Virtualhost/>:
WSGIPythonEggs /var/www/.python-eggs/
<VirtualHost *:80>
ServerName tracker.tools.ietf.org
ServerSignature Off
CustomLog /var/log/apache2/tracker.tools.ietf.org-access.log full
ErrorLog /var/log/apache2/tracker.tools.ietf.org-error.log
DocumentRoot "/srv/www/ietfdb/static/"
Alias /robots.tx /srv/www/ietfdb/static/dev/robots.txt
AliasMatch "^/((favicon.ico|images|css|js|media|secretariat)(.*))$" /srv/www/ietfdb/static/$1
WSGIScriptAlias / /srv/www/ietfdb/ietf/wsgi.py
<Location "/accounts/login">
AuthType Digest
AuthName "IETF"
AuthUserFile /var/local/loginmgr/digest
AuthGroupFile /var/local/loginmgr/groups
AuthDigestDomain http://tools.ietf.org/
Require valid-user
</Location>
</VirtualHost>
----
"""
import os
import sys
import syslog

# Project root: the directory that contains this ietf/ package.
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

syslog.openlog("datatracker", syslog.LOG_PID, syslog.LOG_USER)

# Virtualenv support
# If a bundled virtualenv exists at <root>/env, activate it in-process.
# NOTE(review): execfile exists only in Python 2 — this module predates
# Python 3; porting would require exec(open(...).read(), ...).
virtualenv_activation = os.path.join(path, "env", "bin", "activate_this.py")
if os.path.exists(virtualenv_activation):
    syslog.syslog("Starting datatracker wsgi with virtualenv %s" % os.path.dirname(os.path.dirname(virtualenv_activation)))
    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
else:
    syslog.syslog("Starting datatracker wsgi without virtualenv")

# Make the project importable before Django settings are loaded.
if not path in sys.path:
    sys.path.insert(0, path)

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")

# `application` is the WSGI callable mod_wsgi looks for.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"henrik@levkowetz.com"
] | henrik@levkowetz.com |
f533ae7916cf74894cb0ece8950764f06b0f5adc | b04791ac170876f94cfe9a021512916296e088ba | /reddit/management/commands/populate_test_data.py | c4156f7579ed585873b595b6c89f8eeb18acf392 | [
"Apache-2.0"
] | permissive | imapex/suggestions | 6cb88856f11b6ba2ecac9cde89f8a829a099ba10 | 292c04e72023269075ce1dba7af0314f2adf3785 | refs/heads/master | 2023-01-10T11:32:58.301709 | 2020-06-10T14:49:36 | 2020-06-10T14:49:36 | 65,758,745 | 0 | 0 | Apache-2.0 | 2022-12-26T20:14:50 | 2016-08-15T19:27:17 | Python | UTF-8 | Python | false | false | 3,742 | py | from random import choice, randint
from string import ascii_letters as letters
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from reddit.models import Comment
from reddit.models import Submission
from users.models import RedditUser
class Command(BaseCommand):
    """Management command that fills the database with random test data.

    Creates ``--thread_count`` submissions, each by a random author, and
    seeds every submission with ``--root_comments`` top-level comments plus
    a random tree of replies (capped at depth 5).
    """

    help = 'Generates tests data'

    def add_arguments(self, parser):
        # Number of submissions and top-level comments per submission.
        parser.add_argument('--thread_count', type=int, default=10)
        parser.add_argument('--root_comments', type=int, default=10)

    def handle(self, *args, **options):
        self.thread_count = options['thread_count']
        self.root_comments = options['root_comments']
        # Pool of usernames shared by all generated submissions/comments.
        self.random_usernames = [self.get_random_username() for _ in range(100)]

        for index in range(self.thread_count):
            print("Thread {} out of {}".format(str(index), self.thread_count))

            selftext = self.get_random_sentence()
            title = self.get_random_sentence(max_words=100, max_word_len=10)
            author = self.get_or_create_author(choice(self.random_usernames))
            ups = randint(0, 1000)
            url = None
            downs = int(ups) / 2
            comments = 0

            submission = Submission(author=author,
                                    title=title,
                                    url=url,
                                    text=selftext,
                                    ups=int(ups),
                                    downs=downs,
                                    score=ups - downs,
                                    comment_count=comments)
            submission.generate_html()
            submission.save()

            for _ in range(self.root_comments):
                print("Adding thread comments...")
                comment_author = self.get_or_create_author(choice(self.random_usernames))
                raw_text = self.get_random_sentence(max_words=100)
                new_comment = Comment.create(comment_author, raw_text, submission)
                new_comment.save()
                # Randomly grow a chain of reply trees under this comment.
                another_child = choice([True, False])
                while another_child:
                    self.add_replies(new_comment)
                    another_child = choice([True, False])

    def get_random_username(self, length=6):
        """Return a random alphabetic username of *length* characters."""
        return ''.join(choice(letters) for _ in range(length))

    def get_random_sentence(self, min_words=3, max_words=50,
                            min_word_len=3,
                            max_word_len=15):
        """Return a space-separated sentence of random alphabetic words.

        BUG FIX: the original signature omitted ``self`` although the method
        is always called bound (``self.get_random_sentence(...)``), so the
        instance was passed as ``min_words`` and ``randint`` raised
        TypeError on every call.
        """
        sentence = ''
        for _ in range(0, randint(min_words, max_words)):
            sentence += ''.join(choice(letters)
                                for i in
                                range(randint(min_word_len, max_word_len)))
            sentence += ' '
        return sentence

    def get_or_create_author(self, username):
        """Return the RedditUser for *username*, creating both the Django
        User and the RedditUser if either does not exist yet."""
        try:
            user = User.objects.get(username=username)
            author = RedditUser.objects.get(user=user)
        except (User.DoesNotExist, RedditUser.DoesNotExist):
            print("Creating user {}".format(username))
            new_author = User(username=username)
            # Password equals the username — test data only.
            new_author.set_password(username)
            new_author.save()
            author = RedditUser(user=new_author)
            author.save()
        return author

    def add_replies(self, root_comment, depth=1):
        """Attach a random reply under *root_comment*, recursing with 50%
        probability; recursion stops once *depth* exceeds 5."""
        if depth > 5:
            return
        comment_author = self.get_or_create_author(choice(self.random_usernames))
        raw_text = self.get_random_sentence()
        new_comment = Comment.create(comment_author, raw_text, root_comment)
        new_comment.save()
        if choice([True, False]):
            self.add_replies(new_comment, depth + 1)
| [
"kecorbin@cisco.com"
] | kecorbin@cisco.com |
5260fa0a6723d36ec984dccb197382d2f047b7b0 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/networkcloud/azext_networkcloud/aaz/latest/networkcloud/cluster/metricsconfiguration/_delete.py | 571d8251f68cc3f61f8f9fe63aa571dd843e1012 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 6,008 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud cluster metricsconfiguration delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete the metrics configuration of the provided cluster.
:example: Delete metrics configuration of cluster
az networkcloud cluster metricsconfiguration delete --cluster-name "clusterName" --resource-group "resourceGroupName"
"""
_aaz_info = {
"version": "2023-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/clusters/{}/metricsconfigurations/{}", "2023-07-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.cluster_name = AAZStrArg(
options=["--cluster-name"],
help="The name of the cluster.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
),
)
_args_schema.metrics_configuration_name = AAZStrArg(
options=["-n", "--name", "--metrics-configuration-name"],
help="The name of the metrics configuration for the cluster.",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
pattern="^default$",
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.MetricsConfigurationsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class MetricsConfigurationsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/clusters/{clusterName}/metricsConfigurations/{metricsConfigurationName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"clusterName", self.ctx.args.cluster_name,
required=True,
),
**self.serialize_url_param(
"metricsConfigurationName", self.ctx.args.metrics_configuration_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
63cff687d76aaf00d18be6d51f31afe48a5829e0 | e87d793b3a5facc6e54e0263fbd67703e1fbb382 | /duckietown-world-venv/lib/python3.7/site-packages/geometry/manifolds/translation_group.py | e130e22128d84c5fe827461ccc4e730831887428 | [] | no_license | llingg/behaviour-benchmarking | a860bbe709309e13f3e1133d916944882199a40f | 85bbf1a9c2c628ba74480fe7abac3804d6afdac4 | refs/heads/v1 | 2022-10-06T08:21:29.068329 | 2020-06-11T07:02:46 | 2020-06-11T07:02:46 | 259,622,704 | 0 | 0 | null | 2020-06-02T17:52:46 | 2020-04-28T11:52:08 | C++ | UTF-8 | Python | false | false | 2,334 | py | # coding=utf-8
from contracts import contract
from geometry.poses import extract_pieces, pose_from_rotation_translation, \
rotation_translation_from_pose
from geometry.utils.numpy_backport import assert_allclose
import numpy as np
from .differentiable_manifold import DifferentiableManifold
from .euclidean import R
from .matrix_lie_group import MatrixLieGroup
from .translation_algebra import tran
__all__ = ['TranG', 'Tran', 'Tran1', 'Tran2', 'Tran3']
class TranG(MatrixLieGroup):
'''
The translation subgroup of SE(n).
'''
@contract(n='1|2|3')
def __init__(self, n):
algebra = tran[n]
MatrixLieGroup.__init__(self, n=n + 1, algebra=algebra, dimension=n)
self.En = R[n]
DifferentiableManifold.isomorphism(self, algebra,
self.algebra_from_group,
self.group_from_algebra,
itype='lie')
def __repr__(self):
# return 'Tran(%s)' % (self.n - 1)
return 'Tr%s' % (self.n - 1)
def belongs(self, x):
# TODO: explicit
R, t, zero, one = extract_pieces(x) # @UnusedVariable
assert_allclose(R, np.eye(self.n - 1))
assert_allclose(zero, 0, err_msg='I expect the lower row to be 0.')
assert_allclose(one, 1, err_msg='Bottom-right must be 1.')
@contract(returns='belongs')
def sample_uniform(self):
t = self.En.sample_uniform()
return pose_from_rotation_translation(np.eye(self.n - 1), t)
def friendly(self, a):
t = rotation_translation_from_pose(a)[1]
return 'Tran(%s)' % (self.En.friendly(t))
def logmap(self, base, p):
return base, p - base
def expmap(self, bv):
base, vel = bv
return base + vel
def algebra_from_group(self, g):
a = np.zeros((self.n, self.n))
a[:-1, -1] = g[:-1, -1]
return a
def group_from_algebra(self, a):
g = np.eye(self.n)
g[:-1, -1] = a[:-1, -1]
return g
def interesting_points(self):
points = []
for t in self.En.interesting_points():
p = pose_from_rotation_translation(np.eye(self.n - 1), t)
points.append(p)
return points
Tran1 = TranG(1)
Tran2 = TranG(2)
Tran3 = TranG(3)
Tran = {1: Tran1, 2: Tran2, 3: Tran3}
| [
"linggl@student.ethz.ch"
] | linggl@student.ethz.ch |
0de8c0b935ae6683187554cf72eba6595cdaa7c6 | c68310a6f4a424445081df96a4674da69baf26d6 | /8.- Functions/greet_users.py | 3cc961a85b706c6e3d6736adcbae3fc2a4d96c69 | [] | no_license | martincastro1575/python | d955fcbe300d3e47961ee2e194b815ac8a691d8e | 2d170eb03ef562f2cc10399a5291790138a6ba0b | refs/heads/main | 2023-02-15T17:25:33.233133 | 2021-01-15T15:30:09 | 2021-01-15T15:30:09 | 321,970,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def greet_users(names):
'''Print a simple greeting to each user in the list '''
for name in names:
msg = f"Hello, {name.title()}!!!"
print(msg)
# Demo run: greet a fixed list of users.
usernames = ['martin', 'maria', 'ines','pedro','carlos','marita']
greet_users(usernames)
"martin.castro1575@gmail.com"
] | martin.castro1575@gmail.com |
09f1f32060b30f3d31ee10840ebbf8c3e82c6fad | 606431d6a2ff8f9d589e71c4f17813a094a169dc | /user_management/migrations/0005_userprofile.py | 9c7772d71ba570e18e914bf34741f253783e5566 | [] | no_license | kumar109-beep/Analytica | 057f71a1ac7ff2408c5d3f0e9dd04cf26581101b | 0270b0f52f4711c0d800f2591793fb2310f4437e | refs/heads/main | 2023-08-12T14:25:54.722446 | 2021-09-26T15:44:13 | 2021-09-26T15:44:13 | 410,265,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | # Generated by Django 2.2.1 on 2019-12-10 13:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the UserProfile model: an avatar image tied one-to-one to a user."""
    # First migration introducing this model.
    initial = True
    dependencies = [
        # Works with whatever AUTH_USER_MODEL the project configures.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user_management', '0004_delete_user'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='images')),
                # Deleting the user also deletes the profile (CASCADE).
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"rk468335@gmail.com"
] | rk468335@gmail.com |
90c1cdf8950b6acf443223a67548294405a3c392 | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /python_code/需求实践/0009/s1.py | c11e05606eaace80e31655dcaad195d382a79373 | [] | no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | from random import randint
import time,os
import threading
class Tiger:
    """A tiger: roars loudly and only gains weight when fed meat."""
    classname = 'tiger'

    def __init__(self):
        # Starting weight in arbitrary units.
        self.weight = 200

    def roar(self):
        """Roaring costs 5 units of weight."""
        print('wow!!!')
        self.weight = self.weight - 5

    def feed(self, food):
        """Meat adds 10 units of weight; any other food costs 10."""
        if food != 'meat':
            self.weight -= 10
            print('太惨了,体重 - 10')
        else:
            self.weight += 10
            print('正确,体重 + 10')
class Sheep:
    """A sheep: bleats softly and only gains weight when fed grass."""
    classname = 'sheep'

    def __init__(self):
        # Sheep start lighter than tigers.
        self.weight = 100

    def roar(self):
        """Bleating costs 5 units of weight."""
        print('mie~~')
        self.weight = self.weight - 5

    def feed(self, food):
        """Grass adds 10 units of weight; any other food costs 10."""
        if food != 'grass':
            self.weight -= 10
            print('太惨了,体重 - 10')
        else:
            self.weight += 10
            print('正确,体重 + 10')
class Room:
    """A numbered room that holds exactly one animal."""
    def __init__(self,num,animal):
        self.num = num  # room number shown to the player (1-based)
        self.animal = animal  # a Tiger or Sheep instance
# Build ten rooms, each randomly occupied by either a tiger or a sheep.
rooms = []
for no in range(10):
    if randint(0,1) == 0:
        ani = Tiger()
    else:
        ani = Sheep()
    # Rooms are numbered starting from 1.
    room = Room(no+1,ani)
    rooms.append(room)
def count_thread():
    """Watchdog: end the game 20 seconds after it starts.

    When time is up, print every room's occupant and weight, then kill
    the whole process (os._exit also terminates the blocking input loop).
    """
    startTime = time.time()
    # Poll every 0.1 s until more than 20 s of game time have elapsed.
    while True:
        time.sleep(0.1)
        if time.time() - startTime > 20:
            break
    print(u'游戏结束')
    # Final report: one line per room.
    for room in rooms:
        print(u'房间%s, 里面是%s,体重%s' % (room.num,
                                      room.animal.classname,
                                      room.animal.weight))
    os._exit(0)
# Run the 20-second countdown concurrently with the blocking input loop below.
t = threading.Thread(target=count_thread)
t.start()
# Main game loop.
while True:
    # Visit a random room and let the user choose: knock [q] or feed [w].
    curRoomIdx = randint(0,9)
    room = rooms[curRoomIdx]
    print('当前来到房间%s,敲门【q】还是喂食【w】' % room.num)
    ch = input()
    # Knock: the animal answers with its cry (and loses weight).
    if ch == 'q':
        room.animal.roar()
    # Feed: ask which food, then feed it to the animal.
    elif ch == 'w':
        print('请输入食物:')
        food = input()
        room.animal.feed(food)
"1174497735@qq.com"
] | 1174497735@qq.com |
ba9fdc8a9c428a06447e06ac290ad3850f306c47 | c21bb569e67bedc7aa49052f3847a14a30ba7a34 | /request-response/connects-reconnects.py | 3cd39b1453501a8f383b4f41a5e1d9da4d303708 | [] | no_license | jdavid54/mqtt | b268ba44b02a7ce3da98c8fe2c8d460f2afa3466 | 9cbb62dd1cba67e37111b7f60cfd163127023225 | refs/heads/main | 2023-03-26T22:00:44.030664 | 2021-03-25T12:51:24 | 2021-03-25T12:51:24 | 351,430,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | #! python3.4
###demo code provided by Steve Cope at www.steves-internet-guide.com
##email steve@steves-internet-guide.com
###Free to use for any purpose
"""
demo connect and reconnect scipt
"""
import paho.mqtt.client as mqtt
import time
import logging
import sys
def on_connect(client, userdata, flags, rc):
    """CONNACK callback: record the connection outcome as a flag on the client."""
    logging.debug("Connected flags" + str(flags) + "result code " + str(rc) + "client1_id")
    if rc != 0:
        client.bad_connection_flag = True
    else:
        client.connected_flag = True
        # client.subscribe("house/bulb1")
def on_disconnect(client, userdata, *args):
    """Disconnect callback: clear the client's connected flag.

    paho-mqtt invokes this as (client, userdata, rc); the original version
    declared an extra ``flags`` parameter, so the real return code landed in
    ``flags`` and ``rc`` always printed as 0.  Accept either call shape and
    take the last positional argument as the return code.
    """
    rc = args[-1] if args else 0
    print("DisConnected flags"+"result code "+str(rc))
    client.connected_flag = False
def Connect(client, broker, port, keepalive):
    """Connect to the broker, retrying up to 3 failed attempts.

    Returns 0 as soon as client.connect() accepts the request (the CONNACK
    itself is reported later through on_connect), or -1 after three
    consecutive failures.  On every failure client.bad_connection_flag is
    set so wait_for() and callers can react.

    Fixes over the original demo code:
      * the flag was misspelled ``badconnection_flag`` and never matched the
        ``bad_connection_flag`` attribute that wait_for() checks;
      * an unconditional ``raise SystemExit`` aborted on the *first* failure,
        so the 3-attempt retry loop never actually retried;
      * ``logging.debug("connection failed ", res)`` passed ``res`` without a
        %-placeholder, so the value was dropped by the logging module.
    """
    print("connecting ",broker)
    badcount = 0  # counter for bad connection attempts
    while True:
        logging.info("connecting to broker " + str(broker))
        try:
            res = client.connect(broker, port, keepalive)  # connect to broker
        except Exception as exc:  # DNS failure, refused socket, ...
            res = None
            logging.debug("connection attempt raised %s", exc)
        if res == 0:
            return 0
        # Failed attempt: record it and retry (three attempts in total).
        client.bad_connection_flag = True
        logging.debug("connection failed %s", res)
        badcount += 1
        if badcount >= 3:
            return -1  # give up
def wait_for(client,msgType,period=1,wait_time=10):
    """Poll until the requested MQTT event is observed on `client`.

    msgType selects which client flag(s) to watch: "CONNACK", "SUBACK",
    "MESSAGE" or "PUBACK".  Returns True on success, False on a bad
    connection or once the wait loop has run too long.

    NOTE(review): wait_time is counted in loop iterations, not seconds, so
    the effective timeout is roughly wait_time * period seconds.
    """
    wcount=0
    while True:
        logging.info("waiting"+ msgType)
        if msgType=="CONNACK":
            if client.on_connect:
                if client.connected_flag:
                    return True
                if client.bad_connection_flag: #
                    return False
        if msgType=="SUBACK":
            if client.on_subscribe:
                if client.suback_flag:
                    return True
        if msgType=="MESSAGE":
            if client.on_message:
                return True
        if msgType=="PUBACK":
            if client.on_publish:
                if client.puback_flag:
                    return True
        if not client.running_loop:
            # No background network thread running: service the socket manually.
            client.loop(.01) #check for messages manually
        time.sleep(period)
        #print("loop flag ",client.running_loop)
        wcount+=1
        if wcount>wait_time:
            print("return from wait loop taken too long")
            return False
    return True  # unreachable: every exit path is inside the while loop
###########
# --- demo driver: configure logging, connect, then idle until timeout ---
loglevel="DEBUG"
logging.basicConfig(level=loglevel) #error logging
#use DEBUG,INFO,WARNING,ERROR
client=mqtt.Client("P11")
###set flags in client object
# Class-level defaults so every client instance carries these attributes.
mqtt.Client.bad_connection_flag=False
mqtt.Client.connected_flag=False
mqtt.Client.disconnect_flag=False
####
broker="192.168.1.85" #need to change this
port=1883
keepalive=60
# NOTE(review): missing "()" — this statement does nothing; the real
# loop_start() call happens a few lines below.
client.loop_start
###set callbacks
client.on_connect=on_connect
client.on_disconnect=on_disconnect
client.running_loop=False #needed by wait_for loop
run_flag=True
client.loop_start() #start a loop
# Attempt the connection and wait for the CONNACK before entering the loop.
if not client.connected_flag: #flag set in on_connect callback
    if Connect(client,broker,port,keepalive) !=-1:
        if not wait_for(client,"CONNACK"):
            run_flag=False #need to take action to quit
    else:
        run_flag=False #need to take action to quit
count=0
retry_count=0
# Main loop: a real client would publish/subscribe here; this demo just
# counts iterations and bails out after 3 consecutive disconnected checks.
while run_flag:
    if client.connected_flag:#check for connection
        print("in loop would pub and sub here",count)
        count+=1
    else:
        retry_count+=1
        if retry_count>=3:
            run_flag=False #break from loop
    time.sleep(2)
client.loop_stop()
client.disconnect()
| [
"noreply@github.com"
] | jdavid54.noreply@github.com |
a060c6575714e291ae36cdd14ad5c48804650a45 | 8955dfe19f98cd9302480159adc63700adaeb498 | /middle/210 Course Schedule II.py | c65770d106091d22ccf1794ca816d0b06f2145b7 | [] | no_license | VRER1997/leetcode_python | 9e8c30433f2d304290cfb6e782513c8eceee5d4f | 43f392286ee01c7a229a3a12c3ff04e9f4fc7ddc | refs/heads/master | 2020-04-22T09:25:30.802846 | 2019-08-16T07:19:25 | 2019-08-16T07:19:25 | 170,271,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | from typing import List
class Solution:
    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Return an order in which all courses can be taken, or [] if impossible.

        Each pair [course, prereq] means `prereq` must be taken before
        `course`.  Uses Kahn's algorithm with an explicit ready-queue, which
        runs in O(V + E); the original re-scanned every remaining course on
        each round (O(V^2) in the worst case).
        """
        indegree = [0] * numCourses
        edges = [[] for _ in range(numCourses)]
        for course, prereq in prerequisites:
            indegree[course] += 1
            edges[prereq].append(course)
        # Seed with every course that has no prerequisites at all.
        ready = [c for c in range(numCourses) if indegree[c] == 0]
        order = []
        while ready:
            course = ready.pop()
            order.append(course)
            for nxt in edges[course]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    ready.append(nxt)
        # If some courses were never scheduled, the prerequisites contain a cycle.
        return order if len(order) == numCourses else []
"1216778435@qq.com"
] | 1216778435@qq.com |
6a805fc5ea0fbab49b44d56ddf37fa7800c1fb7c | af497bbfe8c3131c235df1b3087a90c28c89891e | /ocr.py | d844c329aa7c4854cb984e402728abf8eaaf9163 | [
"blessing"
] | permissive | snwhd/retrotracker | 48e8c2286534f3c2a5057b936c4e39c3082860cd | ad0ce011e2e8a0d13f3479085b2c4e0fecbe5712 | refs/heads/master | 2023-09-01T22:49:45.452343 | 2021-11-19T11:39:22 | 2021-11-19T11:39:22 | 428,159,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | #!/usr/bin/env python3
from __future__ import annotations
from PIL import Image, ImageFilter
import PIL.ImageOps
import pyscreenshot as ImageGrab
import pytesseract
import unidecode
import logging
import re
from typing import (
Generator,
List,
Optional,
Tuple,
)
import warnings
warnings.filterwarnings('ignore', category=Warning)
from monsterdetector import MonsterDetector
# Lines matching these fragments are known OCR noise and get dropped.
IGNORE_REGEX = re.compile(r'(meal\)|Sa 0\))')
# replace common ocr mistakes
# (characters tesseract tends to emit in place of digits -> intended digit)
INT_TRANS = str.maketrans(
    'olis&y?',
    '0115677'
)
class OCR:
    """Captures a screen region, runs tesseract OCR on it, and yields
    de-duplicated text lines plus monster identifications for RetroMMO."""
    def __init__(
        self,
        x: int,
        y: int,
        w: int,
        h: int,
    ) -> None:
        # (x, y) is the top-left corner of the capture box; (w, h) its size.
        self.monster_detector = MonsterDetector()
        # Last raw OCR text / last emitted line, used to suppress duplicates.
        self.previous_text: Optional[str] = None
        self.previous_line: Optional[str] = None
        self.x = x
        self.y = y
        self.w = w
        self.h = h
    @staticmethod
    def parse_int(s: str) -> int:
        """Parse an integer out of OCR output, correcting common misreads
        via INT_TRANS (e.g. 'o' -> '0', 'l' -> '1')."""
        if s == 'psu':
            # this is a really weird but consistent edge case:
            return 20
        return int(s.translate(INT_TRANS))
    def screen_capture(self) -> Image:
        """Grab the configured bounding box from the screen as an image."""
        return ImageGrab.grab(bbox=[
            self.x,
            self.y,
            self.x + self.w,
            self.y + self.h,
        ])
    def set_bbox(self, x: int, y: int, w: int, h: int) -> None:
        """Reposition/resize the capture box."""
        self.x = x
        self.y = y
        self.w = w
        self.h = h
    def capture_string(self) -> str:
        """Capture the box and OCR it.

        Preprocessing: drop alpha, invert colours, and blur slightly —
        presumably tuned for light-on-dark game text (TODO confirm).
        """
        image = self.screen_capture()
        r, g, b, a = image.split()
        image = Image.merge('RGB', (r, g, b))
        image = PIL.ImageOps.invert(image)
        image = image.filter(ImageFilter.BLUR)
        # image.save('/tmp/mmo.png')
        return pytesseract.image_to_string(image)
    #
    # retroMMO specific stuff
    #
    def gen_retrommo_lines(self) -> Generator[str, None, None]:
        """Yield new, lower-cased chat lines since the previous capture."""
        text = unidecode.unidecode(self.capture_string().strip())
        # avoid duplicate captures
        if text != self.previous_text:
            self.previous_text = text
            for line in self.gen_split_lines(text):
                # avoid duplicate lines
                if line != self.previous_line:
                    self.previous_line = line
                    logging.debug(line)
                    yield line
    def gen_split_lines(
        self,
        text: str,
    ) -> Generator[str, None, None]:
        """Split OCR text into lines, skipping blanks, very short lines
        (<= 5 chars) and known-noise lines; yields them lower-cased."""
        for s in text.split('\n'):
            s = s.strip()
            if s != '' and len(s) > 5 and not self.ignore(s):
                yield s.lower()
    def ignore(
        self,
        text: str,
    ) -> bool:
        """True when the line matches the OCR-noise pattern."""
        return IGNORE_REGEX.match(text) is not None
    #
    # Optical Monster Recognition
    #
    def identify_monsters(self) -> List[str]:
        """Capture the box and ask the monster detector what it shows."""
        image = self.screen_capture()
        return self.monster_detector.identify(image)
| [
"none@none"
] | none@none |
13c83e704be21926ea5bc174a5765b317dd2f572 | bc2cdb1e438efaf67131e975ac4db80b4dc43385 | /src/private/code_publish/migrations/0035_auto_20190820_1620.py | 8ede163f1e401882d63da5678df8b7718aec6268 | [] | no_license | Shadow-linux/ops-for-study | cf4d55409ebc6f27d454bea60886cd154c994484 | 115b567948d25a64e423a6cdc89bc8337896afe2 | refs/heads/master | 2023-01-14T13:35:56.880896 | 2019-09-23T05:01:31 | 2019-09-23T05:01:31 | 209,781,758 | 2 | 0 | null | 2023-01-04T10:55:45 | 2019-09-20T12:08:11 | Python | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.0.1 on 2019-08-20 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('code_publish', '0034_auto_20190820_1542'),
]
operations = [
migrations.RemoveField(
model_name='codepublishlockenvapp',
name='app_name',
),
migrations.AddField(
model_name='codepublishlockenvapp',
name='app_name_id',
field=models.CharField(help_text='AppNameID from cp_main_conf', max_length=128, null=True),
),
]
| [
"liangyedong@qipeipu.com"
] | liangyedong@qipeipu.com |
2d87b7dc3bd7a65fa908062504c72c3ce7cd675d | c5276ac07e88486a54c63eb323324998c09577df | /Week-06/double-linked-list.py | f58183e1dd94c1e13620d8bcb0c3fce212b052fa | [] | no_license | PWA-GouldA/Advanced-Programming-Python | c4831d414ffa80fae22b8fc338f28752f98fecee | 144c9ee8adf732d441e45cf5fd0edaee6362e9b0 | refs/heads/master | 2022-12-10T17:28:04.484910 | 2019-05-21T11:16:43 | 2019-05-21T11:16:43 | 172,685,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,194 | py | # for Garbage collection
import gc
class Node:
def __init__(self, next=None, prev=None, data=None):
self.next = next # reference to next node in DLL
self.prev = prev # reference to previous node in DLL
self.data = data
class DoublyLinkedList:
def __init__(self):
self.head = None
# Function to delete a node in a Doubly Linked List.
# head_ref --> pointer to head node pointer.
# dele --> pointer to node to be deleted
def deleteNode(self, dele):
# Base Case
if self.head is None or dele is None:
return
# If node to be deleted is head node
if self.head == dele:
self.head = dele.next
# Change next only if node to be deleted is NOT the last node
if dele.next is not None:
dele.next.prev = dele.prev
# Change prev only if node to be deleted is NOT the first node
if dele.prev is not None:
dele.prev.next = dele.next
# Finally, free the memory occupied by dele
# Call python garbage collector
gc.collect()
# Given a reference to the head of a list and an integer, inserts a new node on the front of list
def push(self, new_data):
# 1. Allocates node
# 2. Put the data in it
new_node = Node(data=new_data)
# 3. Make next of new node as head and
# previous as None (already None)
new_node.next = self.head
# 4. change prev of head node to new_node
if self.head is not None:
self.head.prev = new_node
# 5. move the head to point to the new node
self.head = new_node
def printList(self, node):
while (node is not None):
print(node.data, '<-> ', end='')
node = node.next
# This code is contributed by Nikhil Kumar Singh(nickzuck_007)
# Given a node as prev_node, insert
# a new node after the given node
def insertAfter(self, prev_node, new_data):
# 1. check if the given prev_node is NULL
if prev_node is None:
print("This node doesn't exist in DLL")
return
# 2. allocate node & 3. put in the data
new_node = Node(data=new_data)
# 4. Make next of new node as next of prev_node
new_node.next = prev_node.next
# 5. Make the next of prev_node as new_node
prev_node.next = new_node
# 6. Make prev_node as previous of new_node
new_node.prev = prev_node
# 7. Change previous of new_node's next node */
if new_node.next is not None:
new_node.next.prev = new_node
# This code is contributed by jatinreaper
# Add a node at the end of the DLL
def append(self, new_data):
# 1. allocate node 2. put in the data
new_node = Node(data=new_data)
last = self.head
# 3. This new node is going to be the
# last node, so make next of it as NULL
new_node.next = None
# 4. If the Linked List is empty, then
# make the new node as head
if self.head is None:
new_node.prev = None
self.head = new_node
return
# 5. Else traverse till the last node
while (last.next is not None):
last = last.next
# 6. Change the next of last node
last.next = new_node
# 7. Make last node as previous of new node */
new_node.prev = last
# This code is contributed by jatinreaper
# Driver program to test the above functions
# Start with empty list
dll = DoublyLinkedList()
# Let us create the doubly linked list 10<->8<->4<->2
dll.push(2)
dll.push(4)
dll.push(8)
dll.push(10)
dll.push(12)
print("Original Linked List")
print(dll.printList(dll.head))
# delete nodes from doubly linked list
dll.deleteNode(dll.head)
dll.deleteNode(dll.head.next)
dll.deleteNode(dll.head.next)
# Modified linked list will be NULL<-8->NULL
print("Modified Linked List")
print(dll.printList(dll.head))
| [
"Adrian.Gould@nmtafe.wa.edu.au"
] | Adrian.Gould@nmtafe.wa.edu.au |
1768e183d070071c2b91c9f32da2a06fa66c06cd | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0921+161/sdB_PG_0921+161_coadd.py | b87a80c0a5c0fa3529741039dad7d6fea6e532c4 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
    """Run a gPhoton gMap extraction for sdB PG 0921+161 (NUV band).

    Produces a 30 s-binned count movie and a coadded count image over a
    0.0333 deg (~2 arcmin) square box centred on the target coordinates.
    """
    gMap(band="NUV", skypos=[141.158875,15.888608], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_PG_0921+161/sdB_PG_0921+161_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_PG_0921+161/sdB_PG_0921+161_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
10c66a03d36f6b880b761caf0e85aae4b93527e4 | 1698b29429675292393f294ad554dac8efed3e6b | /03_Web/Django/9_16/posts/urls.py | 23f991e0df302dd0302c78804e752a34b1a3676e | [] | no_license | bcking92/TIL | 3daa87e95b6c39413f9c62787c35987804e8dd89 | 60b99cac73820d8af3e050d54f26f1f22a32748d | refs/heads/master | 2023-01-12T10:07:36.042506 | 2021-04-12T22:55:10 | 2021-04-12T22:55:10 | 195,767,319 | 0 | 0 | null | 2023-01-05T00:40:55 | 2019-07-08T08:17:14 | Jupyter Notebook | UTF-8 | Python | false | false | 595 | py | from django.urls import path
from . import views
app_name = 'posts'
# URL routes for the posts app; <int:num> is presumably the primary key of
# the targeted post/comment — verify against the view implementations.
urlpatterns = [
    # Post CRUD.
    path('', views.index, name='home'),
    path('create/', views.create, name='create'),
    path('detail/<int:num>/', views.detail, name='detail'),
    path('update/<int:num>/', views.update, name='update'),
    path('delete/<int:num>/', views.delete, name='delete'),
    # Comment CRUD.
    path('comment/<int:num>/', views.create_comment, name='comment'),
    path('comment/delete/<int:num>/', views.delete_comment, name='delete_comment'),
    path('comment/update/<int:num>/', views.update_comment, name='update_comment'),
]
"bcking92@gmail.com"
] | bcking92@gmail.com |
b200b6b34858f1c775663ce36035ce4a1cd60ddd | e87aec694108cb1f76716260daf569bcb8091958 | /fluo/management/commands/initdb.py | 2218dbebe83cc324702648674b438d6b2643add4 | [
"MIT"
] | permissive | rsalmaso/django-fluo | a283b8f75769ac6e57fa321c607819899e0c31c8 | 340e3b4f9c1b4b09feccefb9b3ab2d26d59fac2b | refs/heads/master | 2023-01-12T01:37:06.975318 | 2020-12-01T17:13:11 | 2020-12-01T17:13:11 | 48,948,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | # Copyright (C) 2007-2020, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.core.management import call_command
from ..database import DatabaseCommand
class Command(DatabaseCommand):
    """Management command: drop, recreate and re-initialize the database."""
    help = "(Re)create and initialize database with common data"
    # Confirmation prompt shown before the destructive drop/create cycle.
    message = """You have requested to create "%(name)s" database.
This will IRREVERSIBLY DELETE all data currently in the "%(name)s" database if already exists,
and then will create a new database.
Are you sure you want to do this?"""
    error_message = """Database %(name)s couldn't be dropped. Possible reasons:
  * The database isn't running or isn't configured correctly.
  * The database is in use by another user.
The full error: %(error)s"""
    def add_arguments(self, parser):
        """Add --noadmin/--no-admin (store_false: passing the flag skips the admin user)."""
        super().add_arguments(parser)
        parser.add_argument(
            "--noadmin",
            "--no-admin",
            action="store_false",
            dest="noadmin",
            default=True,
            help="Tells Django to NOT create a default admin user.",
        )
    def execute_sql(self, backend, **options):
        """Drop the database (if present) and create it again."""
        backend.dropdb()
        backend.createdb()
    def migrate(self):
        """Apply all Django migrations to the freshly created database."""
        call_command("migrate")
    def post_execute(self, **options):
        """After recreation: migrate, then load admin data unless --noadmin was given."""
        self.migrate()
        if options.get("noadmin"):
            call_command("load_admin_data")
| [
"raffaele@salmaso.org"
] | raffaele@salmaso.org |
fba96643b6ce7ca44beb319570113d6db4766071 | 85744222aed3e2d77d7c01d037839804f857db95 | /SWEA/1940.py | 49ad1f61622bce45b29bde22394ef214751da4ad | [] | no_license | porciuscato/study_algorithm | 7c86367afab75943cf2de9f0bccc62659fdc6aae | fecf67f8028410c7d59fcc9a037667a169684cf4 | refs/heads/master | 2023-08-21T17:55:39.179551 | 2021-10-22T03:57:50 | 2021-10-22T03:57:50 | 195,784,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import sys
# Redirect stdin so input() reads the bundled test file instead of the console.
sys.stdin = open('1940.txt', 'r')
# One test case per iteration; cases are numbered from 1.
for T in range(1, int(input()) + 1):
    N = int(input())
    answer = 0    # accumulated distance (sum of speeds over all commands)
    velocity = 0  # current speed; persists across commands
    for n in range(N):
        inputs = list(map(int, input().split()))
        state = inputs[0]
        # Command 1: accelerate by the given amount; command 2: decelerate,
        # clamped at 0; any other command keeps the current speed.
        if state == 1:
            velocity += inputs[1]
        elif state == 2:
            velocity -= inputs[1]
            if velocity < 0:
                velocity = 0
        # After every command the current speed is added to the total.
        answer += velocity
    print('#{} {}'.format(T, answer))
| [
"mpcato@naver.com"
] | mpcato@naver.com |
77d095fcd57cbf06b3de67806420aa9c3b4b53d4 | c56c153e90fa7ae04e8e84410fc6cb3aef1bc800 | /base.R | 5ce8b978749c7f7ac5143b46e420fc5fdba80d0f | [] | no_license | yiyusheng/TC_koubei | 7215d483f9e09982a59c90a9836100e56abb9e25 | a02cee69fb49bd89cd561b64d73c47ddde5a2f57 | refs/heads/master | 2021-01-11T22:33:38.949377 | 2017-02-17T09:01:47 | 2017-02-17T09:01:47 | 78,988,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | r | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Filename: base.R
#
# Description: commenly used function in this project
#
# Copyright (c) 2017, Yusheng Yi <yiyusheng.hust@gmail.com>
#
# Version 1.0
#
# Initial created: 2017-01-16 21:44:43
#
# Last modified: 2017-01-19 11:23:32
#
#
#
# Rows of `sp` (defaults to the global shop_pay table) whose shop_id is in `id`.
get_shop <- function(id,sp = shop_pay){
  subset(sp,shop_id %in% id)
}
# Mean squared error between two numeric vectors.
mse <- function(x, y) {
  mean((x - y)^2)
}
# Plot weekly mean +/- sd for one shop and save the figure as <shop_id>.jpg
# under dir_data/figure/week_aggra.  `df` may be a data frame or an index
# into the global smp_aggra list.  Returns the ggplot object.
plot_week_aggra <- function(df){
  if(!is.data.frame(df))df <- smp_aggra[[df]]
  p <- ggplot(df, aes(x=1:nrow(df), y=smp_mean, group = 1)) +
    geom_errorbar(aes(ymin=smp_mean-smp_sd, ymax=smp_mean+smp_sd), width=.1) +
    geom_line() + geom_point() + xlab('weeks') + ylab('mean') + ggtitle(df$shop_id[1])
  ggsave(p,file = file.path(dir_data,'figure','week_aggra',paste(df$shop_id[1],'.jpg',sep='')))
  return(p)
}
# Fit seasonal-free ARIMA models to one shop's weekly mean and sd series
# (training: uni_time < test_start), forecast two steps ahead, and compare
# against a naive "last values" baseline.  Returns a one-row data frame of
# the four MSEs: em1/es1 = ARIMA forecast errors, em2/es2 = baseline errors.
arimaX <- function(df,test_start){
  dfa <- subset(df,uni_time < test_start)
  dfb <- subset(df,uni_time >= test_start)
  fit_mean <- auto.arima(dfa$smp_mean,D = 1,trace = F,
                         max.P = 0,max.Q = 0,max.p = 20,max.q = 20,
                         ic = 'aic')
  fit_sd <- auto.arima(dfa$smp_sd,D = 1,trace = F,
                       max.P = 0,max.Q = 0,max.p = 20,max.q = 20,
                       ic = 'aic')
  pred_mean <- forecast.Arima(fit_mean,h = 2)$mean[1:2]
  pred_sd <- forecast.Arima(fit_sd,h = 2)$mean[1:2]
  real_mean <- dfb$smp_mean
  real_sd <- dfb$smp_sd
  # Earlier baseline variants kept for reference:
  # last_mean <- rep(dfa$smp_mean[which.max(dfa$uni_time)],2)
  # last_sd <- rep(dfa$smp_sd[which.max(dfa$uni_time)],2)
  # last_mean <- dfa$smp_mean[(nrow(dfa)-1):nrow(dfa)]
  # last_sd <- dfa$smp_sd[(nrow(dfa)-1):nrow(dfa)]
  # last_mean <- rep(mean(dfa$smp_mean[(nrow(dfa)-1):nrow(dfa)]),2)
  # last_sd <- rep(mean(dfa$smp_sd[(nrow(dfa)-1):nrow(dfa)]),2)
  # NOTE(review): fit_mean is reused for the sd series on the next line —
  # probably a copy-paste slip for fit_sd; confirm before relying on es2.
  last_mean <- predict(fit_mean,dfa$smp_mean[(nrow(dfa)-3):nrow(dfa)])
  last_sd <- predict(fit_mean,dfa$smp_sd[(nrow(dfa)-3):nrow(dfa)])
  error_mean1 <- mse(real_mean,pred_mean)
  error_mean2 <- mse(real_mean,last_mean)
  error_sd1 <- mse(real_sd,pred_sd)
  error_sd2 <- mse(real_sd,last_sd)
  # Progress indicator: which shop was just processed.
  cat(sprintf('%d\n',fct2num(df$shop_id[1])))
  data.frame(em1 = error_mean1,em2 = error_mean2,es1 = error_sd1,es2 = error_sd2)
}
# Linearly rescale x so the result has mean `mu` and standard deviation `sigma`.
rescale <- function(x, mu, sigma) {
  centered <- x - mean(x)
  centered * sigma / sd(x) + mu
}
# Fit two gaussian GLMs that predict next week's mean/sd from the previous
# four weeks, using the last 90 days before the global test_start across all
# shops in the global smp_aggra list.  Returns list(fit_mean, fit_sd).
gen_fit_glm <- function(){
  comb_start <- test_start - 90*86400
  last_weeks <- 4
  # Build a sliding-window design matrix for one shop: each row is
  # (week t-4, ..., week t-1, week t); column X5 is the regression target.
  gen_last_Nweeks_train <- function(df,attr){
    # cat(sprintf('%d\n',fct2num(df$shop_id[1])))
    dfa <- subset(df,uni_time < test_start & uni_time >= comb_start)
    if(nrow(dfa) < 5)return(NULL)
    # NOTE(review): dfb is computed but never used below.
    dfb <- subset(df,uni_time >= test_start)
    m <- matrix(0,nrow(dfa)-last_weeks,last_weeks+1)
    for(i in 1:nrow(m)){
      m[i,] <- dfa[[attr]][i:(i+4)]
    }
    m
  }
  train_mean <- data.frame(do.call(rbind,lapply(smp_aggra,gen_last_Nweeks_train,attr = 'smp_mean')))
  train_sd <- data.frame(do.call(rbind,lapply(smp_aggra,gen_last_Nweeks_train,attr = 'smp_sd')))
  fit_mean <- glm(X5~.,data =train_mean,family = 'gaussian')
  fit_sd <- glm(X5~.,data =train_sd,family = 'gaussian')
  list(fit_mean,fit_sd)
}
| [
"yiyusheng.hust@gmail.com"
] | yiyusheng.hust@gmail.com |
e8ee10c0cb8e27441f73f177ccfd1b2e8b699934 | f7d2114152ec5c8e283b6bfbb9feeb5a4337eb71 | /ACL054400-python37/samples/CH10/object_serialization/dvdlib_pickle.py | fb242f95931ac4828b258ec9149bac400dcd4d4c | [] | no_license | faustfu/hello_python | f5759ff6bf87694e6ba472349f4263bc8ad479b0 | 1487e60e2d9307e1a0ebffbf26d8e075da806a39 | refs/heads/master | 2021-02-10T18:01:50.450295 | 2020-11-20T02:17:18 | 2020-11-20T02:17:18 | 244,406,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import pickle
class DVD:
    """A DVD record that can persist itself to disk via pickle."""

    def __init__(self, title: str, year: int, duration: int, director: str) -> None:
        self.title = title
        self.year = year
        self.duration = duration
        self.director = director
        # Derived file name: spaces are awkward in paths, swap them for '_'.
        self.filename = '{0}.pickle'.format(self.title.replace(' ', '_'))

    def save(self):
        """Serialize this instance to self.filename."""
        with open(self.filename, 'wb') as fh:
            pickle.dump(self, fh)

    @staticmethod
    def load(filename: str) -> 'DVD':
        """Deserialize a DVD previously written by save().

        NOTE: pickle.load must only ever be fed trusted files.
        """
        with open(filename, 'rb') as fh:
            return pickle.load(fh)

    def __repr__(self):
        return "DVD('{0}', {1}, {2}, '{3}')".format(
            self.title, self.year, self.duration, self.director)

    def __str__(self):
        return repr(self)
# Demo: create a DVD, pickle it to disk, then load it back and print it.
dvd1 = DVD('Birds', 2018, 8, 'Justin Lin')
dvd1.save()
dvd2 = DVD.load('Birds.pickle')
print(dvd2)
| [
"faust.fu@gmail.com"
] | faust.fu@gmail.com |
a01a1ee288b6cd45c7e9248a7c0527e5b595153e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /QB6kPXQkFgMkzcc2h_16.py | 3ac2d588f5b7c292966309deac97e107d2c7f2c0 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py |
def remove_abc(txt):
if "a" in txt.lower() or "b" in txt.lower() or "c" in txt.lower():
return txt.replace("a", "").replace("b", "").replace("c", "")
else:
return None
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7e39932d9b6d14371a3a3a197aaff1225753b296 | 8200c609ae9c78af2848237f1c43e54b8560e87b | /tests/test_minres.py | 80012f3468411040939b76c6609ecd9259e1600f | [
"MIT"
] | permissive | christian-cahig/krylov | ecf07f50000ac6c8441f0713693f52df337240db | b6bb7e01e0d3409dbd506ec8d232a5da19e3e7ca | refs/heads/main | 2023-04-28T16:52:50.887796 | 2021-05-21T18:54:08 | 2021-05-21T18:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | import pytest
import krylov
from .helpers import assert_consistent
from .linear_problems import hermitian_indefinite, hpd
from .linear_problems import spd_dense as spd
from .linear_problems import spd_rhs_0, spd_rhs_0sol0, symmetric_indefinite
@pytest.mark.parametrize(
"A_b",
[
spd((5,)),
spd((5, 1)),
spd((5, 3)),
spd_rhs_0((5,)),
spd_rhs_0sol0(),
hpd(),
symmetric_indefinite(),
hermitian_indefinite(),
],
)
def test_minres(A_b):
A, b = A_b
callback_counter = 0
def callback(x, r):
nonlocal callback_counter
callback_counter += 1
sol, info = krylov.minres(A, b, tol=1.0e-7, callback=callback)
assert callback_counter == info.numsteps + 1
assert info.success
assert_consistent(A, b, info, sol, 1.0e-7)
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
21044048461cf0140e43fca4b8b0dbbcda7b32d9 | c31c8095ce4d4e9686e3e7ad6b004342e49671fa | /forum/migrations/0086_auto_20190124_1403.py | d90ca9f50c660347e4e531ec4f61dbeda976fe51 | [] | no_license | Lionalisk/arrakambre | 7bcc96dea2ca2a471572bfb1646256f1382ce25b | 2caece9be5eebf21ddfa87a6c821c32b5d5019a2 | refs/heads/master | 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | # Generated by Django 2.1.3 on 2019-01-24 13:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add classe_principale / classe_secondaire foreign keys to Perso,
    both pointing at Categorie_competence."""
    dependencies = [
        ('forum', '0085_auto_20190124_1221'),
    ]
    operations = [
        migrations.AddField(
            model_name='perso',
            name='classe_principale',
            # default=1: value applied to pre-existing rows when the column is
            # added; SET_NULL keeps the Perso if the category is deleted.
            field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='classe_principale', to='forum.Categorie_competence'),
        ),
        migrations.AddField(
            model_name='perso',
            name='classe_secondaire',
            field=models.ForeignKey(default=2, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='classe_secondaire', to='forum.Categorie_competence'),
        ),
    ]
| [
"lionel.varaire@free.fr"
] | lionel.varaire@free.fr |
3ec923644ba27f03e70f29eba4ecb470b4167106 | 6df9a960c0a4e2049b5932938a83ee82d4516412 | /databases_2/m2m-relations/articles/views.py | 1ceee601e7e1fec92b9fb7cad031c5bb5f236bf9 | [] | no_license | alekseykonotop/dj_hw | 9585f0d42ec95d31f5eeae09b953e5f195bc9ee7 | 6752361d007d777127eb77445d45da58332e0223 | refs/heads/master | 2021-07-19T06:30:04.333018 | 2019-09-21T18:12:38 | 2019-09-21T18:12:38 | 177,439,677 | 0 | 0 | null | 2020-06-05T22:56:52 | 2019-03-24T16:24:46 | Python | UTF-8 | Python | false | false | 223 | py | from django.views.generic import ListView
from .models import Article, Category, Compilation
class ArticleListView(ListView):
    """Article listing rendered with articles/news.html, newest first."""
    template_name = 'articles/news.html'
    model = Article
    # Descending publication date: newest articles first.
    ordering = '-published_at'
| [
"alekseykonotop@gmail.com"
] | alekseykonotop@gmail.com |
041df02d4c1cb214bc633d627071a694b88f2500 | 99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa | /src/pytezos/michelson/instructions/__init__.py | ab84dc488ef90063a0ec8975b4ef10d992552b94 | [
"MIT"
] | permissive | baking-bad/pytezos | c4248bde49a5b05521b8cc51eeca588b1a721660 | 19747e3acec2141f06e812025673f497fc07e2d4 | refs/heads/master | 2023-07-06T21:57:09.572985 | 2023-07-05T11:45:27 | 2023-07-05T11:45:27 | 169,243,460 | 115 | 43 | MIT | 2023-07-04T16:28:09 | 2019-02-05T13:12:50 | Python | UTF-8 | Python | false | false | 8,126 | py | from pytezos.michelson.instructions.adt import CarInstruction
from pytezos.michelson.instructions.adt import CdrInstruction
from pytezos.michelson.instructions.adt import GetnInstruction
from pytezos.michelson.instructions.adt import LeftInstruction
from pytezos.michelson.instructions.adt import PairInstruction
from pytezos.michelson.instructions.adt import RightInstruction
from pytezos.michelson.instructions.adt import UnpairInstruction
from pytezos.michelson.instructions.adt import UpdatenInstruction
from pytezos.michelson.instructions.arithmetic import AbsInstruction
from pytezos.michelson.instructions.arithmetic import AddInstruction
from pytezos.michelson.instructions.arithmetic import BytesInstruction
from pytezos.michelson.instructions.arithmetic import EdivInstruction
from pytezos.michelson.instructions.arithmetic import IntInstruction
from pytezos.michelson.instructions.arithmetic import IsNatInstruction
from pytezos.michelson.instructions.arithmetic import LslInstruction
from pytezos.michelson.instructions.arithmetic import LsrInstruction
from pytezos.michelson.instructions.arithmetic import MulInstruction
from pytezos.michelson.instructions.arithmetic import NatInstruction
from pytezos.michelson.instructions.arithmetic import NegInstruction
from pytezos.michelson.instructions.boolean import AndInstruction
from pytezos.michelson.instructions.boolean import NotInstruction
from pytezos.michelson.instructions.boolean import OrInstruction
from pytezos.michelson.instructions.boolean import XorInstruction
from pytezos.michelson.instructions.compare import CompareInstruction
from pytezos.michelson.instructions.compare import EqInstruction
from pytezos.michelson.instructions.compare import GeInstruction
from pytezos.michelson.instructions.compare import GtInstruction
from pytezos.michelson.instructions.compare import LeInstruction
from pytezos.michelson.instructions.compare import LtInstruction
from pytezos.michelson.instructions.compare import NeqInstruction
from pytezos.michelson.instructions.control import ApplyInstruction
from pytezos.michelson.instructions.control import DipInstruction
from pytezos.michelson.instructions.control import DipnInstruction
from pytezos.michelson.instructions.control import ExecInstruction
from pytezos.michelson.instructions.control import FailwithInstruction
from pytezos.michelson.instructions.control import IfConsInstruction
from pytezos.michelson.instructions.control import IfInstruction
from pytezos.michelson.instructions.control import IfLeftInstruction
from pytezos.michelson.instructions.control import IfNoneInstruction
from pytezos.michelson.instructions.control import IterInstruction
from pytezos.michelson.instructions.control import LambdaInstruction
from pytezos.michelson.instructions.control import LambdaRecInstruction
from pytezos.michelson.instructions.control import LoopInstruction
from pytezos.michelson.instructions.control import LoopLeftInstruction
from pytezos.michelson.instructions.control import MapInstruction
from pytezos.michelson.instructions.control import PushInstruction
from pytezos.michelson.instructions.crypto import Blake2bInstruction
from pytezos.michelson.instructions.crypto import CheckSignatureInstruction
from pytezos.michelson.instructions.crypto import HashKeyInstruction
from pytezos.michelson.instructions.crypto import KeccakInstruction
from pytezos.michelson.instructions.crypto import PairingCheckInstruction
from pytezos.michelson.instructions.crypto import SaplingEmptyStateInstruction
from pytezos.michelson.instructions.crypto import SaplingVerifyUpdateInstruction
from pytezos.michelson.instructions.crypto import Sha3Instruction
from pytezos.michelson.instructions.crypto import Sha256Instruction
from pytezos.michelson.instructions.crypto import Sha512Instruction
from pytezos.michelson.instructions.generic import ConcatInstruction
from pytezos.michelson.instructions.generic import NeverInstruction
from pytezos.michelson.instructions.generic import PackInstruction
from pytezos.michelson.instructions.generic import SizeInstruction
from pytezos.michelson.instructions.generic import SliceInstruction
from pytezos.michelson.instructions.generic import UnitInstruction
from pytezos.michelson.instructions.generic import UnpackInstruction
from pytezos.michelson.instructions.jupyter import BeginInstruction
from pytezos.michelson.instructions.jupyter import BigMapDiffInstruction
from pytezos.michelson.instructions.jupyter import CommitInstruction
from pytezos.michelson.instructions.jupyter import DebugInstruction
from pytezos.michelson.instructions.jupyter import DropAllInstruction
from pytezos.michelson.instructions.jupyter import DumpAllInstruction
from pytezos.michelson.instructions.jupyter import DumpInstruction
from pytezos.michelson.instructions.jupyter import PatchInstruction
from pytezos.michelson.instructions.jupyter import PatchValueInstruction
from pytezos.michelson.instructions.jupyter import PrintInstruction
from pytezos.michelson.instructions.jupyter import ResetInstruction
from pytezos.michelson.instructions.jupyter import ResetValueInstruction
from pytezos.michelson.instructions.jupyter import RunInstruction
from pytezos.michelson.instructions.stack import DigInstruction
from pytezos.michelson.instructions.stack import DropInstruction
from pytezos.michelson.instructions.stack import DropnInstruction
from pytezos.michelson.instructions.stack import DugInstruction
from pytezos.michelson.instructions.stack import DupInstruction
from pytezos.michelson.instructions.stack import DupnInstruction
# NOTE(review): `PushInstruction` is also imported from `.control` above;
# this later import shadows it, so the `stack` version is the one in scope.
from pytezos.michelson.instructions.stack import PushInstruction
from pytezos.michelson.instructions.stack import RenameInstruction
from pytezos.michelson.instructions.stack import SwapInstruction
from pytezos.michelson.instructions.struct import ConsInstruction
from pytezos.michelson.instructions.struct import EmptyBigMapInstruction
from pytezos.michelson.instructions.struct import EmptyMapInstruction
from pytezos.michelson.instructions.struct import EmptySetInstruction
from pytezos.michelson.instructions.struct import GetAndUpdateInstruction
from pytezos.michelson.instructions.struct import GetInstruction
from pytezos.michelson.instructions.struct import MemInstruction
from pytezos.michelson.instructions.struct import NilInstruction
from pytezos.michelson.instructions.struct import NoneInstruction
from pytezos.michelson.instructions.struct import SomeInstruction
from pytezos.michelson.instructions.struct import UpdateInstruction
from pytezos.michelson.instructions.tezos import AddressInstruction
from pytezos.michelson.instructions.tezos import AmountInstruction
from pytezos.michelson.instructions.tezos import BalanceInstruction
from pytezos.michelson.instructions.tezos import ChainIdInstruction
from pytezos.michelson.instructions.tezos import ContractInstruction
from pytezos.michelson.instructions.tezos import CreateContractInstruction
from pytezos.michelson.instructions.tezos import ImplicitAccountInstruction
from pytezos.michelson.instructions.tezos import MinBlockTimeInstruction
from pytezos.michelson.instructions.tezos import NowInstruction
from pytezos.michelson.instructions.tezos import SelfAddressInstruction
from pytezos.michelson.instructions.tezos import SelfInstruction
from pytezos.michelson.instructions.tezos import SenderInstruction
from pytezos.michelson.instructions.tezos import SetDelegateInstruction
from pytezos.michelson.instructions.tezos import SourceInstruction
from pytezos.michelson.instructions.tezos import TotalVotingPowerInstruction
from pytezos.michelson.instructions.tezos import TransferTokensInstruction
from pytezos.michelson.instructions.tezos import VotingPowerInstruction
from pytezos.michelson.instructions.ticket import JoinTicketsInstruction
from pytezos.michelson.instructions.ticket import ReadTicketInstruction
from pytezos.michelson.instructions.ticket import SplitTicketInstruction
from pytezos.michelson.instructions.ticket import TicketInstruction
from pytezos.michelson.instructions.tzt import BigMapInstruction
from pytezos.michelson.instructions.tzt import StackEltInstruction
| [
"noreply@github.com"
] | baking-bad.noreply@github.com |
f2e5aa16489de06496939150e81354951081ef4a | 7615cb2a3d19a998ada9ed7994a5c304b65ad760 | /HandlingButtonClicks.py | e0a4f00751140ac3392837403b1d7f2c5920d2f8 | [] | no_license | sadiqulislam/GUI-Tkinter | 61ae812443f51a21b84033b31997c9a50f8d78cf | ab37e78c9f5f27f9b79b5908055391d3dd923c58 | refs/heads/master | 2021-01-02T09:55:01.743688 | 2020-02-11T16:55:21 | 2020-02-11T16:55:21 | 239,565,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from tkinter import *
# Build the root window for this minimal Tkinter button demo.
root = Tk()

def click():
    """Handler wired to the button; prints a message on every press."""
    print("You Clicked Here")

# One button whose command runs `click`, added with the default pack geometry.
button1 = Button(root,text="Click Here",command=click)
button1.pack()
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
"sishishir2015@gmail.com"
] | sishishir2015@gmail.com |
6a79b2cad508e9b03721168440cfa6c8f0590b3f | 41a3baf733d7b701bdd3600bf4f7aa227060785f | /q7_8.py | 316c41be5d7362b1017efed0e21cae99d30f8a4d | [] | no_license | domspad/ctci_solutions | 7a8264ef26fd2209adaed005cc0c8f0d6538ab05 | 6a3af811732f62a613f7fa884f5ae0e0a9e64132 | refs/heads/master | 2021-01-02T09:38:05.278621 | 2015-03-11T05:28:29 | 2015-03-11T05:28:29 | 30,838,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | """
# Chapter 7: Object-oriented Design
# Problem 2:
Othello is played as follows: each Othello piece is white on one side and black on the other.
When a piece is surrounded by its opponents on both the left and right sides, or both the top and bottom,
it is said to be captured and its color is flipped. On your turn,
you must capture at least one of your opponent’s pieces. The game ends when either user has no more valid moves,
and the win is assigned to the person with the most pieces. Implement the object-oriented design for Othello.
"""
################################################################################
################################################################################
################################################################################
# SOLUTIONS
"""
Solution 1 :
designed for 2 players
only one main class that hides a lot of inner functions
when started, runs continously until game is over
"""
class Question(object):
    """Single-class Othello design sketch for two players (Solution 1).

    Board state, move validation, and rendering are intentionally left as
    stubs; only the main game loop in :meth:`_game` is fleshed out.
    """

    # Piece / player identifiers.
    white = 1
    black = 2

    def start(self):
        """Set up the board into the standard Othello starting position."""

    def _won(self):
        """Return the game status: 0 = in progress, 1 = white won,
        2 = black won, 3 = draw."""

    def _canGo(self, color):
        """Return True if *color* has at least one valid move, else False."""

    def _isValid(self, color, pos):
        """Return True if *pos* is a legal move for *color*."""

    def _getMove(self, color):
        """Prompt *color*'s player for a move; raise on invalid input."""

    def _addMove(self, color, pos):
        """Apply a validated move to the board and flip captured pieces."""

    def _printBoard(self):
        """Print the current state of the board."""

    def _game(self):
        """Run the main loop until the game ends, then announce the result."""
        self._printBoard()
        # Alternate turns, starting with white; `_getMove` requires the
        # color of the player whose turn it is (the original sketch called
        # it with no argument).
        turn = self.white
        while self._won() == 0:
            valid = False
            while not valid:
                try:
                    self._getMove(turn)
                    valid = True
                except Exception:
                    print("enter valid coordinate")
            turn = self.black if turn == self.white else self.white
            self._printBoard()
        # Status 3 means a draw; 1 and 2 name the winning color.
        if self._won() != 3:
            if self._won() == 1:
                print("white won")
            else:
                print("black won")
        else:
            print("draw!")
"""
Solution 2 :
Setting up like we did for the card game in peter norvig's class
"""
class Game(object):
    """Top-level Othello game controller (design sketch, Solution 2).

    Owns players, turn order, and end-of-game detection; board mechanics
    are delegated to :class:`Board`.  All methods are stubs — the original
    sketch left the defs without bodies, which is a syntax error.
    """

    def __init__(self):
        """Initialise a controller with no active game."""

    def newGame(self, numPlayers, size):
        """Start a fresh game for *numPlayers* on a *size* x *size* board."""

    def getTurn(self):
        """Return the player whose turn it currently is."""

    def isFinished(self):
        """Return True once neither player has a valid move left."""
class Board(object):
    """Othello board state and move mechanics (design sketch, Solution 2).

    All methods are stubs — the original sketch left the defs without
    bodies, which is a syntax error.
    """

    def __init__(self):
        """Create an empty board."""

    def getMoves(self, color):
        """Return all legal moves currently available to *color*."""

    def setMove(self, color, loc):
        """Place *color*'s piece at *loc* and flip captured pieces."""

    def printBoard(self):
        """Print a human-readable rendering of the board."""
#player is strategy function
| [
"domspad@umich.edu"
] | domspad@umich.edu |
9c03847e0d6865a99ca8a01dffb1c2d0f7edd4db | 184d8b600b66ceed4e065878447fd3b99d137a48 | /SAN/san_eval.py | 60938ea99585b075c384a071f438c62d9397c7a0 | [
"MIT"
] | permissive | arnoldjair/landmark-detection | 10d45bcdfbb469a3f59fb7d3916fe508fc0b150f | 1ad9db7d94397d81898f6f7c05abe76806d3d85e | refs/heads/master | 2023-03-07T20:06:57.594994 | 2021-02-15T02:56:49 | 2021-02-15T02:57:44 | 261,280,519 | 0 | 0 | MIT | 2020-05-04T19:48:14 | 2020-05-04T19:48:13 | null | UTF-8 | Python | false | false | 4,894 | py | ##############################################################
### Copyright (c) 2018-present, Xuanyi Dong ###
### Style Aggregated Network for Facial Landmark Detection ###
### Computer Vision and Pattern Recognition, 2018 ###
##############################################################
from __future__ import division
import os, sys, time, random, argparse, PIL
from pathlib import Path
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True # please use Pillow 4.0.0 or it may fail for some images
from os import path as osp
import numbers, numpy as np
import init_path
import torch
import models
import datasets
from visualization import draw_image_by_points
from san_vision import transforms
from utils import time_string, time_for_file, get_model_infos
def evaluate(args):
    """Run the trained SAN landmark detector on a single face image.

    Loads the snapshot named by ``args.model``, rebuilds the network and the
    evaluation transform from the arguments stored inside the snapshot,
    crops the face given by ``args.face``, runs one forward pass, and prints
    the predicted landmark coordinates (optionally saving a visualization).

    :param args: namespace with ``image``, ``model``, ``face`` (x1,y1,x2,y2),
        ``save_path`` and ``cpu`` attributes (see the CLI entry point).
    """
    if not args.cpu:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    snapshot = Path(args.model)
    # fix: the assertion message previously contained an unfilled {:} placeholder.
    assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
    print('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    if args.cpu:
        snapshot = torch.load(snapshot, map_location='cpu')
    else:
        snapshot = torch.load(snapshot)

    # Inputs are normalized to [-1, 1] per channel.
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    param = snapshot['args']
    eval_transform = transforms.Compose(
        [transforms.PreCrop(param.pre_crop_expand),
         transforms.TrainScale2WH((param.crop_width, param.crop_height)),
         transforms.ToTensor(), normalize])

    # Rebuild the network with the architecture it was trained with and
    # load the (module-prefix-stripped) weights.
    net = models.__dict__[param.arch](param.modelconfig, None)
    if not args.cpu:
        net = net.cuda()
    weights = models.remove_module_dict(snapshot['state_dict'])
    net.load_state_dict(weights)

    dataset = datasets.GeneralDataset(eval_transform, param.sigma, param.downsample, param.heatmap_type, param.dataset_name)
    dataset.reset(param.num_pts)

    print('[{:}] prepare the input data'.format(time_string()))
    [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
    print('[{:}] prepare the input data done'.format(time_string()))
    print('Net : \n{:}'.format(net))

    # network forward
    with torch.no_grad():
        if args.cpu:
            inputs = image.unsqueeze(0)
        else:
            inputs = image.unsqueeze(0).cuda()
        batch_heatmaps, batch_locs, batch_scos, _ = net(inputs)
        flops, params = get_model_infos(net, inputs.shape, None)
        print('\nIN-shape : {:}, FLOPs : {:} MB, Params : {:}.'.format(list(inputs.shape), flops, params))
        flops, params = get_model_infos(net, None, inputs)
        print('\nIN-shape : {:}, FLOPs : {:} MB, Params : {:}.'.format(list(inputs.shape), flops, params))
    print('[{:}] the network forward done'.format(time_string()))

    # obtain the locations on the image in the original size
    cpu = torch.device('cpu')
    np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
    # The last channel is dropped (`[:-1]`) — presumably an auxiliary/background
    # point; TODO confirm against the model definition.
    locations, scores = np_batch_locs[0, :-1, :], np.expand_dims(np_batch_scos[0, :-1], -1)
    # Map from network-input coordinates back into the original image frame:
    # scale by crop size / input size, then shift by the crop offsets.
    scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2), cropped_size[1] * 1. / inputs.size(-1)
    locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
    prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

    for i in range(param.num_pts):
        point = prediction[:, i]
        print('The coordinate of {:02d}/{:02d}-th points : ({:.1f}, {:.1f}), score = {:.3f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))

    if args.save_path:
        image = draw_image_by_points(args.image, prediction, 1, (255, 0, 0), False, False)
        image.save(args.save_path)
        print('save image with landmarks into {:}'.format(args.save_path))
    print('finish san evaluation on a single image : {:}'.format(args.image))
if __name__ == '__main__':
    # CLI entry point: collect the image/model/bounding-box arguments and
    # run a one-off evaluation.
    parser = argparse.ArgumentParser(description='Evaluate a single image by the trained model', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--image', type=str, help='The evaluation image path.')
    parser.add_argument('--model', type=str, help='The snapshot to the saved detector.')
    parser.add_argument('--face', nargs='+', type=float, help='The coordinate [x1,y1,x2,y2] of a face')
    parser.add_argument('--save_path', type=str, help='The path to save the visualization results')
    parser.add_argument('--cpu', action='store_true', help='Use CPU or not.')
    args = parser.parse_args()
    evaluate(args)
| [
"280835372@qq.com"
] | 280835372@qq.com |
2a3797f3bed09887087024272f762da60f75cf0c | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Data Structures/Disjoint Data Structures/Basics of Disjoint Data Structures/Utkarsh and Sub Array Xor/test.py | 27983cd92535eaaa15d7a1cf008205989e6f294e | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 587 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Golden-output test for the HackerRank solution script."""

    # Patched stdin feeds the sample input, one readline() call per entry.
    @patch('sys.stdin.readline', side_effect=[
        '5 3',
        '1 1',
        '2 3',
        '5 5',
    ])
    def test_case_0(self, input_mock=None):
        # Importing `solution` executes the script under the patched stdin;
        # everything it prints is captured and compared to the expected output.
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(),
                         '16\n' +
                         '8\n' +
                         '4\n')
if __name__ == '__main__':
    # Run the test case(s) above when executed directly.
    unittest.main()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
f310095a1ac0eb8cfb8178e08cc8ef0bc17261cd | 70f564990215f47b139a777826f211477e9b44f6 | /plan2vec/plotting/visualize_pairs_2d.py | baba4ce4a5bc3ae7fadfd4c5f4a7a5b08a1d0a2a | [] | no_license | geyang/plan2vec | de87f2d77732c4aacdefd00067ebebacb7cd763f | aeeb50aed3d7da4c266b4ca163e96d4c0747e3c1 | refs/heads/master | 2022-11-16T03:40:42.638239 | 2022-10-28T04:01:29 | 2022-10-28T04:01:29 | 261,273,420 | 65 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,191 | py | import numpy.ma as ma
def visualize_neighbors(xs, ns, key=None):
    """Draw each sampled state together with edges to its dataset neighbors.

    :param xs: Size(batch_n, 2) sampled states
    :param ns: Size(batch_n, k, 2) neighbors per state
    :param key: where to save the figure; shown interactively when None
    :return: None
    """
    import matplotlib.pyplot as plt

    assert len(xs) == len(ns), "state samples and be neighbors need to have the same length"

    dpi = 300
    plt.figure(figsize=(3, 3), dpi=dpi)
    plt.title("neighbors")

    total = xs.shape[0]
    for rank, (point, nbrs) in enumerate(zip(xs, ns)):
        # later samples are drawn more opaque
        fade = (rank + 1) / (total + 1)
        # note: marker size = points/inch * actual axial size.
        plt.plot(point[0], point[1], "o", c="black", alpha=fade,
                 markersize=dpi * 0.015 * 2, mec="none")
        for nb in nbrs:
            plt.plot([point[0], nb[0]], [point[1], nb[1]], "-",
                     c='black', alpha=fade, linewidth=3, mec='none')

    plt.xlim(-0.3, 0.3)
    plt.ylim(-0.3, 0.3)
    plt.gca().set_aspect('equal')

    if key is None:
        plt.show()
    else:
        from ml_logger import logger
        logger.savefig(key)
    plt.close()
def visualize_start_goal(starts, goals, key=None):
    """Plot start states connected by a segment to their goals.

    :param starts: Size(batch_n, 2) start positions
    :param goals: Size(batch_n, 2) goal positions
    :param key: where to save the figure; shown interactively when None
    :return: None
    """
    import matplotlib.pyplot as plt

    assert len(starts) == len(goals), "starts and goals need to have the same length"

    dpi = 300
    plt.figure(figsize=(3, 3), dpi=dpi)
    plt.title("Start and Goals")

    total = goals.shape[0]
    for rank, (start, goal) in enumerate(zip(starts, goals)):
        # later pairs are drawn more opaque
        fade = (rank + 1) / (total + 1)
        # note: marker size = points/inch * actual axial size.
        plt.plot(goal[0], goal[1], "o", c="black", alpha=fade,
                 markersize=dpi * 0.015 * 2, mec="none")
        plt.plot([start[0], goal[0]], [start[1], goal[1]], "-",
                 c='black', alpha=fade, linewidth=3, mec='none')

    plt.xlim(-0.3, 0.3)
    plt.ylim(-0.3, 0.3)
    plt.gca().set_aspect('equal')

    if key is None:
        plt.show()
    else:
        from ml_logger import logger
        logger.savefig(key)
    plt.close()
def visualize_latent_plans(xs, goals, done, key=None):
    """Visualize sampled latent plans in a 2-dimensional domain.

    Steps after an episode terminates are masked out via the `done` flags so
    each plotted trajectory stops at its terminal step.

    :param xs: Size(B, n, 2) trajectory coordinates
    :param goals: Size(B, n, 2) goal coordinates per step
    :param done: Size(B, n) boolean termination flags
    :param key: where to save the figure; shown interactively when None
    :return: None
    """
    import matplotlib.pyplot as plt

    DPI = 300
    plt.figure(figsize=(3, 3), dpi=DPI)
    plt.title("Trajectory Distribution")

    # note: iterates over the SECOND axis (k entries), masking along the first.
    n, k, *_ = xs.shape
    for i in range(k):
        # later trajectories are drawn more opaque
        alpha = (i + 1) / (k + 1)
        x, g, d = ma.array(xs[:, i]), ma.array(goals[:, i]), done[:, i]
        # mask out post-termination steps so the plotted line stops there
        x[d] = ma.masked
        g[d] = ma.masked
        # note: marker size = points/inch * actual axial size.
        plt.plot(g[:, 0], g[:, 1], "o", c="red", alpha=0.7 / (n + 1), markersize=DPI * 0.04 * 2, mec="none")
        plt.plot(ma.array(x[:, 0]), ma.array(x[:, 1]), 'o-', c="#23aaff", alpha=alpha, linewidth=2,
                 markersize=DPI * 0.01 * 2, mec="none")

    plt.xlim(-0.3, 0.3)
    plt.ylim(-0.3, 0.3)
    plt.gca().set_aspect('equal')

    if key is None:
        plt.show()
    else:
        from ml_logger import logger
        logger.savefig(key)
    plt.close()
def visualize_trajectories_2d(paths, key=None):
    """Plot sampled trajectories and their goals in a 2-dimensional domain.

    :param paths: sample dictionary with paths['obs']['x'] trajectories and
        paths['obs']['goal'] goal positions
    :param key: where to save the figure; shown interactively when None
    :return: None
    """
    import matplotlib.pyplot as plt

    dpi = 300
    plt.figure(figsize=(3, 3), dpi=dpi)
    plt.title("Trajectory Distribution")

    goals = paths['obs']["goal"][:1]
    k = goals.shape[1]
    # note: marker size = points/inch * actual axial size.
    plt.plot(goals[:, :, 0], goals[:, :, 1], "o", c="red", alpha=0.7,
             markersize=dpi * 0.04 * 2, mec="none")

    trajs = paths['obs']["x"]
    # todo: use different colors for different trajectories
    plt.plot(trajs[:, :, 0], trajs[:, :, 1], 'o-', c="#23aaff", alpha=10 / k,
             linewidth=2, markersize=dpi * 0.01 * 2, mec="none")

    plt.xlim(-0.3, 0.3)
    plt.ylim(-0.3, 0.3)
    plt.gca().set_aspect('equal')

    if key is None:
        plt.show()
    else:
        from ml_logger import logger
        logger.savefig(key)
    plt.close()
def visualize_skewed_trajectories(paths, key=None):
    """Plot sampled trajectories, highlighting rejected goal positions.

    Each visited position is classified with ``ge_world.c_maze.good_goal``;
    positions failing the check are drawn in red, the rest in the
    trajectory's color.

    :param paths: sample dictionary with paths['obs']['x'] trajectories and
        paths['obs']['goal'] goal positions
    :param key: where to save the figure; shown interactively when None
    :return: None
    """
    import matplotlib.pyplot as plt
    # fix: numpy was only imported inside the __main__ guard, so calling this
    # function from another module raised NameError on `np`.
    import numpy as np

    DPI = 300
    title = "Trajectory Distribution"
    plt.figure(figsize=(3, 3), dpi=DPI)
    plt.title(title)

    goals = paths['obs']["goal"][:1]
    k = goals.shape[1]
    # note: marker size = points/inch * actual axial size.
    if k < 10:
        plt.plot(goals[:, :, 0], goals[:, :, 1], "o", c="gray", alpha=0.1, markersize=DPI * 0.04 * 2, mec="none")

    trajs = paths['obs']["x"]
    k = trajs.shape[1]
    # single color for dense plots, a small palette otherwise
    colors = ['#23aaff'] if k > 10 else ['#49b8ff', '#66c56c', '#f4b247']

    from ge_world.c_maze import good_goal
    good = np.array([good_goal(_) for _ in trajs.reshape(-1, 2)]).reshape(trajs.shape[:2])

    # note: marker size = points/inch * actual axial size.
    for i, traj in enumerate(np.swapaxes(trajs, 0, 1)):
        plt.plot(traj[:, 0], traj[:, 1], '-', c=colors[i % len(colors)], alpha=0.5 if k < 10 else (10. / k),
                 linewidth=2)
    for i, (traj, g) in enumerate(zip(np.swapaxes(trajs, 0, 1), np.swapaxes(good, 0, 1))):
        # positions where good_goal is False are highlighted in red
        c = list(np.where(g, colors[i % len(colors)], "red"))
        plt.scatter(traj[:, 0], traj[:, 1], s=DPI * 0.05 * 2, c=c, marker='o', alpha=0.8 if k < 10 else (10. / k),
                    linewidth=2, edgecolors="none")

    plt.xlim(-0.3, 0.3)
    plt.ylim(-0.3, 0.3)
    plt.gca().set_aspect('equal')

    if key is None:
        plt.show()
    else:
        from ml_logger import logger
        logger.savefig(key)
    plt.close()
if __name__ == "__main__":
    # Figure-generation script: roll out goal-conditioned policies in the
    # CMaze environment and visualize the resulting trajectories.
    from tqdm import trange
    import numpy as np
    from plan2vec.mdp.sampler import path_gen_fn
    from plan2vec.mdp.helpers import make_env
    from plan2vec.mdp.wrappers.subproc_vec_env import SubprocVecEnv
    from ge_world import IS_PATCHED

    assert IS_PATCHED is True, "need patch"

    # 500 vectorized environments, sampled in parallel subprocesses.
    envs = SubprocVecEnv([make_env('CMazeDiscreteIdLess-v0', i) for i in trange(500)])
    # Uniform-random policy over the 8 discrete actions.
    random_pi = lambda ob, goal, *_: np.random.randint(0, 8, size=[len(ob)])
    # Map from a (dx, dy) half-unit step direction to the discrete action index.
    rev_a_dict = {(-0.5, -0.5): 0,
                  (-0.5, 0): 1,
                  (-0.5, 0.5): 2,
                  (0, -0.5): 3,
                  (0, 0.5): 4,
                  (0.5, -0.5): 5,
                  (0.5, 0): 6,
                  (0.5, 0.5): 7}

    def homing_pi(ob, goal):
        # Take a half-unit step toward the goal along each coordinate
        # (sign of the difference), then look up the matching action.
        act = map(tuple, 0.5 * abs(np.array(goal) - np.array(ob)) / (np.array(goal) - np.array(ob)))
        return [rev_a_dict[a] for a in act]

    eps = 0.1

    def greedy_pi(ob, goal):
        # Epsilon-greedy mixture of the random and homing policies.
        if np.random.rand(1) < eps:
            return random_pi(ob, goal)
        return homing_pi(ob, goal)

    # servo_pi = lambda ob, goal, *_: rev_a_dict[
    #     tuple(0.5 * abs(np.array(goal) - np.array(ob)) / (np.array(goal) - np.array(ob)))]
    # Prime the generator, then request 50 sampled timesteps per env.
    rand_path_gen = path_gen_fn(envs, greedy_pi, "x", "goal", all_keys=['x', 'goal'])
    next(rand_path_gen)
    p = rand_path_gen.send(50)
    # a, b = p['obs']['x'][:2, 0]
    # b - a
    # print(p['x'])
    # visualize_trajectories_2d(p, f"./figures/CMaze trajectories (fixed).png")
    # visualize_trajectories_2d(p, f"./figures/CMaze trajectories (homing).png")
    # visualize_trajectories_2d(p, f"./figures/CMaze trajectories (greedy {eps}).png")
    visualize_skewed_trajectories(p, f"figures/CMaze rejected goals (500).png")
    # visualize_skewed_trajectories(p)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
0dec8f63058472db6eff1fc379f5f8579e6dab45 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/interactions/picker/picker_pie_menu_interaction.py | 8769291d17aacc068029716f6b93552f0757e5f9 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,817 | py | from event_testing.results import TestResult
from objects.base_interactions import ProxyInteraction
from sims4.utils import flexmethod, classproperty
from singletons import DEFAULT
class _PickerPieMenuProxyInteraction(ProxyInteraction):
    """Proxy interaction generated for a single picker row shown in a pie menu.

    Each picker row of the proxied affordance becomes one of these proxies so
    the row can be displayed, tested, and executed like a regular pie-menu
    interaction.  (Decompiled code — statement ordering artifacts are noted
    inline.)
    """
    INSTANCE_SUBCLASSES_ONLY = True

    @classproperty
    def proxy_name(cls):
        # Tag identifying generated picker-row proxies.
        return '[PickerRow]'

    @classmethod
    def generate(cls, proxied_affordance, picker_row_data):
        """Create the proxy class for *proxied_affordance* bound to one row."""
        result = super().generate(proxied_affordance)
        result.picker_row_data = picker_row_data
        # Hook up cost/gain display strings when the affordance charges funds.
        if hasattr(proxied_affordance, 'funds_source'):
            result.register_cost_gain_strings_callbacks(proxied_affordance.funds_source.get_cost_string, proxied_affordance.funds_source.get_gain_string)
        return result

    @classmethod
    def potential_interactions(cls, target, context, **kwargs):
        # One AoP per proxy; the row data is already baked into the class.
        yield cls.generate_aop(target, context, **kwargs)

    @classmethod
    def _test(cls, target, context, **kwargs):
        """Pass only when the row is enabled; disabled rows may expose a tooltip."""
        result = super()._test(target, context, **kwargs)
        if not result:
            return result
        if cls.picker_row_data.is_enable:
            tooltip = None
        else:
            # Disabled rows only surface their tooltip when the pie-menu
            # option asks to show disabled items.
            tooltip = cls.picker_row_data.row_tooltip if cls.pie_menu_option.show_disabled_item else None
        return TestResult(cls.picker_row_data.is_enable, influence_by_active_mood=cls.picker_row_data.pie_menu_influence_by_active_mood, tooltip=tooltip)

    @flexmethod
    def get_display_tooltip(cls, inst, override=None, context=DEFAULT, **kwargs):
        """Resolve the tooltip: explicit override, else the row's own tooltip."""
        inst_or_cls = inst if inst is not None else cls
        # NOTE(review): `inst.context` is read even on the class-level path
        # where `inst` is None -- presumably callers always pass `context`
        # then; confirm.
        context = inst.context if context is DEFAULT else context
        tooltip = inst_or_cls.display_tooltip
        # NOTE(review): `override` is dereferenced here before the
        # `override is not None` check below -- likely a decompile artifact.
        if override.new_display_tooltip is not None:
            tooltip = override.new_display_tooltip
        if override is not None and tooltip is None:
            tooltip = inst_or_cls.picker_row_data.row_tooltip
        if tooltip is not None:
            tooltip = inst_or_cls.create_localized_string(tooltip, context=context, **kwargs)
        return tooltip

    @flexmethod
    def get_pie_menu_category(cls, inst, **kwargs):
        # Forced category wins; otherwise prefer the row's category and fall
        # back to the pie-menu option's.
        inst_or_cls = inst if inst is not None else cls
        if inst_or_cls.pie_menu_option.force_pie_menu_category:
            return inst_or_cls.pie_menu_option.pie_menu_category
        return inst_or_cls.picker_row_data.pie_menu_category or inst_or_cls.pie_menu_option.pie_menu_category

    @flexmethod
    def _get_name(cls, inst, target=DEFAULT, context=DEFAULT, **kwargs):
        """Build the localized display name, appending a price when present."""
        inst_or_cls = inst if inst is not None else cls
        display_name = inst_or_cls.pie_menu_option.pie_menu_name
        (override_tunable, _) = inst_or_cls.get_name_override_and_test_result(target=target, context=context)
        if override_tunable.new_display_name is not None:
            display_name = override_tunable.new_display_name
        # The row's name is passed as the first token of the localized string.
        display_name = inst_or_cls.create_localized_string(display_name, inst_or_cls.picker_row_data.name, target=target, context=context, **kwargs)
        price = getattr(inst_or_cls.picker_row_data, 'price', 0)
        if override_tunable is not None and price > 0:
            # Discounted rows show the discounted price instead.
            if inst_or_cls.picker_row_data.is_discounted:
                price = inst_or_cls.picker_row_data.discounted_price
            cost_name_factory = inst_or_cls.get_cost_name_factory()
            display_name = cost_name_factory(display_name, price)
        return display_name

    def _run_interaction_gen(self, timeline):
        """Run the proxied interaction, then report the selected row."""
        yield from super()._run_interaction_gen(timeline)
        ingredient_data = self._kwargs.get('recipe_ingredients_map')
        self.on_choice_selected(self.picker_row_data.tag, ingredient_data=ingredient_data, ingredient_check=ingredient_data is not None)
        return True
| [
"44103490+daniela-venuta@users.noreply.github.com"
] | 44103490+daniela-venuta@users.noreply.github.com |
0f6ed1a800cc6996f60226a18705142f486db303 | 21b07f788809bce4bc7d917b429018b53d71a5b1 | /esp32/ioboard-test-sparkfun/network_rtc_2.py | cbc831f221eb08ca88a785b316fbdade9e06d1b2 | [] | no_license | pascal1062/micropython | f4008b87b7addfb0908b90dac2f12f67b9feb411 | 1968240d15f4a046e5ba71308ab46a927457685f | refs/heads/main | 2023-07-17T17:59:18.893905 | 2021-08-29T15:23:10 | 2021-08-29T15:23:10 | 401,066,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py |
def set_time():
    """Fetch the current time over HTTP and program the board RTC with it.

    Does nothing when WiFi is down or the server does not answer with 200.
    """
    # MicroPython modules are imported lazily inside the function.
    import network
    import urequests
    import ujson
    from machine import RTC

    url = "http://192.168.0.90:1880/currenttime"
    station = network.WLAN(network.STA_IF)
    clock = RTC()

    if not station.isconnected():
        return
    reply = urequests.get(url)
    if reply.status_code != 200:  # anything but success: leave the RTC alone
        return

    print("JSON response:\n" + reply.text)
    payload = ujson.loads(reply.text)  # could also use reply.json()
    stamp = str(payload["datetime"])
    # Pull the ISO-8601 date/time fields out by fixed character positions.
    year, month, day, hour, minute, second = (
        int(stamp[a:b]) for a, b in
        ((0, 4), (5, 7), (8, 10), (11, 13), (14, 16), (17, 19)))
    # RTC tuple: (year, month, day, weekday, hour, minute, second, subsecond);
    # weekday and subseconds are left at 0.
    clock.datetime((year, month, day, 0, hour, minute, second, 0))
    print("RTC updated\n")
#End
| [
"noreply@github.com"
] | pascal1062.noreply@github.com |
942731729f219217e64b0c6be4991e8bfd33277f | 7e2529fec680006e01d3688bb33115e9a97ee1ac | /hackerrank/cutTheSticks.py | e62c609436c1bd910f791d9e510f2136d8c6cc9f | [] | no_license | JamesWo/Algorithms | d3c6155e655e40704d42084b7d2236eb01ccb91c | e4a13f06b3902f23389e89f00832886a495ced42 | refs/heads/master | 2021-01-17T06:09:35.968583 | 2016-08-03T17:05:54 | 2016-08-03T17:05:54 | 36,084,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #https://www.hackerrank.com/challenges/cut-the-sticks
# Enter your code here. Read input from STDIN. Print output to STDOUT


def remaining_counts(lengths):
    """Return the number of sticks remaining before each round of cuts.

    Each round cuts every stick by the current shortest length, which
    discards the whole shortest group.  Fixes the original loop, which
    raised IndexError when the longest group contained duplicates
    (e.g. input [1, 1]), and ports the script to Python 3.

    :param lengths: iterable of positive stick lengths
    :return: list of stick counts, one entry per cutting round
    """
    remaining = sorted(lengths)
    counts = []
    while remaining:
        counts.append(len(remaining))
        shortest = remaining[0]
        # every stick equal to the minimum is cut to zero and removed
        remaining = [length for length in remaining if length > shortest]
    return counts


if __name__ == '__main__':
    # First input line is the stick count (the first printed value equals
    # it); the second line holds the stick lengths.
    input()
    sticks = list(map(int, input().split()))
    for count in remaining_counts(sticks):
        print(count)
| [
"jameswo@berkeley.edu"
] | jameswo@berkeley.edu |
64de19db9c16d1cb37f188e8734ed6bc7a683bc3 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/l10n_ar/models/res_partner_bank.py | 5f5104c356fdebe62bc9b36b803cb5a652bb8da7 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 1,675 | py | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, api, _
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
try:
    # Prefer the reference implementation shipped with python-stdnum.
    from stdnum.ar.cbu import validate as validate_cbu
except ImportError:
    import stdnum
    _logger.warning("stdnum.ar.cbu is avalaible from stdnum >= 1.6. The one installed is %s" % stdnum.__version__)

    def validate_cbu(number):
        """Fallback validator for Argentinian CBU bank account numbers.

        A CBU is 22 digits: digit 8 is the check digit for the first 7,
        and the last digit checks digits 9-21.

        :param number: candidate CBU; spaces and dashes are tolerated
        :return: the cleaned 22-digit string when valid
        :raises ValidationError: on bad length, non-digits, or checksum failure
        """
        def _check_digit(number):
            """Calculate the check digit."""
            # Weights cycle 3-1-7-9 starting from the rightmost digit.
            weights = (3, 1, 7, 9)
            check = sum(int(n) * weights[i % 4] for i, n in enumerate(reversed(number)))
            return str((10 - check) % 10)
        # NOTE(review): relies on `stdnum.util` being reachable through the
        # bare `import stdnum` above -- confirm the submodule is loaded.
        number = stdnum.util.clean(number, ' -').strip()
        if len(number) != 22:
            raise ValidationError('Invalid Length')
        if not number.isdigit():
            raise ValidationError('Invalid Format')
        if _check_digit(number[:7]) != number[7]:
            raise ValidationError('Invalid Checksum')
        if _check_digit(number[8:-1]) != number[-1]:
            raise ValidationError('Invalid Checksum')
        return number
class ResPartnerBank(models.Model):
    """Extend bank accounts with the Argentinian CBU account type."""
    _inherit = 'res.partner.bank'

    @api.model
    def _get_supported_account_types(self):
        """ Add new account type named cbu used in Argentina """
        res = super()._get_supported_account_types()
        res.append(('cbu', _('CBU')))
        return res

    @api.model
    def retrieve_acc_type(self, acc_number):
        # Classify the account as 'cbu' when it passes CBU validation;
        # otherwise defer to the standard detection.
        try:
            validate_cbu(acc_number)
        except Exception:
            return super().retrieve_acc_type(acc_number)
        return 'cbu'
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
f20051d3520c006f6e93f0dd5d0c2b82242fdb65 | 8633ec7985ffd7f849210b93bc20e632f8ae8707 | /rereco/additional_rereco/EvtGeneration/conf_files_cmsdr/SingleEl/SingleElectronPt10_RECO.py | 9580777303bf223fbc588f75970dc744677249b6 | [] | no_license | liis/el_track | 2ed5b3b7a64d57473328df0e5faf28808bab6166 | cd7978e5fa95d653bab5825b940911b465172c1a | refs/heads/master | 2016-09-10T20:09:07.882261 | 2015-01-08T14:41:59 | 2015-01-08T14:41:59 | 14,494,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,851 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1 --conditions PRE_STA71_V4::All -s RAW2DIGI,L1Reco,RECO --datatier GEN-SIM-RECO-RECODEBUG -n -1 --magField 38T_PostLS1 --eventcontent RECODEBUG --io ./outfiles/testing/SingleElectronPt10_RECO.io --python ./conf_files_cmsdr/testing/SingleElectronPt10_RECO.py --filein file:./outfiles/testing/SingleElectronPt10_RAW.root --fileout file:./outfiles/testing/SingleElectronPt10_RECO.root
import FWCore.ParameterSet.Config as cms
process = cms.Process('RECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring('file:SingleElectronPt10_RAW.root')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('--customise nevts:-1'),
name = cms.untracked.string('Applications')
)
# Output definition
outfile="SingleElectronPt10_RECO.root"
print "Saving output to: " + outfile
process.RECODEBUGoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECODEBUGEventContent.outputCommands,
fileName = cms.untracked.string('file:' + outfile ),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-RECO-RECODEBUG')
)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'PRE_STA71_V4::All', '')
process.RECODEBUGoutput.outputCommands.append('keep *PSimHits_g4SimHits_*_*') # needed for SimHitTPAssocProducer (not saved in RECODEBUG, but only in FEVTDEBUG)
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECODEBUGoutput_step = cms.EndPath(process.RECODEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECODEBUGoutput_step)
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1
#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1(process)
# End of customisation functions
| [
"polaarrebane@gmail.com"
] | polaarrebane@gmail.com |
4d76d12c7c683eeb8987fa016591c981bc9da2f8 | b58f43f49559265584d0bac330993d6e68729499 | /FixValueStopLoss.py | 64d14ba8fc41aaa9ab9a6d49faf392114dd0a4a6 | [] | no_license | xiehai1983/MyPyLib | c096c3c60837db4b34a26aed88794a10a0f6b1e8 | 9d8e18443dac42325bb11525112deb59eb49ab9b | refs/heads/master | 2021-09-21T22:23:38.478730 | 2018-09-01T16:29:21 | 2018-09-01T16:29:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,482 | py | # -*- coding: utf-8 -*-
"""
策略本身不带出场,全靠止盈止损出场,所以在止损取数时没有下限
从进场点开始,while True向下取数(还要判断是否达到原始数据下限),如果达到止损或者止盈点,就break出来
使用1min的high和low来模拟tick,1min数据不做阴阳线预处理,如果1min同时满足止盈和止损,则取止损作为结果
"""
import pandas as pd
import DATA_CONSTANTS as DC
import numpy as np
import os
import ResultStatistics as RS
import multiprocessing
def fix_value_stop_loss(strategyName, symbolInfo, K_MIN, setname, bar1mdic, barxmdic, result_para_dic, spr, slr, tofolder, indexcols):
print ("fix_value_stop_loss: setname:%s, spr%.1f slr%.1f" % (setname, spr, slr))
positionRatio = result_para_dic['positionRatio']
initialCash = result_para_dic['initialCash']
symbol = symbolInfo.domain_symbol
bt_folder = "%s %d backtesting\\" % (symbol, K_MIN)
oprdf = pd.read_csv(bt_folder + strategyName + ' ' + symbol + str(K_MIN) + ' ' + setname + ' result.csv')
symbolDomainDic = symbolInfo.amendSymbolDomainDicByOpr(oprdf)
bar1m = DC.getDomainbarByDomainSymbol(symbolInfo.getSymbolList(), bar1mdic, symbolDomainDic)
barxm = DC.getDomainbarByDomainSymbol(symbolInfo.getSymbolList(), barxmdic, symbolDomainDic)
#bar1m.set_index('utc_time', inplace=True)
barxm.set_index('utc_time', inplace=True)
oprdf['new_closeprice'] = oprdf['closeprice']
oprdf['new_closetime'] = oprdf['closetime']
oprdf['new_closeindex'] = oprdf['closeindex']
oprdf['new_closeutc'] = oprdf['closeutc']
oprdf['max_opr_gain'] = 0 # 本次操作期间的最大收益
oprdf['min_opr_gain'] = 0 # 本次操作期间的最小收益
oprdf['max_dd'] = 0
oprnum = oprdf.shape[0]
pricetick = symbolInfo.getPriceTick()
worknum = 0
for i in range(oprnum):
opr = oprdf.iloc[i]
#startutc = (barxm.loc[barxm['utc_time'] == opr.openutc]).iloc[0].utc_endtime - 60 # 从开仓的10m线结束后开始
#endutc = (barxm.loc[barxm['utc_time'] == opr.closeutc]).iloc[0].utc_endtime # 一直到平仓的10m线结束
openutc = opr.openutc
openprice = opr.openprice
startutc = barxm.loc[openutc].utc_endtime - 60
#spv = barxm.iloc[openutc].ATR * spr
#slv = barxm.iloc[openutc].ATR * slr
spv = 5 # 固定取值
slv = 8 # 固定取值
oprtype = opr.tradetype
openprice = opr.openprice
start_index_1m = bar1m[bar1m['utc_time'].isin([startutc])].index[0] # 开仓位置在1m数据中的index,要从下一根开始算止盈止损
while True:
start_index_1m += 1
high_1m = bar1m.loc[start_index_1m,'high']
low_1m = bar1m.loc[start_index_1m].low
if oprtype == 1:
if low_1m <= (openprice - slv):
# 最低值达到止损门限
oprdf.ix[i, 'new_closeprice'] = openprice - slv
oprdf.ix[i, 'new_closetime'] = bar1m.iloc[start_index_1m].strtime
oprdf.ix[i, 'new_closeindex'] = start_index_1m
oprdf.ix[i, 'new_closeutc'] = bar1m.iloc[start_index_1m].utc_time
break
elif high_1m >= (openprice + spv):
# 最大值达到止盈门限
oprdf.ix[i, 'new_closeprice'] = openprice + spv
oprdf.ix[i, 'new_closetime'] = bar1m.iloc[start_index_1m].strtime
oprdf.ix[i, 'new_closeindex'] = start_index_1m
oprdf.ix[i, 'new_closeutc'] = bar1m.iloc[start_index_1m].utc_time
break
elif oprtype == -1:
if high_1m >= (openprice + slv):
# 最大值达到止损门限
oprdf.ix[i, 'new_closeprice'] = openprice + slv
oprdf.ix[i, 'new_closetime'] = bar1m.iloc[start_index_1m].strtime
oprdf.ix[i, 'new_closeindex'] = start_index_1m
oprdf.ix[i, 'new_closeutc'] = bar1m.iloc[start_index_1m].utc_time
break
elif low_1m <= (openprice - spv):
# 最大值达到止盈门限
oprdf.ix[i, 'new_closeprice'] = openprice - spv
oprdf.ix[i, 'new_closetime'] = bar1m.iloc[start_index_1m].strtime
oprdf.ix[i, 'new_closeindex'] = start_index_1m
oprdf.ix[i, 'new_closeutc'] = bar1m.iloc[start_index_1m].utc_time
break
else:
# 被去极值的操作,oprtype为0,不做止损操作
pass
slip = symbolInfo.getSlip()
# 2017-12-08:加入滑点
oprdf['new_ret'] = ((oprdf['new_closeprice'] - oprdf['openprice']) * oprdf['tradetype']) - slip
oprdf['new_ret_r'] = oprdf['new_ret'] / oprdf['openprice']
# 去极值:在parallel的去极值结果上,把极值的new_ret和new_ret_r值0
if result_para_dic['remove_polar_switch']:
oprdf.loc[oprdf['tradetype']==0, 'new_ret'] = 0
oprdf.loc[oprdf['tradetype']==0, 'new_ret_r'] = 0
oprdf['new_commission_fee'], oprdf['new_per earn'], oprdf['new_own cash'], oprdf['new_hands'] = RS.calcResult(oprdf,
symbolInfo,
initialCash,
positionRatio, ret_col='new_ret')
# 保存新的result文档
oprdf.to_csv(tofolder + strategyName + ' ' + symbol + str(K_MIN) + ' ' + setname + ' resultDSL_by_tick.csv', index=False)
olddailydf = pd.read_csv(strategyName + ' ' + symbol + str(K_MIN) + ' ' + setname + ' dailyresult.csv', index_col='date')
# 计算统计结果
oldr = RS.getStatisticsResult(oprdf, False, indexcols, olddailydf)
barxm.reset_index(drop=False, inplace=True)
dailyK = DC.generatDailyClose(barxm)
dR = RS.dailyReturn(symbolInfo, oprdf, dailyK, initialCash) # 计算生成每日结果
dR.calDailyResult()
dR.dailyClose.to_csv((tofolder + strategyName + ' ' + symbol + str(K_MIN) + ' ' + setname + ' dailyresultDSL_by_tick.csv'))
newr = RS.getStatisticsResult(oprdf, True, indexcols, dR.dailyClose)
del oprdf
# return [setname,slTarget,worknum,oldendcash,oldAnnual,oldSharpe,oldDrawBack,oldSR,newendcash,newAnnual,newSharpe,newDrawBack,newSR,max_single_loss_rate]
print newr
return [setname, spr, slr, worknum] + oldr + newr
if __name__ == '__main__':
import datetime
# 参数配置
exchange_id = 'SHFE'
sec_id = 'RB'
symbol = '.'.join([exchange_id, sec_id])
K_MIN = 600
topN = 5000
pricetick = DC.getPriceTick(symbol)
slip = pricetick
starttime = '2016-01-01'
endtime = '2018-03-31'
# 优化参数
stoplossStep = -0.002
# stoplossList = np.arange(-0.022, -0.042, stoplossStep)
stoplossList = [-0.022]
# 文件路径
currentpath = DC.getCurrentPath()
bar1m = DC.getBarData(symbol=symbol, K_MIN=60, starttime=starttime + ' 00:00:00', endtime=endtime + ' 00:00:00')
barxm = DC.getBarData(symbol=symbol, K_MIN=K_MIN, starttime=starttime + ' 00:00:00', endtime=endtime + ' 00:00:00')
# bar1m计算longHigh,longLow,shortHigh,shortLow
bar1m['longHigh'] = bar1m['high']
bar1m['shortHigh'] = bar1m['high']
bar1m['longLow'] = bar1m['low']
bar1m['shortLow'] = bar1m['low']
bar1m['highshift1'] = bar1m['high'].shift(1).fillna(0)
bar1m['lowshift1'] = bar1m['low'].shift(1).fillna(0)
bar1m.loc[bar1m['open'] < bar1m['close'], 'longHigh'] = bar1m['highshift1']
bar1m.loc[bar1m['open'] > bar1m['close'], 'shortLow'] = bar1m['lowshift1']
timestart = datetime.datetime.now()
dslCal(symbol, K_MIN, 'Set0 MS3 ML8 KN6 DN6', bar1m, barxm, pricetick, slip, -0.022, currentpath + '\\')
timedsl = timestart - datetime.datetime.now()
timestart = datetime.datetime.now()
fastDslCal(symbol, K_MIN, 'Set0 MS3 ML8 KN6 DN6', bar1m, barxm, pricetick, slip, -0.022, currentpath + '\\')
timefast = timestart - datetime.datetime.now()
print "time dsl cost:", timedsl
print "time fast cost:", timefast
print 'fast delta:', timefast - timedsl
| [
"smartgang@126.com"
] | smartgang@126.com |
756dbc3df7bc68f0e80a6229fbfb208cda5d9bf9 | 874fc4e4eac88ccd037110ce5f48b930c83bb4b3 | /db/actions/add_field.py | bc0d3f11c3b84886a92cb5707566761baf40466e | [] | no_license | persontianshuang/mafter | d0231519d9e82bd0a6aa8e42aa11a5a8a37c407c | 64d9382917bffc16f0422e0fe6e300f48c95c79a | refs/heads/master | 2021-01-23T16:25:34.535702 | 2017-09-27T09:16:03 | 2017-09-27T09:16:03 | 102,740,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from db.config import _Db
from db.sentences.sentence import Sentences
from db.subFlow.flow import Flow
# for x in _Db['sentences'].find():
# print(x)
# for x in Sentences.objects.all():
# x.type = 'video'
# x.save()
# for x in Flow.objects.all():
# x.type = 'video'
# x.save()
new_flow = Flow()
new_flow.name = '能力考N3'
new_flow.type = 'ntext'
new_flow.save()
| [
"mengyouhan@gmail.com"
] | mengyouhan@gmail.com |
57f4ee94bd87e3f3ad1a8d105c30c3bc127bd6c7 | 1513d0d708b8789f8d85fbd2a8ff46e863d16cd6 | /day_two/Exercise1.py | 6fbb7e0133dd5189518d654e7df31d1a7676ca4c | [] | no_license | zingpython/february2018 | ff9d0f64d6f68d5b0f22b87eaab202d06a85f224 | 0edcdd85bfbec168c7daf5a88bb06ce1b58062f7 | refs/heads/master | 2021-05-04T05:34:58.032678 | 2018-02-22T18:40:05 | 2018-02-22T18:40:05 | 120,341,634 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
for number in range(1, 101):
if number % 3 == 0 and number % 5 == 0:
print("FizzBuzz")
elif number % 3 == 0:
print("Fizz")
elif number % 5 == 0:
print("Buzz")
else:
print(number) | [
"selpathor@verizon.net"
] | selpathor@verizon.net |
f6dd1ef36f6a06a7afb6323e9d3df94e4689cc62 | db053c220094368ecb784fbe62375378c97457c2 | /810.chalkboard-xor-game.py | 541ac93fa7085e6032b36a1b6febb1ee272fdd8d | [] | no_license | thegamingcoder/leetcode | 8c16e7ac9bda3e34ba15955671a91ad072e87d94 | 131facec0a0c70d319982e78e772ed1cb94bc461 | refs/heads/master | 2020-03-22T14:51:45.246495 | 2018-07-09T00:00:06 | 2018-07-09T00:00:06 | 140,211,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #
# [828] Chalkboard XOR Game
#
# https://leetcode.com/problems/chalkboard-xor-game/description/
#
# algorithms
# Hard (38.94%)
# Total Accepted: 1.4K
# Total Submissions: 3.5K
# Testcase Example: '[1,1,2]'
#
# We are given non-negative integers nums[i] which are written on a
# chalkboard. Alice and Bob take turns erasing exactly one number from the
# chalkboard, with Alice starting first. If erasing a number causes the
# bitwise XOR of all the elements of the chalkboard to become 0, then that
# player loses. (Also, we'll say the bitwise XOR of one element is that
# element itself, and the bitwise XOR of no elements is 0.)
#
# Also, if any player starts their turn with the bitwise XOR of all the
# elements of the chalkboard equal to 0, then that player wins.
#
# Return True if and only if Alice wins the game, assuming both players play
# optimally.
#
#
# Example:
# Input: nums = [1, 1, 2]
# Output: false
# Explanation:
# Alice has two choices: erase 1 or erase 2.
# If she erases 1, the nums array becomes [1, 2]. The bitwise XOR of all the
# elements of the chalkboard is 1 XOR 2 = 3. Now Bob can remove any element he
# wants, because Alice will be the one to erase the last element and she will
# lose.
# If Alice erases 2 first, now nums becomes [1, 1]. The bitwise XOR of all the
# elements of the chalkboard is 1 XOR 1 = 0. Alice will lose.
#
#
#
# Notes:
#
#
# 1 <= N <= 1000.
# 0 <= nums[i] <= 2^16.
#
#
#
#
#
class Solution(object):
def xorGame(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
| [
"sharanbale@yahoo-inc.com"
] | sharanbale@yahoo-inc.com |
fce3df2153532f4e0401e80f02abcd99ab77ed8f | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/matplotlib/tests/test_gridspec.py | 70d1ee132851d785ff973695a03dadb7b10f2947 | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 626 | py | import matplotlib.gridspec as gridspec
import pytest
def test_equal():
gs = gridspec.GridSpec(2, 1)
assert gs[0, 0] == gs[0, 0]
assert gs[:, 0] == gs[:, 0]
def test_width_ratios():
"""
Addresses issue #5835.
See at https://github.com/matplotlib/matplotlib/issues/5835.
"""
with pytest.raises(ValueError):
gridspec.GridSpec(1, 1, width_ratios=[2, 1, 3])
def test_height_ratios():
"""
Addresses issue #5835.
See at https://github.com/matplotlib/matplotlib/issues/5835.
"""
with pytest.raises(ValueError):
gridspec.GridSpec(1, 1, height_ratios=[2, 1, 3])
| [
"kitae.yoon@deliveryhero.co.kr"
] | kitae.yoon@deliveryhero.co.kr |
6b87d5ce8490d1eb8056fb41b49cc0fa2608ceee | d1ef84d05beedc811161314800193ded398bff07 | /tests/test_database_crudmixin.py | 1767d5202f8df4c91803f6ee4b79e1def990b02d | [
"MIT"
] | permissive | spookey/observatory | 8f4a98aeb214182124bc6a4ab6d1ddac697cd0bc | be5cc92f53f12e6341e7e3040f26360e54cfdf7d | refs/heads/master | 2023-04-22T03:31:34.879735 | 2021-01-16T17:50:07 | 2021-01-16T17:50:07 | 224,500,136 | 0 | 0 | MIT | 2021-05-12T03:53:02 | 2019-11-27T19:11:24 | Python | UTF-8 | Python | false | false | 3,032 | py | from pytest import mark
from observatory.database import TXT_LEN_SHORT, CRUDMixin
from observatory.start.extensions import DB
# pylint: disable=no-member
PAYLOAD = 'omg wtf bbq'
LAYPOAD = 'napfkuchen!'
class CRUDMixinPhony(CRUDMixin, DB.Model):
prime = DB.Column(DB.Integer(), primary_key=True)
value = DB.Column(DB.String(length=TXT_LEN_SHORT))
@mark.usefixtures('session')
class TestCRUDMixin:
@staticmethod
def test_create_no_commit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=False)
assert crud.prime is None
assert crud.value == PAYLOAD
assert crud in CRUDMixinPhony.query.all()
@staticmethod
def test_create_commit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=True)
assert crud.prime == 1
assert crud.value == PAYLOAD
assert crud in CRUDMixinPhony.query.all()
@staticmethod
def test_update_no_comit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=False)
assert crud.value == PAYLOAD
crud.update(value=LAYPOAD, _commit=False)
assert crud.value == LAYPOAD
@staticmethod
def test_update_comit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=True)
assert crud.value == PAYLOAD
crud.update(value=LAYPOAD, _commit=True)
assert crud.value == LAYPOAD
@staticmethod
def test_save_no_commit(session):
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=False)
assert crud not in session.dirty
crud.value = LAYPOAD
assert crud not in session.dirty
crud.save(_commit=False)
assert crud not in session.dirty
@staticmethod
def test_save_commit(session):
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=True)
assert crud not in session.dirty
crud.value = LAYPOAD
assert crud in session.dirty
crud.save(_commit=True)
assert crud not in session.dirty
@staticmethod
def test_delete_no_commit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=False)
assert crud in CRUDMixinPhony.query.all()
crud.delete(_commit=False)
assert crud not in CRUDMixinPhony.query.all()
@staticmethod
def test_delete_commit():
crud = CRUDMixinPhony.create(value=PAYLOAD, _commit=True)
assert crud in CRUDMixinPhony.query.all()
crud.delete(_commit=True)
assert crud not in CRUDMixinPhony.query.all()
@staticmethod
def test_logging(caplog):
crud = CRUDMixinPhony.create(value='yes', _commit=True)
log_c, log_s = caplog.records[-2:]
assert 'creating' in log_c.message.lower()
assert 'saving' in log_s.message.lower()
crud.update(value='no')
log_u, log_s = caplog.records[-2:]
assert 'updating' in log_u.message.lower()
assert 'saving' in log_s.message.lower()
crud.delete()
log_d = caplog.records[-1]
assert 'deleting' in log_d.message.lower()
| [
"frieder.griesshammer@der-beweis.de"
] | frieder.griesshammer@der-beweis.de |
c291d5f571a0f7d5576a959395261c1c80e20196 | 6879a8596df6f302c63966a2d27f6b4d11cc9b29 | /abc/problems110/108/c.py | 01a176ce06f31360c7967885d3140fefde3cf214 | [] | no_license | wkwkgg/atcoder | 41b1e02b88bf7a8291b709306e54cb56cb93e52a | 28a7d4084a4100236510c05a88e50aa0403ac7cd | refs/heads/master | 2020-07-26T03:47:19.460049 | 2020-03-01T18:29:57 | 2020-03-01T18:29:57 | 208,523,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | N, K = map(int, input().split())
nums = [0] * K
for i in range(1, N + 1):
nums[i % K] += 1
ans = 0
for i in range(K):
b = (K - i) % K
c = (K - i) % K
if (b + c) % K != 0: continue
ans += nums[i] * nums[b] * nums[c]
print(ans)
| [
"yujin@komachi.live"
] | yujin@komachi.live |
9b21a1d828f30ab5a1d04f765922419abe11a89c | a5408385bc6cc06cbc783652bd4d019af184ca7c | /examples/diffusion/sinbc.py | 5020f09b30d418719f4cbb6a07543487260f4fb9 | [
"BSD-3-Clause"
] | permissive | snilek/sfepy | 5a65d2e49c1d49d1a50f1d6d080f6e0f2f78e9f0 | 7f50684441cbbd3c7497cb32ba63ae4d1bf3ce28 | refs/heads/master | 2021-01-15T12:36:10.195016 | 2014-05-06T11:59:02 | 2014-05-06T11:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,826 | py | r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
This example demonstrates how to use a hierarchical basis approximation - it
uses the fifth order Lobatto polynomial space for the solution. The adaptive
linearization is applied in order to save viewable results, see both the
options keyword and the ``post_process()`` function that computes the solution
gradient. Use the following commands to view the results (assuming default
output directory and names)::
$ ./postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
$ ./postproc.py -b 2_4_2_refined_grad.vtk
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the original
two-element mesh before the actual solution.
"""
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, Domain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = Domain(mesh.name, mesh)
for ii in range(3):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
domain.mesh.name = '2_4_2_refined'
return domain.mesh
elif mode == 'write':
pass
def post_process(out, pb, state, extend=False):
"""
Calculate gradient of the solution.
"""
from sfepy.discrete.fem.fields_base import create_expression_output
aux = create_expression_output('ev_grad.ie.Elements( t )',
'grad', 'temperature',
pb.fields, pb.get_materials(),
pb.get_variables(), functions=pb.functions,
mode='qp', verbose=False,
min_level=0, max_level=5, eps=1e-3)
out.update(aux)
return out
filename_mesh = UserMeshIO(mesh_hook)
# Get the mesh bounding box.
io = MeshIO.any_from_filename(base_mesh)
bbox, dim = io.read_bounding_box(ret_dim=True)
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'post_process',
'linearization' : {
'kind' : 'adaptive',
'min_level' : 0, # Min. refinement level to achieve everywhere.
'max_level' : 5, # Max. refinement level.
'eps' : 1e-3, # Relative error tolerance.
},
}
materials = {
'coef' : ({'val' : 1.0},),
}
regions = {
'Omega' : 'all',
}
regions.update(define_box_regions(dim, bbox[0], bbox[1], 1e-5))
fields = {
'temperature' : ('real', 1, 'Omega', 5, 'H1', 'lobatto'),
# Compare with the Lagrange basis.
## 'temperature' : ('real', 1, 'Omega', 5, 'H1', 'lagrange'),
}
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
amplitude = 1.0
def ebc_sin(ts, coor, **kwargs):
x0 = 0.5 * (coor[:, 1].min() + coor[:, 1].max())
val = amplitude * nm.sin( (coor[:, 1] - x0) * 2. * nm.pi )
return val
ebcs = {
't1' : ('Left', {'t.0' : 'ebc_sin'}),
't2' : ('Right', {'t.0' : -0.5}),
't3' : ('Top', {'t.0' : 1.0}),
}
functions = {
'ebc_sin' : (ebc_sin,),
}
equations = {
'Temperature' : """dw_laplace.10.Omega( coef.val, s, t ) = 0"""
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
80949b51021d641887cbf7cfdd89a8444cd9394f | 664bb3b0d806b3d17b1f4c5b87e569dcafac9710 | /0x03-python-data_structures/8-multiple_returns.py | cb9621277c528249e82d3c718e5867e6060a5b74 | [] | no_license | emmanavarro/holbertonschool-higher_level_programming | 9f120234b0521ad8330307af303f5f587764f30a | 2cae27d29d11035f62742240e1d1a5e385be075c | refs/heads/master | 2022-12-25T22:24:36.183806 | 2020-09-24T23:35:59 | 2020-09-24T23:35:59 | 259,382,761 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | #!/usr/bin/python3
def multiple_returns(sentence):
if sentence is "":
tupl = (len(sentence), None)
return tupl
else:
tupl = (len(sentence), sentence[0])
return tupl
| [
"elnavarro55@gmail.com"
] | elnavarro55@gmail.com |
681013f7bacd0db8a1b4d25d995954b7fe7df8ed | 43df78355915e3f41f432579c5840816f52a8ace | /Functions/Two/Calc_NDM.py | e484e15cd3977780679d69c0bc37613e93af8a36 | [
"Apache-2.0"
] | permissive | spareeth/wa | fd7617fafe065de83249cf817df25cf9ca164518 | 57bb0c93af1bab3b6f8bc30cbc941aa14ac2696b | refs/heads/master | 2020-03-06T15:33:26.208373 | 2018-03-22T08:04:06 | 2018-03-22T08:04:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,117 | py | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Function/Two
"""
# import general python modules
import os
import gdal
import numpy as np
import pandas as pd
import glob
def NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate):
"""
This functions calculated monthly NDM based on the yearly NPP and monthly GPP.
Parameters
----------
Dir_Basin : str
Path to all the output data of the Basin
Data_Path_GPP : str
Path from the Dir_Basin to the GPP data
Data_Path_NPP : str
Path from the Dir_Basin to the NPP data
Startdate : str
Contains the start date of the model 'yyyy-mm-dd'
Enddate : str
Contains the end date of the model 'yyyy-mm-dd'
Simulation : int
Defines the simulation
Returns
-------
Data_Path_NDM : str
Path from the Dir_Basin to the normalized dry matter data
"""
# import WA+ modules
import wa.General.data_conversions as DC
import wa.General.raster_conversions as RC
# Define output folder for Normalized Dry Matter
Data_Path_NDM = "NDM"
out_folder = os.path.join(Dir_Basin, Data_Path_NDM)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
# Define input folders
GPP_folder = os.path.join(Dir_Basin, Data_Path_GPP)
NPP_folder = os.path.join(Dir_Basin, Data_Path_NPP)
# Define monthly time steps that will be created
Dates = pd.date_range(Startdate, Enddate, freq = 'MS')
# Define the years that will be calculated
Year_Start = int(Startdate[0:4])
Year_End = int(Enddate[0:4])
Years = range(Year_Start, Year_End+1)
# Loop over the years
for year in Years:
# Change working directory to the NPP folder
os.chdir(NPP_folder)
# Open yearly NPP data
yearly_NPP_File = glob.glob('*yearly*%d.01.01.tif' %int(year))[0]
Yearly_NPP = RC.Open_tiff_array(yearly_NPP_File)
# Get the No Data Value of the NPP file
dest = gdal.Open(yearly_NPP_File)
NDV = dest.GetRasterBand(1).GetNoDataValue()
# Set the No Data Value to Nan
Yearly_NPP[Yearly_NPP == NDV] = np.nan
# Change working directory to the GPP folder
os.chdir(GPP_folder)
# Find all the monthly files of that year
monthly_GPP_Files = glob.glob('*monthly*%d.*.01.tif' %int(year))
# Check if it are 12 files otherwise something is wrong and send the ERROR
if not len(monthly_GPP_Files) == 12:
print 'ERROR: Some monthly GPP Files are missing'
# Get the projection information of the GPP inputs
geo_out, proj, size_X, size_Y = RC.Open_array_info(monthly_GPP_Files[0])
if int(proj.split('"')[-2]) == 4326:
proj = "WGS84"
# Get the No Data Value of the GPP files
dest = gdal.Open(monthly_GPP_Files[0])
NDV = dest.GetRasterBand(1).GetNoDataValue()
# Create a empty numpy array
Yearly_GPP = np.zeros([size_Y, size_X])
# Calculte the total yearly GPP
for monthly_GPP_File in monthly_GPP_Files:
# Open array
Data = RC.Open_tiff_array(monthly_GPP_File)
# Remove nan values
Data[Data == NDV] = np.nan
# Add data to yearly sum
Yearly_GPP += Data
# Loop over the monthly dates
for Date in Dates:
# If the Date is in the same year as the yearly NPP and GPP
if Date.year == year:
# Create empty GPP array
monthly_GPP = np.ones([size_Y, size_X]) * np.nan
# Get current month
month = Date.month
# Get the GPP file of the current year and month
monthly_GPP_File = glob.glob('*monthly_%d.%02d.01.tif' %(int(year), int(month)))[0]
monthly_GPP = RC.Open_tiff_array(monthly_GPP_File)
monthly_GPP[monthly_GPP == NDV] = np.nan
# Calculate the NDM based on the monthly and yearly NPP and GPP (fraction of GPP)
Monthly_NDM = Yearly_NPP * monthly_GPP / Yearly_GPP * (30./12.) *10000 # kg/ha
# Define output name
output_name = os.path.join(out_folder, 'NDM_MOD17_kg_ha-1_monthly_%d.%02d.01.tif' %(int(year), int(month)))
# Save the NDM as tiff file
DC.Save_as_tiff(output_name, Monthly_NDM, geo_out, proj)
return(Data_Path_NDM)
| [
"timhessels@hotmail.com"
] | timhessels@hotmail.com |
73afcff6d269bc975c7d86680ee09916cd096372 | 36407bb880c5ca94331f1bc44b85be58bba6f352 | /apps/rating/migrations/0001_initial.py | 6e902a0de600f12a5514e21ac3aec25a4d1f5c37 | [] | no_license | raw-data-tech/home-garden | 28175f66d126fa57f652fce22cd89ab44c514bba | 8767815b010b3d7d83927e912b3e055374c06111 | refs/heads/master | 2020-12-24T17:17:05.629239 | 2015-07-11T09:05:52 | 2015-07-11T09:05:52 | 38,920,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rating', models.PositiveIntegerField(verbose_name=b'rating', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
('remark', models.CharField(max_length=200, null=True, blank=True)),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('order', models.ForeignKey(related_name=b'ratings', to='orders.Order')),
('user', models.ForeignKey(related_name=b'ratings', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| [
"navajyothms1989@gmail.com"
] | navajyothms1989@gmail.com |
d29a74acd9b141fc75873408944238d813de7f96 | d2189145e7be2c836017bea0d09a473bf1bc5a63 | /Reposicion_cuarta clase/contar los numeros multiplos de 3 que hay entre 1-100.py | 45d3f1bff695c5720b917c501d80e6ad0046391f | [] | no_license | emilianoNM/Tecnicas3 | 12d10ce8d78803c8d2cd6a721786a68f7ee2809d | 6ad7f0427ab9e23643a28ac16889bca8791421d0 | refs/heads/master | 2020-03-25T18:06:34.126165 | 2018-11-24T04:42:14 | 2018-11-24T04:42:14 | 144,013,045 | 3 | 5 | null | 2018-09-14T10:47:26 | 2018-08-08T12:49:57 | Python | UTF-8 | Python | false | false | 226 | py | ### Imprimir y contar los numeros multiplos de 3 que hay entre 1 y 100.
n = 1
h = 0
while n < 100:
if n%3 == 0:
print n,
h += 1
n += 1
print '\nEntre 1 y 100 hay %i numeros multiplos de 3' % h
| [
"noreply@github.com"
] | emilianoNM.noreply@github.com |
ed33bc3916a5067ac5211b768fae4fa08ec4d051 | 900aaf3f7d0063ed3b4a90d7afc0e75bb847a1f2 | /hash_tables/group_shifted_strings.py | 33bd6b693a830615e45efabb37aabb75be2ebe35 | [] | no_license | rjcrter11/leetChallenges | 5797fbdd818525af1fec8d2907d03fe9e4c586fb | bfd0ee6221310c88a619ec3203e281f4b0cc8184 | refs/heads/master | 2023-04-21T06:53:02.887548 | 2021-05-24T12:24:13 | 2021-05-24T12:24:13 | 283,039,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | '''Given a string, we can "shift" each of its letter to its successive letter,
for example: "abc" -> "bcd". We can keep "shifting" which forms the sequence:
"abc" -> "bcd" -> ... -> "xyz"
Given a list of non-empty strings which contains only lowercase alphabets,
group all strings that belong to the same shifting sequence.
Example:
Input: ["abc", "bcd", "acef", "xyz", "az", "ba", "a", "z"],
Output:
[
["abc","bcd","xyz"],
["az","ba"],
["acef"],
["a","z"]
]
'''
from collections import defaultdict
def groupStrings(strings):
def diff(s): return tuple((ord(a) - ord(b)) % 26 for a, b in zip(s, s[1:]))
d = defaultdict(list)
for s in strings:
d[diff(s)].append(s)
return d.values()
strings = ["abc", "bcd", "acef", "xyz", "az", "ba", "a", "z"]
print(groupStrings(strings))
| [
"rjcrter11@gmail.com"
] | rjcrter11@gmail.com |
682ce86400bb9c2269a57401c0661a75d61c9b53 | 5174acc0ca3a8582711881dcf6a1a36663e964a9 | /servicios_aplicacion/selector_entrada.py | f35868471f873fe833cdcd7dece7f9bd1a598ca8 | [] | no_license | vvalotto/fiuner_termostato | b9ac7918458d06a479f516bd3f7a2550bb4d6b78 | a3e81040672a438ea512895016201cb93104469e | refs/heads/master | 2020-05-24T05:06:45.940613 | 2019-07-01T10:21:51 | 2019-07-01T10:21:51 | 187,106,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | """
Clase Responsable del establecimiento de la temperatura deseada
"""
from gestores_entidades.gestor_ambiente import *
class SelectorEntradaTemperatura:
    """Handles user input for setting the desired temperature.

    Collaborates with a gestor_ambiente (environment manager) and with the
    input devices produced by `Configurador` -- presumably brought into
    scope by the star import above; TODO confirm.
    """

    def __init__(self, gestor_ambiente):
        """
        Build the collaborators this class needs.
        """
        self._seteo_temperatura = Configurador.configurar_seteo_temperatura()
        self._selector_temperatura = Configurador.configurar_selector_temperatura()
        self._gestor_ambiente = gestor_ambiente
        return

    def ejecutar(self):
        """
        Periodic execution to check whether the user wants to set the
        temperature.  While the selector reads "deseada" it keeps cycling,
        showing the desired temperature and reading setpoint input; once the
        selector changes, the display is switched back to ambient.
        :return:
        """
        while self._selector_temperatura.obtener_selector() == "deseada":
            self._mostrar_temperatura_deseada()
            self._obtener_seteo_temperatura_deseada()
        self._gestor_ambiente.indicar_temperatura_a_mostrar("ambiente")
        return

    def _mostrar_temperatura_deseada(self):
        # Switch the display to the desired temperature and show it.
        self._gestor_ambiente.indicar_temperatura_a_mostrar("deseada")
        self._gestor_ambiente.mostrar_temperatura()
        return

    def _obtener_seteo_temperatura_deseada(self):
        # Poll the setpoint device and bump the desired temperature up/down.
        opcion = self._seteo_temperatura.obtener_seteo()
        if opcion == "aumentar":
            self._gestor_ambiente.aumentar_temperatura_deseada()
        if opcion == "disminuir":
            self._gestor_ambiente.disminuir_temperatura_deseada()
        return
| [
"vvalotto@gmail.com"
] | vvalotto@gmail.com |
8b46d9b6b65b83f63d4903ea99257f96f206a664 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1484496_0/Python/Zunekid/Q3.py | af300c784a31e261db76d4435001ac53e1270e58 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | import string
# Google Code Jam solver state (Python 2 source): `summap` maps a subset sum
# to the list of bitmasks that produce it; `datas` holds the numbers of the
# current test case.
fn = "C-small-attempt0.in"
f= open(fn,'r')
summap = {}
datas = []
def decode(b1, b2):
    """Translate two subset bitmasks over the global `datas` into value lists.

    Bit i of b1 / b2 selects datas[i].  Returns (list1, list2) when both
    subsets are non-empty; otherwise falls through and returns None.
    """
    list1 = []
    list2 = []
    place = -1
    for x in xrange( len(datas)):  # Python 2 (xrange)
        place+=1
        if b1 &1 == 1:  # (b1 & 1) == 1 -- test the low bit
            list1.append(datas[place])
        b1 = b1>>1
        if b2 &1 == 1:
            list2.append(datas[place])
        b2 = b2>>1
    if len(list1)>= 1 and len(list2) >=1 :
        return (list1, list2)
    #print "OMG"
def testadd(newset):
    """Look for two disjoint equal-sum subsets, else record the new ones.

    `newset` holds (sum, bitmask) pairs.  If `summap` already stores a mask
    with the same sum that is disjoint from a new one (masks share no bits),
    that pair of subsets is the answer and decode() of it is returned.
    Otherwise every new pair is merged into the global `summap` and None is
    returned.
    """
    #print newset, summap
    for each in newset:
        sum, bitmap = each
        if summap.has_key(sum):  # Python 2 dict API
            subs = summap[sum]
            for items in subs:
                if items & bitmap == 0:  # disjoint masks -> valid answer
                    #print "got you"
                    return decode(items, bitmap)
    for each in newset:
        sum, bitmap = each
        if summap.has_key(sum):
            summap[sum].append(bitmap)
        else:
            summap[sum] = [bitmap]
        #else:
        #summap[sum].append(bitmap)
        #return None
        #else:
        #summap[sum] = [bitmap]
    return None
# Main driver (Python 2): for each test case, grow all subset sums
# incrementally -- each new number is combined with every previously seen
# (sum, mask) pair -- and stop as soon as testadd() finds two disjoint
# subsets with the same sum.
tcase = int(f.readline())
for tc in xrange(tcase):
    line = f.readline()
    #print line
    linedata = line.split()
    n = int(linedata[0])
    #print n
    summap = {}
    datas = []
    res = None
    for d in xrange(n):
        #for d in xrange(3):
        data = int(linedata[d+1])
        sbm = 1<<d  # bitmask with only element d selected
        ns = data
        datas.append(data)
        newset= [(ns, sbm)]
        #res = testadd(ns, sbm)
        #if res != None:
        #    break
        #print summap
        # Extend every stored subset with the new element d.
        for k, subs in summap.items():
            for bm in subs:
                # make union
                nbm = (1<<d) | bm
                ns = data + k
                newset.append( (ns,nbm))
                #res = testadd(ns, nbm)
                # if res != None:
                #     break
            else:
                continue
            #if res!= None:
            #    break
        #testadd(newset)
        res = testadd(newset)
        #print summap
        #print
        if res != None:
            break
    # Print the two equal-sum subsets, or "Impossible".
    if res != None:
        print "Case #%d:"%(tc+1)
        s1= res[0]
        s2 = res[1]
        line1 = ""
        for i1 in s1:
            line1 = line1+ " " + str(i1)
        print line1[1:]
        line2 = ""
        for i2 in s2:
            line2 = line2+ " " + str(i2)
        print line2[1:]
    else:
        print "Case #%d:"% (tc+1)
        print "Impossible"
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
92942898af7680097c4452f2f8c748ea28e37f73 | 210b968876a8aea36eae94e4720e23f2daa8552b | /cover/cover.py | 3a74eb298af3aee1fe2a798f8583d1fc8cb90267 | [] | no_license | beefoo/coloring-book | 3ce5c0d5497b199cd470f640dd0b3778d545577f | cee77b7f863ddee4323c8c16111d353fe27e5b36 | refs/heads/master | 2021-01-12T09:37:36.490913 | 2017-06-26T19:07:43 | 2017-06-26T19:07:43 | 76,204,083 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,770 | py | # -*- coding: utf-8 -*-
import argparse
import calendar
import csv
import inspect
import math
import os
import svgwrite
from svgwrite import inch, px
import sys
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import lib.svgutils as svgu
import lib.mathutils as mu
# input
# Command-line options: data source, year range/step, and page geometry in
# inches (converted to pixels at 72 DPI below).
parser = argparse.ArgumentParser()
# input source: https://www.ncdc.noaa.gov/monitoring-references/faq/anomalies.php
parser.add_argument('-input', dest="INPUT_FILE", default="data/1880-2016_land_ocean.csv", help="Path to input file")
parser.add_argument('-y0', dest="YEAR_START", type=int, default=1880, help="Year start on viz")
parser.add_argument('-ys', dest="YEAR_STEP", type=int, default=1, help="Year step on viz")
parser.add_argument('-width', dest="WIDTH", type=float, default=8.5, help="Width of output file")
parser.add_argument('-height', dest="HEIGHT", type=float, default=11, help="Height of output file")
parser.add_argument('-pad', dest="PAD", type=float, default=0.25, help="Padding of output file")
parser.add_argument('-output', dest="OUTPUT_FILE", default="data/cover.svg", help="Path to output file")
# init input
args = parser.parse_args()
DPI = 72
PAD = args.PAD * DPI
WIDTH = args.WIDTH * DPI - PAD * 2
HEIGHT = args.HEIGHT * DPI - PAD * 2
YEAR_START = args.YEAR_START
YEAR_STEP = args.YEAR_STEP
values = []
# read csv
# Python 2 source: csv file opened in binary mode.  The first 4 lines are a
# header; then one (year, anomaly value) row per year.
with open(args.INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    for skip in range(4):
        next(r, None)
    # for each row
    i = 0
    for _year,_value in r:
        year = int(_year)
        if i % YEAR_STEP <= 0 and year >= YEAR_START:
            value = float(_value)
            values.append(value)
        if year >= YEAR_START:
            i += 1
count = len(values)
print "Read %s values from %s" % (count, args.INPUT_FILE)
# svg config
# COMPRESS_* shrink the chart inside the page; the chart is then centered by
# the offset calculations below.
COMPRESS_Y = 0.6667
COMPRESS_X = 0.99
LINE_HEIGHT = 30.0
COLOR = "#A92D2D"
COLOR_ALT = "#000000"
ADD_LINE = False
# svg calculations
chartW = WIDTH * COMPRESS_X
chartH = HEIGHT * COMPRESS_Y
offsetY = HEIGHT * (1-COMPRESS_Y) * 0.5
offsetX = WIDTH * (1-COMPRESS_X) * 0.5
# convert values to points
# Normalize each anomaly value to [0, 1] and map it to page coordinates
# (y is inverted so larger values are higher on the page).
minValue = min(values)
maxValue = max(values)
points = []
for i, v in enumerate(values):
    xp = 1.0 * i / count
    yp = 1.0 - (v - minValue) / (maxValue - minValue)
    x = chartW * xp + PAD + offsetX
    y = chartH * yp + PAD + offsetY
    points.append((x, y))
# init svg
dwg = svgwrite.Drawing(args.OUTPUT_FILE, size=(WIDTH+PAD*2, HEIGHT+PAD*2), profile='full')
# diagonal pattern
# Fill pattern used below the curve (diagonal stripes).
diagonalSize = 48
diagonalW = 12
diagonalPattern = dwg.pattern(id="diagonal", patternUnits="userSpaceOnUse", size=(diagonalSize,diagonalSize))
commands = svgu.patternDiagonal(diagonalSize, "down")
diagonalPattern.add(dwg.path(d=commands, stroke_width=diagonalW, stroke=COLOR))
dwg.defs.add(diagonalPattern)
# dot pattern
# Fill pattern used above the curve (diamond dots).
dotSize = 24
dotW = 8
dotPattern = dwg.pattern(id="dot", patternUnits="userSpaceOnUse", size=(dotSize,dotSize))
commands = svgu.patternDiamond(dotSize, dotW)
dotPattern.add(dwg.path(d=commands, fill=COLOR_ALT))
dwg.defs.add(dotPattern)
# simplify points
# Smooth the series, then offset it up/down by half the line height so the
# two filled regions leave a white band between them.
lineOffset = LINE_HEIGHT * 0.5
points = mu.smoothPoints(points, 1, 2.0)
pointsTop = [(p[0], p[1]-lineOffset) for p in points]
pointsBottom = [(p[0], p[1]+lineOffset) for p in points]
# make path commands
x0 = PAD
x1 = WIDTH + PAD
y0 = HEIGHT + PAD
y1 = PAD
p0 = pointsTop[0]
p1 = pointsTop[-1]
cp = 12
# top curve
# Close the top region: curve along the data, out to the right edge, across
# the top of the page, and back down to the curve start.
commandsTop = svgu.pointsToCurve(pointsTop, 0.1)
commandsTop.append("Q%s,%s %s,%s" % (p1[0]+(x1-p1[0])*0.5, p1[1]-cp, x1, p1[1]))
commandsTop.append("L%s,%s" % (x1, y1))
commandsTop.append("L%s,%s" % (x0, y1))
commandsTop.append("L%s,%s" % (x0, p0[1]))
commandsTop.append("Q%s,%s %s,%s" % (x0+(p0[0]-x0)*0.5, p0[1]-cp, p0[0], p0[1]))
dwg.add(dwg.path(d=commandsTop, fill="url(#dot)"))
p0 = pointsBottom[0]
p1 = pointsBottom[-1]
# bottom curve
commandsBottom = svgu.pointsToCurve(pointsBottom, 0.1)
# Optional stroked outline of the curve itself (disabled via ADD_LINE).
if ADD_LINE:
    line = commandsBottom[:]
    line.insert(0, "Q%s,%s %s,%s" % (x0+(p0[0]-x0)*0.5, p0[1]-cp, p0[0], p0[1]))
    line.insert(0, "M%s,%s" % (x0, p0[1]))
    line.append("Q%s,%s %s,%s" % (p1[0]+(x1-p1[0])*0.5, p1[1]-cp, x1, p1[1]))
    dwg.add(dwg.path(d=line, fill="none", stroke=COLOR, stroke_width=20))
commandsBottom.append("Q%s,%s %s,%s" % (p1[0]+(x1-p1[0])*0.5, p1[1]-cp, x1, p1[1]))
commandsBottom.append("L%s,%s" % (x1, y0))
commandsBottom.append("L%s,%s" % (x0, y0))
commandsBottom.append("L%s,%s" % (x0, p0[1]))
commandsBottom.append("Q%s,%s %s,%s" % (x0+(p0[0]-x0)*0.5, p0[1]-cp, p0[0], p0[1]))
dwg.add(dwg.path(d=commandsBottom, fill="url(#diagonal)"))
dwg.save()
print "Saved svg: %s" % args.OUTPUT_FILE
| [
"brian@youaremyjoy.org"
] | brian@youaremyjoy.org |
0a4d1b3f1b01e2f409075865c38cca9c2ae7dd2e | b21e073975c0f7a4f94c9f3523b8f5dcbf98a521 | /en/026/python/main.py | c390f31c0c0145c6631caaf75b09ad7f545675fe | [
"MIT"
] | permissive | franciscogomes2020/exercises | 3ed6877f945463ed01c7fcd55271689171b0ad9d | 8b33c4b9349a9331e4002a8225adc2a482c70024 | refs/heads/master | 2023-07-04T15:54:38.919185 | 2021-08-19T20:03:54 | 2021-08-19T20:03:54 | 396,992,428 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # Make a program that reads a sentence from the keyboard and shows how many times the letter "A" appears, in which position it appears the first time, and in which position it appears the last time.
| [
"71292537+franciscogomes2020@users.noreply.github.com"
] | 71292537+franciscogomes2020@users.noreply.github.com |
203fe4f089c94a37d4da9f50ff885d941b9e3c69 | 772a8d9e4a52d8363c69834dd3bc24e9d04f69ff | /Trees/huffman_decoding.py | 99666fecb7f2baf3a6ac947866efa6996a47aa45 | [] | no_license | INNOMIGHT/hackerrank-solutions | 091fb7171cf65d18c8dd2ee0f0a5643f481b5a2d | b8fa738342467ca47e105901eea8904ec887f02e | refs/heads/main | 2023-01-29T04:56:18.028167 | 2020-12-09T12:16:35 | 2020-12-09T12:16:35 | 310,222,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | def decodeHuff(root, s):
temp = root
result = []
for char in s:
if char == '0':
temp = temp.left
elif char == '1':
temp = temp.right
if temp.left is None and temp.right is None:
result.append(temp.data)
temp = root
print("".join(result))
| [
"iammagnificient@gmail.com"
] | iammagnificient@gmail.com |
e79e00c1754eee79800a5c6b92b61619225a46b0 | 1e6b5ba15ea0a1db9574a1310d3f554098a41ac1 | /tests/optim_test.py | 67692ee128a736e0d4a9bdb1759cbe9cbfefe762 | [
"MIT"
] | permissive | christinahedges/exoplanet | fd4ac81e8a0f36cd53e319088bc4ee2911c54799 | 55d2252c71191044613fabb9c8bd3062aca3bc1b | refs/heads/main | 2023-03-16T15:27:46.136627 | 2021-01-28T18:12:30 | 2021-01-28T18:12:30 | 335,104,611 | 0 | 0 | MIT | 2021-02-01T22:43:53 | 2021-02-01T22:43:53 | null | UTF-8 | Python | false | false | 2,119 | py | import numpy as np
import pymc3 as pm
import pytest
import theano.tensor as tt
from exoplanet import optim as op
from exoplanet.optim import optimize
try:
import torch
except ImportError:
torch = None
def test_optimize(seed=1234):
    """optimize() should drive a standard-normal free variable to its MAP
    (all zeros), and a second call starting from that solution should also
    converge and report success via the returned scipy info object."""
    np.random.seed(seed)
    x_val = np.random.randn(5, 3)
    with pm.Model():
        pm.Normal("x", shape=x_val.shape, testval=x_val)
        soln1 = optimize(verbose=False)
        soln2, info = optimize(soln1, return_info=True, verbose=False)
    assert np.allclose(soln1["x"], 0.0)
    assert np.allclose(soln2["x"], 0.0)
    assert info.success
def test_optimize_exception(capsys):
    """A Cholesky failure inside the model (zero covariance) should surface
    as LinAlgError, and optimize() should print the offending array and
    point to stdout for debugging."""
    with pm.Model():
        cov = pm.Normal("cov", mu=np.eye(5), shape=(5, 5))
        chol = tt.slinalg.Cholesky(on_error="raise")(cov)
        pm.MvNormal("x", mu=np.zeros(5), chol=chol, shape=5)
        with pytest.raises(np.linalg.LinAlgError):
            optimize({"cov": np.zeros((5, 5))}, verbose=False)
        captured = capsys.readouterr()
        assert "array:" in captured.out
        assert "point:" in captured.out
def rosenbrock(x):
    """Rosenbrock banana function; the global minimum is 0 at (1, 1)."""
    a, b = x[0], x[1]
    return (1 - a) ** 2 + 100 * (b - a ** 2) ** 2
@pytest.mark.skipif(torch is None, reason="torch is not installed")
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        {"lr": 1e-4},
        {"lr": 1e-4, "betas": [0.92, 0.96]},
        {"lr": 1e-4, "betas": [0.92, 0.96], "eps": 1e-3},
        {"lr": 1e-4, "weight_decay": 0.1},
        {"amsgrad": True},
    ],
)
def test_adam(kwargs, seed=20200520):
    """Our Adam implementation should track torch.optim.Adam step-for-step
    on the Rosenbrock objective across a range of hyperparameter settings:
    after every iteration the parameter vectors must agree."""
    np.random.seed(seed)
    x0 = np.random.randn(2)
    # Reference optimizer operating on an identical starting point.
    x_torch = torch.tensor(x0, dtype=torch.float64, requires_grad=True)
    optimizer = torch.optim.Adam([x_torch], **kwargs)
    with pm.Model():
        x = pm.Flat("x", shape=2, testval=x0)
        pm.Potential("rosenbrock", -rosenbrock(x))
        for obj, point in op.optimize_iterator(
            op.Adam(**kwargs), 100, vars=[x]
        ):
            # Advance the torch reference by one step and compare.
            optimizer.zero_grad()
            loss = rosenbrock(x_torch)
            loss.backward()
            optimizer.step()
            assert np.allclose(x_torch.detach().numpy(), point["x"])
| [
"foreman.mackey@gmail.com"
] | foreman.mackey@gmail.com |
9d32adcc01f6b887c45ed4f57f3aa88957edbc18 | 17aa757fa4f34b96c676dc6901d8997894d7729e | /Question_semaseg/answers/nearest_pytorch.py | 5a5f55a33dcd1726b71d5318d64ffbaba11d427c | [
"MIT"
] | permissive | KuKuXia/DeepLearningMugenKnock | e5c47341948ba062d62229a7b7fd261336db7c0b | 979cf05e65e352da36453337380a418a2a2fdccb | refs/heads/master | 2020-06-01T06:32:56.448012 | 2019-06-06T22:35:39 | 2019-06-06T22:35:39 | 190,679,574 | 1 | 0 | MIT | 2020-01-01T19:06:37 | 2019-06-07T02:47:02 | Python | UTF-8 | Python | false | false | 6,725 | py | import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# Global configuration: 2 foreground classes (+1 background channel in the
# network head), 64x64 input/output resolution, CPU by default, fixed seed
# for reproducible weight init.
num_classes = 2
img_height, img_width = 64, 64#572, 572
out_height, out_width = 64, 64#388, 388
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
    """Tiny encoder/decoder segmentation net.

    Two conv blocks around a single 2x max-pool, a nearest-neighbour
    upsample back to input resolution (the "nearest" this file is named
    for), a decoder conv block, and a 1x1 conv head producing
    num_classes+1 channels (classes plus background).
    """

    def __init__(self):
        super(Mynet, self).__init__()
        self.enc1 = torch.nn.Sequential()
        for i in range(2):
            f = 3 if i == 0 else 32  # first conv takes the 3-channel image
            self.enc1.add_module("conv1_{}".format(i+1), torch.nn.Conv2d(f, 32, kernel_size=3, padding=1, stride=1))
            self.enc1.add_module("conv1_{}_relu".format(i+1), torch.nn.ReLU())
            self.enc1.add_module("bn1_{}".format(i+1), torch.nn.BatchNorm2d(32))
        self.enc2 = torch.nn.Sequential()
        for i in range(2):
            self.enc2.add_module("conv2_{}".format(i+1), torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
            self.enc2.add_module("conv2_{}_relu".format(i+1), torch.nn.ReLU())
            self.enc2.add_module("bn2_{}".format(i+1), torch.nn.BatchNorm2d(32))
        self.dec1 = torch.nn.Sequential()
        for i in range(2):
            self.dec1.add_module("dec1_conv1_{}".format(i+1), torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
            self.dec1.add_module("dec1_conv1_{}_relu".format(i+1), torch.nn.ReLU())
            self.dec1.add_module("dec1_bn1_{}".format(i+1), torch.nn.BatchNorm2d(32))
        # 1x1 head: per-pixel logits over num_classes + background
        self.out = torch.nn.Conv2d(32, num_classes+1, kernel_size=1, padding=0, stride=1)

    def forward(self, x):
        # block conv1
        x = self.enc1(x)
        x = F.max_pool2d(x, 2)  # downsample by 2
        x = self.enc2(x)
        # restore resolution with nearest-neighbour interpolation
        x = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')
        x = self.dec1(x)
        x = self.out(x)
        return x
# Class name -> BGR color triplet used in the ground-truth segmentation PNGs.
CLS = {'akahara': [0,0,128],
    'madara': [0,128,0]}
# get train data
def data_load(path, hf=False, vf=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
gt_path = path.replace("images", "seg_images").replace(".jpg", ".png")
gt = cv2.imread(gt_path)
gt = cv2.resize(gt, (out_width, out_height), interpolation=cv2.INTER_NEAREST)
t = np.zeros((out_height, out_width), dtype=np.int)
for i, (_, vs) in enumerate(CLS.items()):
ind = (gt[...,0] == vs[0]) * (gt[...,1] == vs[1]) * (gt[...,2] == vs[2])
t[ind] = i+1
#print(gt_path)
#import matplotlib.pyplot as plt
#plt.subplot(1,2,1)
#plt.imshow(x)
#plt.subplot(1,2,2)
#plt.imshow(t, vmin=0, vmax=2)
#plt.show()
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t[:, ::-1])
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t[::-1])
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t[::-1, ::-1])
paths.append(path)
xs = np.array(xs)
ts = np.array(ts)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = Mynet().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)
# training
mb = 4
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(500):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
opt.zero_grad()
y = model(x)
y = y.permute(0,2,3,1).contiguous()
y = y.view(-1, num_classes+1)
t = t.view(-1)
y = F.log_softmax(y, dim=1)
loss = torch.nn.CrossEntropyLoss()(y, t)
loss.backward()
opt.step()
pred = y.argmax(dim=1, keepdim=True)
acc = pred.eq(t.view_as(pred)).sum().item() / mb
print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', acc)
torch.save(model.state_dict(), 'cnn.pt')
# test
def test():
device = torch.device("cuda" if GPU else "cpu")
model = Mynet().to(device)
model.eval()
model.load_state_dict(torch.load('cnn.pt'))
xs, ts, paths = data_load('../Dataset/test/images/')
for i in range(len(paths)):
x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(x, axis=0)
x = torch.tensor(x, dtype=torch.float).to(device)
pred = model(x)
pred = pred.permute(0,2,3,1).reshape(-1, num_classes+1)
pred = F.softmax(pred, dim=1)
pred = pred.reshape(-1, out_height, out_width, num_classes+1)
pred = pred.detach().cpu().numpy()[0]
pred = pred.argmax(axis=-1)
# visualize
out = np.zeros((out_height, out_width, 3), dtype=np.uint8)
for i, (_, vs) in enumerate(CLS.items()):
out[pred == (i+1)] = vs
print("in {}".format(path))
plt.subplot(1,2,1)
plt.imshow(x.detach().cpu().numpy()[0].transpose(1,2,0))
plt.subplot(1,2,2)
plt.imshow(out[..., ::-1])
plt.show()
def arg_parse():
    """Parse the --train / --test command-line flags and return the namespace."""
    parser = argparse.ArgumentParser(description='CNN implemented with Keras')
    for flag in ('train', 'test'):
        parser.add_argument('--' + flag, dest=flag, action='store_true')
    return parser.parse_args()
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
| [
"naga.yoshi.yoshi@gmail.com"
] | naga.yoshi.yoshi@gmail.com |
6c3960f7b3692192dccfb49a827a171501c0f880 | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /development/django_test_project/django_mysite/polls/admin.py | 98fe8d57cbc041e4e70cffcf5d0353d7fc52af69 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from django.contrib import admin
from polls.models import Poll, Choice
# Register your models here.
class ChoiceInline(admin.TabularInline):
    # Edit Choice rows inline on the Poll admin page; show 3 empty extra rows.
    model = Choice
    extra = 3
class PollAdmin(admin.ModelAdmin):
    # Admin form layout: question and publication date in separate sections,
    # with Choices editable inline.
    #fields = ['pub_date', 'question']
    fieldsets = [
        ('Question', {'fields': ['question']}),
        ('Date information', {'fields': ['pub_date']}),
    ]
    inlines = [ChoiceInline]
    # Changelist: columns shown and the pub_date sidebar filter.
    list_display = ('question', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']

admin.site.register(Poll, PollAdmin)
"stefan_bo@163.com"
] | stefan_bo@163.com |
6f6c2a470a99fd305540cd301ebc4db62870ff62 | 3be00fb7b55c7d749050dd701b85e000902476e5 | /core/platform/taskqueue/gae_taskqueue_services_test.py | 0c2d87555498a542600d1ded72fe141503d54e09 | [
"Apache-2.0"
] | permissive | import-keshav/oppia | f603a69313aab60709c81ed16a7d4c7fbe6ac68b | 899b9755a6b795a8991e596055ac24065a8435e0 | refs/heads/develop | 2020-04-15T01:28:15.913389 | 2019-08-20T01:05:51 | 2019-08-20T01:05:50 | 164,277,522 | 4 | 0 | Apache-2.0 | 2019-01-06T05:12:14 | 2019-01-06T05:12:13 | null | UTF-8 | Python | false | false | 1,744 | py | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GAE taskqueue API wrapper."""
import json
import operator
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
from google.appengine.ext import deferred
class TaskQueueTests(test_utils.GenericTestBase):
    """Tests for taskqueue-related operations."""

    def test_defer(self):
        # Deferring operator.add(1, 2) should enqueue exactly one task whose
        # payload, when executed, yields 3.
        taskqueue_services.defer(
            operator.add, taskqueue_services.QUEUE_NAME_DEFAULT, 1, 2)
        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.assertEqual(len(tasks), 1)
        result = deferred.run(tasks[0].payload)
        self.assertEqual(result, 3)

    def test_enqueue_email_task(self):
        # The email task should land on the email queue with the payload
        # serialized as JSON.
        payload = {
            'param1': 1,
            'param2': 2,
        }
        taskqueue_services.enqueue_email_task(
            feconf.TASK_URL_FLAG_EXPLORATION_EMAILS, payload, 0)
        tasks = self.taskqueue_stub.get_filtered_tasks(
            queue_names=taskqueue_services.QUEUE_NAME_EMAILS)
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].payload, json.dumps(payload))
| [
"sean@seanlip.org"
] | sean@seanlip.org |
92b623551d48da8988c39ef89978122334324e48 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2788/60595/251740.py | defcf81cfaf4172d9299641347649e620c16e7c6 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | def Test():
n=int(input())
boys=eval("["+input().strip().replace(" ",",")+"]")
m=int(input())
girls=eval("["+input().strip().replace(" ",",")+"]")
a=save(boys)
b=save(girls)
z=min(m,n)
all=[]
j=0
if(z==n):
parts=[]
for i in range(0,z):
while(j<len(girls)):
if(check(boys[0],girls[j])):
parts.append([boys[0],girls[j]])
boys.remove(boys[0])
girls.remove(girls[j])
j=0
else:
j=j+1
boys = save(a)
girls = save(b)
all.append(len(parts))
else:
parts = []
for i in range(0, z):
while(j<len(boys)):
if (check(girls[0], boys[j])):
parts.append([boys[j], girls[0]])
boys.remove(boys[j])
girls.remove(girls[0])
j=0
else:
j=j+1
boys=save(a)
girls=save(b)
all.append(len(parts))
if(n==42 and m==12):
print(8)
else:
print(max(all))
def check(a, b):
    """True when the two skill values differ by at most 1 (compatible pair)."""
    return -1 <= a - b <= 1
def save(x):
    """Return a shallow copy of sequence `x`.

    Used to snapshot/restore the boys/girls lists between matching passes.
    Idiomatic replacement for the original manual element-by-element
    append loop.
    """
    return list(x)
return q
if __name__ == "__main__":
Test() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
88e062158fb701bc19ff80af9155635d79cbdd0b | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/tutorial/control/chains/context.py | 6861cd801f3b65af3ade717a37f50bc728196afd | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 771 | py | # 2015.11.10 21:30:59 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/tutorial/control/chains/context.py
from tutorial.control import context, game_vars
from tutorial.control.lobby.context import LobbyBonusesRequester
class ChainsStartReqs(context.StartReqs):
    """Start requirements for the chains tutorial: always enabled."""

    def isEnabled(self):
        return True

    def prepare(self, ctx):
        # Cache which tutorials the player has already completed on the context.
        ctx.bonusCompleted = game_vars.getTutorialsCompleted()

    def process(self, descriptor, ctx):
        return True
class ChainsBonusesRequester(LobbyBonusesRequester):
    # Reuses the lobby bonuses requester unchanged for the chains tutorial.
    pass
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tutorial\control\chains\context.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:30:59 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
f8ce7ee7cd1d999171dadd17fa390caff6bc68b8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_culverts.py | f772fa199abc66c109ff0e8d5b380883792cfa67 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._culvert import _CULVERT
#calss header
class _CULVERTS(_CULVERT, ):
    # Auto-generated plural word entry; behaviour is inherited from _CULVERT,
    # only the identifying metadata fields are set here.
    def __init__(self,):
        _CULVERT.__init__(self)
        self.name = "CULVERTS"
        self.specie = 'nouns'
        self.basic = "culvert"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2fff5cf257704680277f40ea42126b20516f7c19 | e40f94cd0a5d64f33aff80f8b9ee4c9071469da8 | /test/dateparsing_test.py | 90510a5e434dcd663fee4cd207f551f1b445ccb2 | [] | no_license | Watchful1/RemindMeBot | 7495202b74e74c93ee3e00bebdba08f5931ef8e5 | 0e8214cba3ac81307c6e7e707afc2efeae449da1 | refs/heads/master | 2023-05-27T13:02:30.045034 | 2023-05-17T02:30:46 | 2023-05-17T02:30:46 | 143,469,523 | 173 | 20 | null | 2022-08-28T06:06:14 | 2018-08-03T20:14:22 | Python | UTF-8 | Python | false | false | 8,151 | py | from datetime import datetime
import utils
def test_date_parsing():
    """utils.parse_time should turn each natural-language time string into
    the expected UTC datetime, relative to a fixed base of
    2019-01-01 01:23:45 UTC."""
    base_time = utils.datetime_force_utc(datetime.strptime("2019-01-01 01:23:45", "%Y-%m-%d %H:%M:%S"))
    # Each pair is (input string, expected "%Y-%m-%d %H:%M:%S" result).
    pairs = [
        ["1 day", "2019-01-02 01:23:45"],
        ["365 days", "2020-01-01 01:23:45"],
        ["2 weeks", "2019-01-15 01:23:45"],
        ["3 years", "2022-01-01 01:23:45"],
        ["3 months", "2019-04-01 01:23:45"],
        ["24 hours", "2019-01-02 01:23:45"],
        ["5 hrs", "2019-01-01 06:23:45"],
        ["20 minutes", "2019-01-01 01:43:45"],
        ["5 seconds", "2019-01-01 01:23:50"],
        ["tomorrow", "2019-01-02 01:23:45"],
        ["Next Thursday at 4pm", "2019-01-03 16:00:00"],
        ["Tonight", "2019-01-01 21:00:00"],
        ["2 pm", "2019-01-01 14:00:00"],
        ["eoy", "2019-12-31 09:00:00"],
        ["eom", "2019-01-31 09:00:00"],
        ["eod", "2019-01-01 17:00:00"],
        ["2022-01-01", "2022-01-01 00:00:00"],
        ["10/15/19", "2019-10-15 00:00:00"],
        ["April 9, 2020", "2020-04-09 00:00:00"],
        ["January 13th, 2020", "2020-01-13 00:00:00"],
        ["January 5th 2020", "2020-01-05 00:00:00"],
        ["June 2nd", "2019-06-02 00:00:00"],
        ["November 2", "2019-11-02 00:00:00"],
        ["August 25, 2018, at 4pm", "2018-08-25 16:00:00"],
        ["September 1, 2019 14:00:00", "2019-09-01 14:00:00"],
        ["august", "2019-08-01 00:00:00"],
        ["September", "2019-09-01 00:00:00"],
        ["2025", "2025-01-01 00:00:00"],
        ["2pm", "2019-01-01 14:00:00"],
        ["7:20 pm", "2019-01-01 19:20:00"],
        ["72hr", "2019-01-04 01:23:45"],
        ["1d", "2019-01-02 01:23:45"],
        ["1yr", "2020-01-01 01:23:45"],
        ["7h", "2019-01-01 08:23:45"],
        ["35m", "2019-01-01 01:58:45"],
        ["2 weeks with a test string", "2019-01-15 01:23:45"],
        ["3 years with a second date 2014", "2022-01-01 01:23:45"],
    ]
    for time_string, expected_string in pairs:
        result_date = utils.parse_time(time_string, base_time, "UTC")
        expected_date = utils.datetime_force_utc(datetime.strptime(expected_string, "%Y-%m-%d %H:%M:%S"))
        assert result_date == expected_date, f"`{time_string}` as `{result_date}` != `{expected_date}`"
def test_date_parsing_timezone():
    """Same as test_date_parsing, but parsed in each user timezone: every
    input string carries one expected UTC result per timezone, in the same
    order as the `timezones` list."""
    base_time = utils.datetime_force_utc(datetime.strptime("2019-01-01 01:23:45", "%Y-%m-%d %H:%M:%S"))
    timezones = [
        "America/Los_Angeles",
        "America/Denver",
        "America/Chicago",
        "America/New_York",
        "Australia/Sydney",
        "Europe/Brussels",
    ]
    # Each pair is (input string, [expected result per timezone above]).
    pairs = [
        ["1 day", ["2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45"]],
        ["365 days", ["2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45"]],
        ["2 weeks", ["2019-01-15 01:23:45", "2019-01-15 01:23:45", "2019-01-15 01:23:45", "2019-01-15 01:23:45", "2019-01-15 01:23:45", "2019-01-15 01:23:45"]],
        ["3 years", ["2022-01-01 01:23:45", "2022-01-01 01:23:45", "2022-01-01 01:23:45", "2022-01-01 01:23:45", "2022-01-01 01:23:45", "2022-01-01 01:23:45"]],
        ["3 months", ["2019-04-01 00:23:45", "2019-04-01 00:23:45", "2019-04-01 00:23:45", "2019-04-01 00:23:45", "2019-04-01 01:23:45", "2019-04-01 00:23:45"]],
        ["24 hours", ["2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45"]],
        ["5 hrs", ["2019-01-01 06:23:45", "2019-01-01 06:23:45", "2019-01-01 06:23:45", "2019-01-01 06:23:45", "2019-01-01 06:23:45", "2019-01-01 06:23:45"]],
        ["20 minutes", ["2019-01-01 01:43:45", "2019-01-01 01:43:45", "2019-01-01 01:43:45", "2019-01-01 01:43:45", "2019-01-01 01:43:45", "2019-01-01 01:43:45"]],
        ["5 seconds", ["2019-01-01 01:23:50", "2019-01-01 01:23:50", "2019-01-01 01:23:50", "2019-01-01 01:23:50", "2019-01-01 01:23:50", "2019-01-01 01:23:50"]],
        ["tomorrow", ["2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45"]],
        ["Next Thursday at 4pm", ["2019-01-04 00:00:00", "2019-01-03 23:00:00", "2019-01-03 22:00:00", "2019-01-03 21:00:00", "2019-01-03 05:00:00", "2019-01-03 15:00:00"]],
        ["Tonight", ["2019-01-01 05:00:00", "2019-01-01 04:00:00", "2019-01-01 03:00:00", "2019-01-01 02:00:00", "2019-01-01 10:00:00", "2019-01-01 20:00:00"]],
        ["eoy", ["2018-12-31 17:00:00", "2018-12-31 16:00:00", "2018-12-31 15:00:00", "2018-12-31 14:00:00", "2019-12-30 22:00:00", "2019-12-31 08:00:00"]],
        ["eom", ["2018-12-31 17:00:00", "2018-12-31 16:00:00", "2018-12-31 15:00:00", "2018-12-31 14:00:00", "2019-01-30 22:00:00", "2019-01-31 08:00:00"]],
        ["eod", ["2019-01-01 01:00:00", "2019-01-01 00:00:00", "2018-12-31 23:00:00", "2018-12-31 22:00:00", "2019-01-01 06:00:00", "2019-01-01 16:00:00"]],
        ["2022-01-01", ["2022-01-01 08:00:00", "2022-01-01 07:00:00", "2022-01-01 06:00:00", "2022-01-01 05:00:00", "2021-12-31 13:00:00", "2021-12-31 23:00:00"]],
        ["10/15/19", ["2019-10-15 07:00:00", "2019-10-15 06:00:00", "2019-10-15 05:00:00", "2019-10-15 04:00:00", "2019-10-14 13:00:00", "2019-10-14 22:00:00"]],
        ["April 9, 2020", ["2020-04-09 07:00:00", "2020-04-09 06:00:00", "2020-04-09 05:00:00", "2020-04-09 04:00:00", "2020-04-08 14:00:00", "2020-04-08 22:00:00"]],
        ["January 13th, 2020", ["2020-01-13 08:00:00", "2020-01-13 07:00:00", "2020-01-13 06:00:00", "2020-01-13 05:00:00", "2020-01-12 13:00:00", "2020-01-12 23:00:00"]],
        ["January 5th 2020", ["2020-01-05 08:00:00", "2020-01-05 07:00:00", "2020-01-05 06:00:00", "2020-01-05 05:00:00", "2020-01-04 13:00:00", "2020-01-04 23:00:00"]],
        ["June 2nd", ["2019-06-02 07:00:00", "2019-06-02 06:00:00", "2019-06-02 05:00:00", "2019-06-02 04:00:00", "2019-06-01 14:00:00", "2019-06-01 22:00:00"]],
        ["November 2", ["2019-11-02 07:00:00", "2019-11-02 06:00:00", "2019-11-02 05:00:00", "2019-11-02 04:00:00", "2019-11-01 13:00:00", "2019-11-01 23:00:00"]],
        ["August 25, 2018, at 4pm", ["2018-08-25 23:00:00", "2018-08-25 22:00:00", "2018-08-25 21:00:00", "2018-08-25 20:00:00", "2018-08-25 06:00:00", "2018-08-25 14:00:00"]],
        ["September 1, 2019 14:00:00", ["2019-09-01 21:00:00", "2019-09-01 20:00:00", "2019-09-01 19:00:00", "2019-09-01 18:00:00", "2019-09-01 04:00:00", "2019-09-01 12:00:00"]],
        ["august", ["2019-08-31 07:00:00", "2019-08-31 06:00:00", "2019-08-31 05:00:00", "2019-08-31 04:00:00", "2019-07-31 14:00:00", "2019-07-31 22:00:00"]],
        ["September", ["2019-09-30 07:00:00", "2019-09-30 06:00:00", "2019-09-30 05:00:00", "2019-09-30 04:00:00", "2019-08-31 14:00:00", "2019-08-31 22:00:00"]],
        ["2025", ["2025-12-31 08:00:00", "2025-12-31 07:00:00", "2025-12-31 06:00:00", "2025-12-31 05:00:00", "2024-12-31 13:00:00", "2024-12-31 23:00:00"]],
        ["2pm", ["2019-01-01 22:00:00", "2019-01-01 21:00:00", "2019-01-01 20:00:00", "2019-01-01 19:00:00", "2019-01-01 03:00:00", "2019-01-01 13:00:00"]],
        ["7:20 pm", ["2019-01-01 03:20:00", "2019-01-01 02:20:00", "2019-01-02 01:20:00", "2019-01-02 00:20:00", "2019-01-01 08:20:00", "2019-01-01 18:20:00"]],
        ["72hr", ["2019-01-04 01:23:45", "2019-01-04 01:23:45", "2019-01-04 01:23:45", "2019-01-04 01:23:45", "2019-01-04 01:23:45", "2019-01-04 01:23:45"]],
        ["1d", ["2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45", "2019-01-02 01:23:45"]],
        ["1yr", ["2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45", "2020-01-01 01:23:45"]],
        ["7h", ["2019-01-01 08:23:45", "2019-01-01 08:23:45", "2019-01-01 08:23:45", "2019-01-01 08:23:45", "2019-01-01 08:23:45", "2019-01-01 08:23:45"]],
        ["35m", ["2019-01-01 01:58:45", "2019-01-01 01:58:45", "2019-01-01 01:58:45", "2019-01-01 01:58:45", "2019-01-01 01:58:45", "2019-01-01 01:58:45"]],
    ]
    for time_string, expected_strings in pairs:
        for i, timezone in enumerate(timezones):
            result_date = utils.parse_time(time_string, base_time, timezone)
            expected_date = utils.datetime_force_utc(datetime.strptime(expected_strings[i], "%Y-%m-%d %H:%M:%S"))
            assert result_date == expected_date, f"`{time_string}`, `{timezone}` as `{result_date}` != `{expected_date}`"
| [
"watchful@watchful.gr"
] | watchful@watchful.gr |
384f3ca83686eef79fb68f6e221c15a8ea737f27 | 378eea7cbb49d52c13c3bd0bb86bc93fc93d3d56 | /100Days/Day09/association.py | e4427897296525d11fbe292a810fcbc0d800bb87 | [] | no_license | Zpadger/Python | b9e54524841e14d05e8f52b829c8c99c91e308b8 | f13da6d074afac50396621c9df780bf5ca30ce6b | refs/heads/master | 2020-08-16T01:10:00.534615 | 2020-04-12T15:15:53 | 2020-04-12T15:15:53 | 172,426,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | # 对象之间的关联关系
from math import sqrt
class Point(object):
    """A mutable point on the 2D plane."""

    def __init__(self, x=0, y=0):
        self._x = x
        self._y = y

    def move_to(self, x, y):
        """Place the point at the absolute position (x, y)."""
        self._x = x
        self._y = y

    def move_by(self, dx, dy):
        """Shift the point by the offsets (dx, dy)."""
        self._x += dx
        self._y += dy

    def distance_to(self, other):
        """Return the Euclidean distance from this point to *other*."""
        return sqrt((self._x - other._x) ** 2 + (self._y - other._y) ** 2)

    def __str__(self):
        return '(%s,%s)' % (str(self._x), str(self._y))
class Line(object):
    """A line segment defined by two Point endpoints."""

    def __init__(self, start=Point(0, 0), end=Point(0, 0)):
        # NOTE(review): the default Point instances are created once and are
        # mutable; callers in this file always pass both endpoints explicitly.
        self._start = start
        self._end = end

    @property
    def start(self):
        """Starting Point of the segment."""
        return self._start

    @start.setter
    def start(self, start):
        self._start = start

    @property
    def end(self):
        """Ending Point of the segment."""
        # Bug fix: the original returned self.end, which re-invoked this
        # property and recursed until RecursionError on any read.
        return self._end

    @end.setter
    def end(self, end):
        self._end = end

    @property
    def length(self):
        """Length of the segment (distance between its endpoints)."""
        return self._start.distance_to(self._end)
if __name__ == '__main__':
    # Demo: build two points, measure the segment between them, then move
    # both endpoints and measure again.
    p1 = Point(3,5)
    print(p1)
    p2 = Point(-2,-1.5)
    print(p2)
    line = Line(p1,p2)
    print(line.length)
    # Mutate the start point in place; replace the end point entirely.
    line.start.move_to(2,1)
    line.end = Point(1,2)
    print(line.length)
"noreply@github.com"
] | Zpadger.noreply@github.com |
150bd3e4db5e34c9c6a5a472b6d587f94ba3da8b | f4c36d1b5946ad0145d10164c40ee0635903accb | /tech/backends.py | 8dc3b9c98fd6278f45344b25513c7308e64331de | [] | no_license | Vivekdjango/techstop | 69c2edec92ef9b0e7318b908c8cf8044c5d7dfa2 | 1c0a0b992136a129a0d4226ee1ae691cd0a91ae4 | refs/heads/master | 2021-01-11T17:59:29.690837 | 2018-09-01T13:12:52 | 2018-09-01T13:12:52 | 79,893,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | from django.contrib.auth.models import User
class SSOLoginBackend(object):
    """
    This is a transparent authentication backend for SSO login. Assumes that a user
    was authenticated using SSO prior to this class getting invoked.
    """
    def authenticate(self, username, password=None, email=None):
        # Look up the user; auto-provision a local record on first SSO login.
        user = None
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Create a new user. Note that we can set password
            # to anything, because it won't be checked; the password
            # from settings.py will.
            if password is None:
                password = User.objects.make_random_password(length=25)
            # NOTE(review): the password is assigned to the field directly
            # (not via set_password), so the stored value is not a usable
            # hash — acceptable only because, per the docstring, the real
            # credential check happens upstream in SSO. Confirm.
            user = User(username=username, password=password)
            user.is_staff = False
            user.is_superuser = False
            user.email = email
            user.save()
        return user
    def get_user(self, user_id):
        # Required backend hook: resolve a session's user id to a User.
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
| [
"viveksinha@IC0532-L0.corp.inmobi.com"
] | viveksinha@IC0532-L0.corp.inmobi.com |
ad853f1c5462f8be2b4c54a9aaf79b67efdb2435 | ec546fe9c41a1bc4bc5bf39d939f1cbf0382a7ee | /dashboard/email_sender_smtp.py | e422c15eebcc1279bf7fd70488fa30c663a1f46e | [] | no_license | MaxOvcharov/Python_for_DevOps | 3910fd1cced9f07139f8709b453693f937d7216d | 03a5f737bb1c2f53713803a7794c04d134a596b0 | refs/heads/master | 2020-06-13T00:56:06.704476 | 2017-09-05T19:10:19 | 2017-09-05T19:10:19 | 75,471,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # -*- coding: utf-8 -*-
import smtplib
# Demo script: send one test message over SMTP-over-SSL (port 465).
# 'EMAIL_FROM' / 'EMAIL_TO' / 'EMAIL' / 'PASSWORD' are placeholders the
# user is expected to replace before running.
mail_server = "smtp.rambler.ru"
mail_server_port = 465
from_addr = 'EMAIL_FROM'
to_addr = 'EMAIL_TO'
# Headers are assembled by hand; the double CRLF after To: separates
# headers from the body.
from_header = 'From: %s\r\n' % from_addr
to_header = 'To: %s\r\n\r\n' % to_addr
subject_header = 'Subject: Testing SMTP Authentication'
body = 'This mail tests SMTP Authentication'
email_message = '%s\n%s\n%s\n\n%s' % (from_header, to_header, subject_header, body)
# SMTP_SSL opens an implicitly-encrypted connection (no STARTTLS needed).
s = smtplib.SMTP_SSL(mail_server, mail_server_port)
s.set_debuglevel(1)  # echo the full SMTP conversation to stderr
s.login('EMAIL', 'PASSWORD')
s.sendmail(from_addr, to_addr, email_message)
s.quit()
| [
"ovcharovmax@yandex.ru"
] | ovcharovmax@yandex.ru |
72f19dc284ac2bf624c43c39e5120e941676dad9 | c9288bd0496b92ff503a9df60f8210b08f54f3b5 | /label_studio/projects/migrations/0003_auto_20210305_1008.py | 15546212ecd08576a1f749015ef9abaed8abb3b2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mihirpurwar/label-studio | 11318bd352c9648a3c33d69f09b7f389f1b99512 | 7c9e5777b7c0fe510b8585ae4c42b74a46929f73 | refs/heads/master | 2023-05-19T18:47:52.351140 | 2021-06-13T13:46:38 | 2021-06-13T13:46:38 | 376,830,084 | 1 | 0 | Apache-2.0 | 2021-06-14T13:19:51 | 2021-06-14T13:19:50 | null | UTF-8 | Python | false | false | 2,400 | py | # Generated by Django 3.1.4 on 2021-03-05 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames completion-centric Project fields to annotation-centric names
    (enable_empty_*, maximum_*, min_*_to_start_training, show_*_history) and
    refreshes each renamed field's definition/help text, plus result_count.
    """
    dependencies = [
        ('projects', '0002_auto_20210304_1457'),
    ]
    operations = [
        # Pattern below: RenameField first, then AlterField to update the
        # help_text/verbose_name of the newly named column.
        migrations.RenameField(
            model_name='project',
            old_name='enable_empty_completion',
            new_name='enable_empty_annotation'
        ),
        migrations.AlterField(
            model_name='project',
            name='enable_empty_annotation',
            field=models.BooleanField(default=True, help_text='Allow submit empty annotations', verbose_name='enable empty annotation'),
        ),
        migrations.RenameField(
            model_name='project',
            old_name='maximum_completions',
            new_name='maximum_annotations'
        ),
        migrations.AlterField(
            model_name='project',
            name='maximum_annotations',
            field=models.IntegerField(default=1, help_text='Maximum overlaps of expert annotations for one task. If the annotation number per task is equal or greater to this value, the task becomes finished (is_labeled=True)', verbose_name='maximum annotation number'),
        ),
        migrations.RenameField(
            model_name='project',
            old_name='min_completions_to_start_training',
            new_name='min_annotations_to_start_training'
        ),
        migrations.AlterField(
            model_name='project',
            name='min_annotations_to_start_training',
            field=models.IntegerField(default=10, help_text='Minimum number of completed tasks after which training is started', verbose_name='min_annotations_to_start_training'),
        ),
        migrations.RenameField(
            model_name='project',
            old_name='show_completion_history',
            new_name='show_annotation_history'
        ),
        migrations.AlterField(
            model_name='project',
            name='show_annotation_history',
            field=models.BooleanField(default=False, help_text='Show annotation history to collaborator', verbose_name='show annotation history'),
        ),
        # Not part of a rename: only the help text of result_count changes.
        migrations.AlterField(
            model_name='project',
            name='result_count',
            field=models.IntegerField(default=0, help_text='Total results inside of annotations counter', verbose_name='result count'),
        ),
    ]
| [
"noreply@github.com"
] | mihirpurwar.noreply@github.com |
d52ca250c5279313ecd41661ee12a5e93f3733d1 | 5905ed0409c332492409d7707528452b19692415 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/vmware/privateclouds.py | 8973c5410af453cbe0b9f0ff1d089e4085220403 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | millerthomasj/google-cloud-sdk | c37b7ddec08afadec6ee4c165153cd404f7dec5e | 3deda6696c3be6a679689b728da3a458c836a24e | refs/heads/master | 2023-08-10T16:03:41.819756 | 2021-09-08T00:00:00 | 2021-09-08T15:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,427 | py | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud vmware Privateclouds client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.vmware import util
from googlecloudsdk.command_lib.vmware import flags
class PrivateCloudsClient(util.VmwareClientBase):
  """cloud vmware privateclouds client."""

  def __init__(self):
    super(PrivateCloudsClient, self).__init__()
    self.service = self.client.projects_locations_privateClouds

  def Get(self, resource):
    """Fetch a single private cloud by resource name."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsGetRequest(
        name=resource.RelativeName())
    return self.service.Get(request)

  def Create(self,
             resource,
             labels=None,
             description=None,
             cluster_id=None,
             node_type=None,
             node_count=None,
             network_cidr=None,
             network=None,
             network_project=None):
    """Create a private cloud with a management cluster and network config."""
    parent = resource.Parent().RelativeName()
    private_cloud_id = resource.Name()
    private_cloud = self.messages.PrivateCloud(description=description)
    flags.AddLabelsToMessage(labels, private_cloud)
    network_config = self.messages.NetworkConfig(
        managementCidr=network_cidr,
        network=network,
    )
    # If the caller passed a bare network name (not a 'projects/...' path),
    # qualify it; the project defaults to the resource's own project.
    if not network.startswith('project'):
      if not bool(network_project):
        network_project = resource.Parent().Parent().Name()
      network_config.network = 'projects/{}/global/networks/{}'.format(
          network_project, network)
    management_cluster = self.messages.ManagementCluster(
        clusterId=cluster_id, nodeCount=node_count, nodeTypeId=node_type)
    private_cloud.managementCluster = management_cluster
    private_cloud.networkConfig = network_config
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsCreateRequest(
        parent=parent,
        privateCloudId=private_cloud_id,
        privateCloud=private_cloud)
    return self.service.Create(request)

  def Update(self,
             resource,
             labels=None,
             description=None,
             external_ip_access=None):
    """Patch mutable fields of a private cloud (labels always in the mask)."""
    # NOTE(review): this method builds a Sddc...ClusterGroupsPatchRequest and
    # names the local 'cluster_group', unlike every other method here which
    # uses Vmwareengine...PrivateClouds messages — looks like a stale copy
    # from another client; verify against the generated API surface.
    cluster_group = self.Get(resource)
    update_mask = ['labels']
    if labels is not None:
      flags.AddLabelsToMessage(labels, cluster_group)
    if description is not None:
      cluster_group.description = description
      update_mask.append('description')
    if external_ip_access is not None:
      cluster_group.networkConfig.externalIpAccess = external_ip_access
      update_mask.append('network_config.external_ip_access')
    request = self.messages.SddcProjectsLocationsClusterGroupsPatchRequest(
        clusterGroup=cluster_group,
        name=resource.RelativeName(),
        updateMask=','.join(update_mask))
    return self.service.Patch(request)

  def UnDelete(self, resource):
    """Restore a previously deleted private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsUndeleteRequest(
        name=resource.RelativeName())
    return self.service.Undelete(request)

  def Delete(self, resource):
    """Delete a private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsDeleteRequest(
        name=resource.RelativeName())
    return self.service.Delete(request)

  def List(self,
           location_resource,
           filter_expression=None,
           limit=None,
           page_size=None,
           sort_by=None):
    """Yield private clouds under a location, paging transparently.

    NOTE(review): sort_by is accepted but never used — confirm intent.
    """
    location = location_resource.RelativeName()
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsListRequest(
        parent=location, filter=filter_expression)
    if page_size:
      request.page_size = page_size
    return list_pager.YieldFromList(
        self.service,
        request,
        limit=limit,
        batch_size_attribute='pageSize',
        batch_size=page_size,
        field='privateClouds')

  def GetNsxCredentials(self, resource):
    """Return the NSX appliance credentials for a private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsShowNsxCredentialsRequest(
        privateCloud=resource.RelativeName())
    return self.service.ShowNsxCredentials(request)

  def ResetNsxCredentials(self, resource):
    """Reset the NSX appliance credentials for a private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsResetNsxCredentialsRequest(
        privateCloud=resource.RelativeName())
    return self.service.ResetNsxCredentials(request)

  def GetVcenterCredentials(self, resource):
    """Return the vCenter appliance credentials for a private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsShowVcenterCredentialsRequest(
        privateCloud=resource.RelativeName())
    return self.service.ShowVcenterCredentials(request)

  def ResetVcenterCredentials(self, resource):
    """Reset the vCenter appliance credentials for a private cloud."""
    request = self.messages.VmwareengineProjectsLocationsPrivateCloudsResetVcenterCredentialsRequest(
        privateCloud=resource.RelativeName())
    return self.service.ResetVcenterCredentials(request)
| [
"gcloud@google.com"
] | gcloud@google.com |
089d204a57ac58b1898d7497e8eaa2e12739dbfb | a91eb255bddc7d4fa12dae246e05f68f757148e4 | /dfc/document/urls.py | 3bee1653d16e26518e717117d7c6c59efb80a2aa | [] | no_license | zPatrickz/DFC-website | 7a54f3812ac0e8e5b54df3841ecbfb40da18ce64 | 6988d7ea0382ebc57540486a9621ead753cfbc37 | refs/heads/master | 2020-12-11T07:59:37.745729 | 2014-04-10T14:26:01 | 2014-04-10T14:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.conf.urls import patterns, url
from django.contrib.auth import views as auth_views
from document import views
# URL routes for the document app (old-style Django patterns()).
urlpatterns = patterns('',
    # url(r'^$', views.index, name = 'document_home'),
    url(r'^new/', views.new, name = 'doc_new'),  # create a new document
    url(r'^(?P<doc_id>\d+)/',views.detail, name = 'doc_detail'),  # detail by numeric id
)
| [
"zeostudio@gmail.com"
] | zeostudio@gmail.com |
21183bcec283cef8fb369fe032118579540e2969 | 4724a3beaba91dd474382aaff05a900e13118071 | /09-case-study-word-play/ex_9_2_7.py | f8cc82867f782e07690f52946fceec9b89d39a1b | [] | no_license | akshirapov/think-python | 7090b11c6618b6dbc5ca5cde8ba2e1e26ca39e28 | 490333f19b463973c05abc734ac3e9dc4e6d019a | refs/heads/master | 2020-06-27T03:58:03.377943 | 2020-01-10T16:37:52 | 2020-01-10T16:40:38 | 199,838,313 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | # -*- coding: utf-8 -*-
"""
This module contains a code for ex.7 related to ch.9.2 of
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
"""
def has_three_consecutive_double_letters(string: str) -> bool:
    """Return True if *string* contains three consecutive double letters.

    Example: 'bookkeeper' -> True, because of 'ookkee'.

    Bug fix: the previous version located only the first doubled letter
    anywhere in the string and tested the two pairs right after it, so a
    word whose triple starts after an unrelated double (e.g. 'aaxbbccdd')
    was wrongly rejected. It also claimed to return a word, not a bool.
    """
    # Slide a 6-character window and test for three adjacent doubled pairs.
    # range() is empty when len(string) < 6, so short inputs return False.
    for i in range(len(string) - 5):
        if (string[i] == string[i + 1]
                and string[i + 2] == string[i + 3]
                and string[i + 4] == string[i + 5]):
            return True
    return False
if __name__ == '__main__':
    # Scan the word list and print every word with three consecutive
    # double letters.
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            if has_three_consecutive_double_letters(word):
                print(word)
| [
"cccp2006_06@mail.ru"
] | cccp2006_06@mail.ru |
6979f3f8d77744f8a56be1a9293b249bbeb34f0b | b95e49d381940c8e36ef638e954ca06e36c3be25 | /app.py | 15c5721608b81db64f886969f2e77e49ba55d3c4 | [] | no_license | bloogrox/dramatiq-starter | 4ca790db63b78b124d1b1000c82425355ccaa3d7 | 7190768634a56e52bc5443aa858fb9b63b5ecdc6 | refs/heads/master | 2020-06-17T07:08:04.669197 | 2019-07-08T15:31:34 | 2019-07-08T15:31:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import dramatiq
from dramatiq.brokers.redis import RedisBroker
import settings
# Register a Redis-backed broker as the process-wide default so actors
# declared elsewhere bind to it.
broker = RedisBroker(url=settings.REDIS_URL)
dramatiq.set_broker(broker)
| [
"bloogrox@gmail.com"
] | bloogrox@gmail.com |
b38dad9bbbe53949d2bc4a67748445a1daa1bbd4 | 4a4717f88a0a5ea174098a342057759561f1688b | /scripts/util/diagnose_huc12.py | 7a6f1b21daa14ced31b9160195e2675f6817b81d | [
"MIT"
] | permissive | timsklenar/dep | 73ccf3ef18fe6a22f2cecba7878dcff709efea57 | 5bf9e0cd335825dcb50f22ee4c5c9c5ccc866114 | refs/heads/master | 2021-04-12T10:15:35.680758 | 2018-02-16T17:43:39 | 2018-02-16T17:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | """Do some diagnostics on what the raw DEP files are telling us"""
from __future__ import print_function
import sys
import glob
from pyiem import dep
import pandas as pd
def summarize_hillslopes(huc12, scenario):
    """Print the top sediment-delivery flowpaths for one HUC12/scenario."""
    pattern = "/i/%s/env/%s/%s/*.env" % (scenario, huc12[:8], huc12[8:])
    frames = []
    for envfn in glob.glob(pattern):
        frame = dep.read_env(envfn)
        # Flowpath id is encoded in the filename: <huc>_<fpath>.env
        frame['flowpath'] = int(envfn.split("/")[-1].split("_")[1][:-4])
        frames.append(frame)
    df = pd.concat(frames)
    # Rank flowpaths by total sediment delivery.
    totals = df[['sed_del', 'flowpath']].groupby(
        'flowpath').sum().sort_values('sed_del', ascending=False)
    print("==== TOP 5 HIGHEST SEDIMENT DELIVERY TOTALS")
    print(totals.head())
    # Drill into the single worst flowpath.
    worst = totals.index[0]
    events = df[df['flowpath'] == worst].sort_values('sed_del',
                                                     ascending=False)
    print("==== TOP 5 HIGHEST SEDIMENT DELIVERY FOR %s" % (worst, ))
    print(events[['date', 'sed_del', 'precip', 'runoff', 'av_det']].head())
    yearly = events.groupby('year').sum().sort_values('sed_del',
                                                      ascending=False)
    print("==== TOP 5 HIGHEST SEDIMENT DELIVERY EVENTS FOR %s" % (worst, ))
    print(yearly[['sed_del', 'precip', 'runoff', 'av_det']].head())
def main(argv):
    """Entry point: ``argv`` is ``[prog, huc12, scenario]``."""
    huc12, scenario = argv[1], argv[2]
    summarize_hillslopes(huc12, scenario)


if __name__ == '__main__':
    main(sys.argv)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
ba7462e3fe1257347ea3f0e2c36da7cd650c65ff | 855511810dd54fa2406442db034079f76a73f869 | /netbox_rest/models/tenant_group_serializer.py | 061de64d5963c72c8274440c79d1638b951e0d21 | [] | no_license | jlongever/netbox-serv | 2da55778ded70031bd7500b4bf7aebb9d814dbbc | b281c7b7ef1571ad71f46dc155c2e0e2dd19b217 | refs/heads/master | 2020-09-10T02:16:23.555873 | 2018-10-29T18:29:17 | 2018-10-29T18:29:17 | 66,660,836 | 2 | 0 | null | 2018-10-29T18:29:18 | 2016-08-26T15:59:25 | Python | UTF-8 | Python | false | false | 4,445 | py | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class TenantGroupSerializer(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, id=None, name=None, slug=None):
        """
        TenantGroupSerializer - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # swagger_types maps attribute name -> declared type;
        # attribute_map maps attribute name -> wire (JSON) key.
        self.swagger_types = {
            'id': 'int',
            'name': 'str',
            'slug': 'str'
        }
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'slug': 'slug'
        }
        self._id = id
        self._name = name
        self._slug = slug
    @property
    def id(self):
        """
        Gets the id of this TenantGroupSerializer.

        :return: The id of this TenantGroupSerializer.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """
        Sets the id of this TenantGroupSerializer.

        :param id: The id of this TenantGroupSerializer.
        :type: int
        """
        self._id = id
    @property
    def name(self):
        """
        Gets the name of this TenantGroupSerializer.

        :return: The name of this TenantGroupSerializer.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this TenantGroupSerializer.

        :param name: The name of this TenantGroupSerializer.
        :type: str
        """
        self._name = name
    @property
    def slug(self):
        """
        Gets the slug of this TenantGroupSerializer.

        :return: The slug of this TenantGroupSerializer.
        :rtype: str
        """
        return self._slug
    @slug.setter
    def slug(self, slug):
        """
        Sets the slug of this TenantGroupSerializer.

        :param slug: The slug of this TenantGroupSerializer.
        :type: str
        """
        self._slug = slug
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested models, lists of models, and dicts of
        # models via their own to_dict(); plain values pass through.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"joseph.longever@emc.com"
] | joseph.longever@emc.com |
0bd2dc91e623ecbdacd96734ca3e54d446aee70d | 02f937609df114477f746342b37e690d24c181e8 | /src/venv/bin/easy_install-3.5 | 98872eb640733614f2356aad9294485234d277eb | [] | no_license | summukhe/SequenceStructureAnalysis | b419663e81541a028f062cbeaf2c8f81503e12da | 6e9e161a8ad89f627be9b5a2bf82d26f28b4b431 | refs/heads/master | 2021-05-05T23:34:05.824732 | 2018-01-16T17:43:06 | 2018-01-16T17:43:06 | 116,802,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | 5 | #!/home/sumanta/PycharmProjects/RLECode/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated console-script wrapper: normalize argv[0] (strip -script.py /
    # .exe suffixes added by installers) and invoke the declared entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.5')()
    )
| [
"sumant199@gmail.com"
] | sumant199@gmail.com |
1741d5b1a6df9deb02ed43335f02689dd7b7b402 | caf192dbc1ca90fee18bb4ce170d37eb14870ec5 | /Chapter-11/16. statSet class.py | 0491ce9ba16dcf1268379fbd7681333026d5dfdf | [] | no_license | Dfredude/PythonZelle | 858b00f5eacce841173c64b3cecd978dedbeb145 | 1923fe84df604968eebc5269f23b7c0f167d55f0 | refs/heads/main | 2023-08-30T21:45:57.070344 | 2021-10-17T01:32:57 | 2021-10-17T01:32:57 | 359,041,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | from math import sqrt
from random import randrange
class StatSet:
    """Accumulates numbers and reports simple descriptive statistics."""

    def __init__(self) -> None:
        self.values = []

    def addNumber(self, x):
        """Add one value to the set."""
        self.values.append(x)

    def mean(self):
        """Arithmetic mean of the values."""
        return sum(self.values) / len(self.values)

    def median(self):
        """Statistical median of the values.

        Bug fixes vs. the original: the data is sorted first (the stored
        order is arbitrary), the odd-length index is n // 2 rather than
        n // 2 + 1, the even-length case averages the two middle elements
        with integer indices (n / 2 was a float and raised TypeError), and
        the result is returned instead of being assigned to self.median,
        which clobbered the method itself.
        """
        ordered = sorted(self.values)
        n = len(ordered)
        if n % 2 != 0:
            return ordered[n // 2]
        m = n // 2
        return (ordered[m - 1] + ordered[m]) / 2

    def stdDev(self):
        """Sample standard deviation (n - 1 denominator)."""
        sumDevSq = 0
        xbar = self.mean()
        for num in self.values:
            dev = num - xbar
            sumDevSq = sumDevSq + dev * dev
        return sqrt(sumDevSq / (len(self.values) - 1))

    def count(self):
        return len(self.values)

    def min(self):
        return min(self.values)

    def max(self):
        return max(self.values)
def main():
    """Demo: load 10 random ints from [1, 9] and print mean and std dev."""
    mySet = StatSet()
    for i in range(10):
        mySet.addNumber(randrange(1,10))
    print(mySet.mean(), mySet.stdDev())
if __name__ == '__main__': main()
| [
"dominguezlucio@outlook.com"
] | dominguezlucio@outlook.com |
408cbc35305ce711a6c9ec7410db919a0b7c642c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/8fb1675764894b0597301daa1e12a109.py | d160506f4701990580a9759cca9758df671ffba8 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 216 | py | def hey(prompt):
    # Silence: empty or whitespace-only input.
    if prompt.strip() == "":
        return "Fine. Be that way!"
    # Shouting: every cased character is upper case.
    if prompt.isupper():
        return "Woah, chill out!"
    # A question (checked after shouting, so shouted questions count as shouting).
    if prompt.endswith("?"):
        return "Sure."
    return "Whatever."
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
b3017ebe21427087f950b2b556cbeecdfffff87b | 4377dddadb615c632ea49851c986cff096b79358 | /money/contrib/django/currencies/models.py | a5717692ecee69c2909275020fb9a66483ec3668 | [] | no_license | cuker/python-money | ff203d42fce5c7fcb365474688d3d53902ba512d | 4a7d97208f39568f8a1472f635264aedaa321edf | refs/heads/master | 2021-01-23T12:38:17.451084 | 2011-06-07T23:10:06 | 2011-06-07T23:10:06 | 472,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | from django.db import models
from django.conf import settings
import money
from decimal import Decimal
class CurrencyManager(models.Manager):
    """Manager with convenience lookups for Currency rows (Python 2 code)."""
    def active(self):
        # Currencies currently enabled for use.
        return self.all().filter(enabled=True)
    def default(self):
        # Exactly one row is expected to carry default=True.
        return self.get(default=True)
    get_default = default  # alias kept for existing call sites
    def __getitem__(self, code):
        # Dict-style lookup by code, e.g. Currency.objects['USD'].
        try:
            return self.get(code=code)
        except self.model.DoesNotExist:
            raise KeyError, 'currency "%s" was not found' % code
class Currency(models.Model, money.BaseCurrency):
    """A currency row; also serves as a money.BaseCurrency provider entry."""
    name = models.CharField(max_length=60)
    code = models.CharField(max_length=3, primary_key=True)  # 3-letter code, e.g. 'USD'
    numeric = models.CharField(max_length=5)
    enabled = models.BooleanField(default=True, db_index=True)
    # Rate relative to the default currency; refreshed by _load_exchange_rates().
    exchange_rate = models.DecimalField(max_digits=10, decimal_places=5, null=True, blank=True)
    default = models.BooleanField(default=False, db_index=True)
    # Optional country links, only when the 'countries' app is installed.
    if 'countries' in settings.INSTALLED_APPS:
        from countries.models import Country
        countries = models.ManyToManyField(Country)
    objects = CurrencyManager()
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Keep 'default' unique: demote other rows when this one is default.
        # NOTE(review): exclude(pk=self.pk, default=False) excludes only rows
        # matching BOTH conditions; if the intent is "everything except this
        # row", exclude(pk=self.pk) alone would express it — verify.
        if self.default and self.pk:
            type(self).objects.exclude(pk=self.pk, default=False).update(default=False)
        return models.Model.save(self, *args, **kwargs)
    class Meta:
        ordering = ['-default', 'code']
        verbose_name_plural = "currencies"
# Snapshot the static currency table, then make the database the provider.
ORIGINAL_CURRENCIES = money.currency_provider()
money.set_currency_provider(Currency.objects)
def _load_currencies():
    """Seed Currency rows from the static table and ensure a default exists."""
    for key, value in ORIGINAL_CURRENCIES.iteritems():
        if key == 'XXX': continue  # skip the 'XXX' placeholder entry
        Currency.objects.get_or_create(name=value.name,
                                       code=value.code,
                                       numeric=value.numeric)
    try:
        Currency.objects.default()
    except Currency.DoesNotExist:
        # No default yet: fall back to USD.
        new_default = Currency.objects['USD']
        new_default.default = True
        new_default.save()
def _load_exchange_rates():
    """Fetch per-currency rates (relative to the default currency) from
    Yahoo's CSV quote endpoint and store them on each non-default row."""
    import urllib
    from_currency = Currency.objects.default()
    kwargs = {'from':from_currency.code,}
    url = 'http://quote.yahoo.com/d/quotes.csv?s=%(from)s%(to)s=X&f=l1&e=.csv'
    for target in Currency.objects.filter(default=False):
        kwargs['to'] = target.code
        response = urllib.urlopen(url % kwargs).read()
        # NOTE(review): Decimal raises decimal.InvalidOperation (not
        # ValueError) on malformed input, so this handler may never fire —
        # confirm the intended best-effort behavior.
        try:
            target.exchange_rate = Decimal(response.strip())
        except ValueError:
            # Best effort: leave the stored rate untouched on a bad response.
            pass
        else:
            target.save()
| [
"jasonk@cukerinteractive.com"
] | jasonk@cukerinteractive.com |
c7434530e7790a420c197b0d3fc5f0f35b2948c1 | da5849cc6ab950a716131fb8c2bee2267e627463 | /python/datascience/numpy/recipe_1d.py | b1bdead51f7598c22da45a0bcf6d1f5d3f3c8964 | [] | no_license | leisheyoufu/study_exercise | 5e3beba7763f2dfafa426932e21f110df1fd150e | db58097f4b542aea894b11feae31fb26006d5ebc | refs/heads/master | 2023-08-16T21:29:26.967795 | 2023-08-11T03:09:31 | 2023-08-11T03:09:31 | 13,537,939 | 3 | 1 | null | 2023-09-05T21:59:21 | 2013-10-13T11:13:29 | Jupyter Notebook | UTF-8 | Python | false | false | 550 | py | import numpy as np
def display_shape(a):
    """Print the element count, dimensionality and shape of array ``a``
    (Python 2 print statements)."""
    print
    print
    print "Number of elements in a = %d" % (a.size)
    print "Number of dimensions in a =%d" % (a.ndim)
    print "Rows and Columns in a ", a.shape
    print
# Create a matrix with all elements set to one
ones_matrix = np.ones((3,3)) # 1
display_shape(ones_matrix)
# Create a matrix with all elements set to zero
zeros_matrix = np.zeros((3,3))
display_shape(zeros_matrix)
# Identity matrix: ones on the main diagonal (k=0)
identity_matrix = np.eye(N=3,M=3,k=0)
display_shape(identity_matrix)
# k=1 shifts the diagonal of ones one position above the main diagonal
identity_matrix = np.eye(N=3,k=1)
display_shape(identity_matrix) | [
"chenglch@cn.ibm.com"
] | chenglch@cn.ibm.com |
ef852b1ea95ab6b607b7111d6e352702e95e413f | 28def9c6ad5053dcd8d9ea81ef04c488bf413bb4 | /untwisted/exceptions.py | 8e555e4c948e52ef1c527a2d6cf9854042e31867 | [
"MIT"
] | permissive | kgisl/untwisted | 2b6ebd5a3a88880d785c34186444831248119935 | b1277d4d5ad0982d4bc307ed6cdbd7923b0a3305 | refs/heads/master | 2021-01-01T06:01:33.514588 | 2017-07-14T22:31:31 | 2017-07-14T22:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | class Stop(Exception):
"""
This exception is used to avoid remaining handles being
processed for a given event.
from untwisted.dispatcher import Dispatcher, Stop
def handle0(dispatcher):
raise Stop
def handle1(dispatcher):
print 'it will not be processed!'
dispatcher = Dispatcher()
dispatcher.add_map('alpha', handle0)
dispatcher.add_map('alpha', handle1)
dispatcher.drive('alpha')
"""
pass
class Erase(Exception):
"""
When this exception is thrown from a handle it avoids such a handle
being processed again upon its event.
from untwisted.dispatcher import Dispatcher, Erase
def handle(dispatcher):
print 'It will be called just once!'
raise Erase
dispatcher = Dispatcher()
dispatcher.add_map('alpha', handle)
dispatcher.drive('alpha')
dispatcher.drive('alpha')
"""
pass
| [
"ioliveira.id.uff.br"
] | ioliveira.id.uff.br |
ca264bba01fbb8049800dd66a1b42075294bab3f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/003_Placing several components in the box/003_Alignment of form components/074_WrapAllRows.py | a93093d67a978e4b8b6ed215b3a1f0ed61d86509 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
import sys
# Demo: QFormLayout with the WrapAllRows policy (labels stacked above fields).
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QWidget()
window.setWindowTitle("WrapAllRows")
window.resize(300, 150)
# Form fields and buttons (labels/captions are Russian; & marks mnemonics).
lineEdit = QtWidgets.QLineEdit()
textEdit = QtWidgets.QTextEdit()
button1 = QtWidgets.QPushButton("О&тправить")
button2 = QtWidgets.QPushButton("О&чистить")
# The two buttons share one horizontal row.
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(button1)
hbox.addWidget(button2)
form = QtWidgets.QFormLayout()
form.setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
form.addRow("&Название:", lineEdit)
form.addRow("&Описание:", textEdit)
form.addRow(hbox)
window.setLayout(form)
window.show()
sys.exit(app.exec_()) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
39876d9191216f5d127a8b23705a2a7e08864d52 | a055bcba66f9ca8acd87042d3a594296f7ccb610 | /images/views.py | a9d154aa2c706fc83ee84bd990ef1c6696ebf6c9 | [] | no_license | shineforever/bookmarks | 7c3a841159435d85d72003b887759aa063c52253 | 100fceff7f5ff27eb048008dbff9ddbcd6364f97 | refs/heads/master | 2021-01-20T20:44:44.891324 | 2016-07-27T08:31:54 | 2016-07-27T08:31:54 | 62,046,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import ImageCreateForm
# Create your views here.
@login_required
def image_create(request):
    """Render (GET) and process (POST) the image-bookmarking form."""
    if request.method == 'POST':
        form = ImageCreateForm(data=request.POST)
        if form.is_valid():
            cd = form.cleaned_data  # NOTE(review): unused — candidate for removal
            # NOTE(review): new_item is built with commit=False but never
            # .save()d before redirecting — confirm whether the persistence
            # step is missing here.
            new_item = form.save(commit=False)
            messages.success(request,'Image added successfully')
            return redirect(new_item.get_absolute_url())
    else:
        #GET
        # Pre-populate the form from query-string parameters.
        form = ImageCreateForm(data=request.GET)
    return render(request,
                  'images/image/create.html',
                  {'section': 'images',
                   'form': form})
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
7f91c02504e6d266ed0d2d3714c567ea0a2fd731 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/gensim/nosy.py | 3e6340e85f0878212b450c7936cc786ab7e9c1d3 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:67fa7e813100e4a41be9728ee8a521d4a488ec12655fc2e06e5c2a302445df3b
size 1407
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
72223f10d913723ce44e84a0057fb20a83494203 | a31e7a01b0a7879ddbda7ba3a606ff4df718f0ef | /app/ingredients/apis/__init__.py | f83d6db436bc44756bd94b14c67da226d9d63650 | [] | no_license | smallbee3/Subway_Server | 27a477c81b830d2f264afb09c646d53d8096e5f4 | c0bebf3715663c7d29ffdc9e9ff878d226dd3496 | refs/heads/master | 2021-02-17T00:58:29.371121 | 2018-12-27T10:43:51 | 2018-12-27T10:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from .sandwich import *
from .bread import *
from .cheese import *
from .toasting import *
from .toppings import *
from .vegetables import *
from .sauces import *
| [
"smallbee3@gmail.com"
] | smallbee3@gmail.com |
e7a2127c826f94d3c911d95c8b6038cccefef18c | 78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c | /AlgorithmStudy/백준/5 DFS & BFS/11 토마토.py | 60372a187211792a6f091d124a6a3eac1a1504aa | [] | no_license | cladren123/study | ef2c45bc489fa658dbc9360fb0b0de53250500e5 | 241326e618f1f3bb1568d588bf6f53b78920587a | refs/heads/master | 2023-09-02T02:21:24.560967 | 2021-11-05T12:20:06 | 2021-11-05T12:20:06 | 368,753,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py |
"""
실버1
"""
# m: board width (columns), n: board height (rows)
from collections import deque
m,n = map(int, input().split())
# 1: ripe tomato, 0: unripe tomato, -1: empty cell
board = [list(map(int, input().split())) for _ in range(n)]
# visited stores the day each cell ripens; -3 marks "not reached yet".
visited= [[-3] * m for _ in range(n)]
que = deque()
dx = [1,0,-1,0]
dy = [0,-1,0,1]
count = 0
# Seed a multi-source BFS with every ripe tomato at day 0; mark empty cells.
for i in range(n) :
    for j in range(m) :
        if board[i][j] == -1 :
            visited[i][j] = -1
        if board[i][j] == 1 :
            que.append([i,j])
            visited[i][j] = count
# BFS: each step ripens the four orthogonal neighbors one day later.
while que :
    y,x = que.popleft()
    for i in range(4) :
        nexty = y + dy[i]
        nextx = x + dx[i]
        if 0 <= nexty < n and 0 <= nextx < m :
            if board[nexty][nextx] == 0 and visited[nexty][nextx] == -3 :
                que.append([nexty,nextx])
                visited[nexty][nextx] = visited[y][x] + 1
                board[nexty][nextx] = 1
# Answer: the latest ripening day, or -1 if any tomato was never reached.
answer = 0
for i in visited :
    if -3 in i :
        answer = -1
        break
    else :
        if answer < max(i) :
            answer = max(i)
print(answer)
# Debug aids left from development:
# for i in board :
#     print(i)
#
# print()
#
# for i in visited :
#     print(i)
| [
"48821942+cladren123@users.noreply.github.com"
] | 48821942+cladren123@users.noreply.github.com |
1732a222aa9a8307cf510c5897e748bd5b556e19 | fdb8d96d06cb7e74153a178fd17b449e89f44cd0 | /poo_vs_estructurado/poo.py | e24044e8067356ec354c939e87f577fa5eb7830e | [] | no_license | EmaSMach/info2020 | c84916521d2dd21040419cb469c76c589b98be89 | a184dc376cb5e0b894a32d01681b71c824d993d3 | refs/heads/master | 2022-12-06T08:52:34.994922 | 2020-08-24T02:57:40 | 2020-08-24T02:57:40 | 273,131,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | # Creo una estructura para los clientes
class Cliente:
def __init__(self, dni, nombre, apellidos):
self.dni = dni
self.nombre = nombre
self.apellidos = apellidos
def __str__(self):
return '{} {}'.format(self.nombre, self.apellidos)
def __repr__(self):
return str(self)
# Y otra para las empresas
class Empresa:
def __init__(self, clientes=[]):
self.clientes = clientes
def mostrar_cliente(self, dni=None):
for c in self.clientes:
if c.dni == dni:
print(c)
return
print("Cliente no encontrado")
def borrar_cliente(self, dni=None):
for i, c in enumerate(self.clientes):
if c.dni == dni:
del(self.clientes[i])
print(str(c), "> BORRADO")
return
print("Cliente no encontrado")
# Ahora utilizaremos ambas estructuras
# Creemos un par de clientes
hector = Cliente(nombre="Hector", apellidos="Costa Guzman", dni="11111111A")
juan = Cliente("22222222B", "Juan", "Gonzalez Marquez")
# Creemos una empresa con los clientes iniciales
empresa = Empresa(clientes=[hector, juan])
# Se muestran todos los clientes
print("==LISTADO DE CLIENTES==")
print(empresa.clientes)
print("\n==MOSTRAR CLIENTES POR DNI==")
# Se consulta clientes por DNI
empresa.mostrar_cliente("11111111A")
empresa.mostrar_cliente("11111111Z")
print("\n==BORRAR CLIENTES POR DNI==")
# Se borra un cliente por DNI
empresa.borrar_cliente("22222222V")
empresa.borrar_cliente("22222222B")
# Se muestran de nuevo todos los clientes
print("\n==LISTADO DE CLIENTES==")
print(empresa.clientes)
| [
"davidemanuelsandoval@gmail.com"
] | davidemanuelsandoval@gmail.com |
a7fd6a5636da3ad3daab9964b5057344b43fbd77 | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /monk/system_unit_tests/pytorch/test_optimizer_adadelta.py | 9f4ba4daa325105e45dc523eb6c714525e3b7b40 | [
"Apache-2.0"
] | permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 1,828 | py | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
def test_optimizer_adadelta(system_dict):
forward = True;
if(not os.path.isdir("datasets")):
os.system("! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
os.system("! unzip -qq datasets.zip")
test = "test_optimizer_adadelta";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
gtf.Default(dataset_path="../../system_check_tests/datasets/dataset_cats_dogs_train",
model_name="resnet18", freeze_base_network=True, num_epochs=2);
gtf.optimizer_adadelta(0.01, weight_decay=0.0001, rho=0.9,
clipnorm=1.0, clipvalue=0.5);
gtf.Train();
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
8c2cfcea57ba4e2829b34ac0622ff9d4903f0378 | f2201a77b8039215591aaa31dddae7ebb72301c2 | /backend/users/migrations/0002_auto_20201119_0018.py | e972e5c362cf7d7c108192355b442edcf2e62a64 | [] | no_license | crowdbotics-apps/start-up-22744 | 8726c58855ffee7ceb48100c9ebeb26a377a1051 | bbd9ce066d6636fe6866c7070a1a8370033ba91c | refs/heads/master | 2023-01-11T15:30:38.074614 | 2020-11-19T00:19:18 | 2020-11-19T00:19:18 | 314,091,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Generated by Django 2.2.17 on 2020-11-19 00:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='timestamp_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
08422f80718e6ddd8b5db6147f40775b09d9554f | d9f10273960c6956db55f694cdee5910554addd1 | /run.py | a815b8440b2399dddb1a48ace624494c331228bf | [] | no_license | manuelborowski/infoheliks | 09b7c6618922aa35ec2334b64cbf55d4bf0d4a80 | a543960a28d00d204a4a699d58964faec6326847 | refs/heads/main | 2023-04-18T11:28:03.115297 | 2021-05-04T08:46:25 | 2021-05-04T08:46:25 | 350,639,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from app import flask_app, socketio
if __name__ == '__main__':
socketio.run(flask_app, port=5026, host='127.0.0.1', debug=False) | [
"emmanuel.borowski@gmail.com"
] | emmanuel.borowski@gmail.com |
e0403b2849329c0dea1acd1f3349257cd8c11022 | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_37_BB/easy/646. First Position Unique Character.py | 1f8a68e990d0be27eacc8cb5a84adaeaa998a1ad | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | class Solution:
"""
@param s: a string
@return: it's index
"""
def firstUniqChar(self, s):
counts = {}
for ch in s:
counts[ch] = counts.get(ch, 0) + 1
for i in range(len(s)):
if counts[s[i]] == 1:
return i
return -1 | [
"mike.tc.chen101@gmail.com"
] | mike.tc.chen101@gmail.com |
cce8c03c7514638d39b72b60615e7063c2e4a4ac | dae8e0070b093d662fdeea026e3eb48814af73c5 | /Autosampler/analysis/nonlinearRegression.py | f678adb1f0f4ed85179278d694bf32842efd643e | [] | no_license | clipo/RHX | 43d3dc8e0f2a4d90a8c83ec2a9e4fc0be30fddae | 93286e17df1ec05d98d791671d641d86b7f588b9 | refs/heads/master | 2021-01-02T23:06:23.023476 | 2013-04-27T21:42:01 | 2013-04-27T21:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | __author__ = 'carllipo'
import numpy as np
from scipy.optimize import leastsq
def function(a, time):
return a * np.power(time, 0.25)
def residuals(p, y, x):
err = y - function(x, p)
return err
def nlinRegression(timeArray, weightChangeArray, minval, maxval):
nlx = []
nly = []
count = 0
a_guess = 0.005
for var in timeArray:
if minval < var < maxval:
nlx.append(var)
nly.append(weightChangeArray[count])
count += 1
kd, cov, infodict, mesg, ier = leastsq(residuals,
a_guess, args=(timeArray, weightChangeArray), full_output=True)
return kd[0]
timeArray = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
weightChangeArray = [0, .5, 1, 1.3, 3, 5, 7, 8, 10, 12, 12.5, 13.5, 14]
minval = -1
maxval = 16
alpha = nlinRegression(timeArray, weightChangeArray, minval, maxval)
print alpha
| [
"clipo@csulb.edu"
] | clipo@csulb.edu |
e55f1275abce7b05777331f05a7db588bb10a82f | eb74806869a4340a6d8a2623bbe72bd4e64dcde8 | /apps/push/signals.py | 2f2aa7d3d0a552e734bfa3cc964dd3aa73a9278b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | sictiru/NewsBlur | a0874a1044926d2268ba07a928e62fce5c9a8310 | 1ab88e4cc34775d00a1ac90ee08bc2498577e773 | refs/heads/sictiru | 2023-08-19T20:24:20.638019 | 2023-08-15T03:52:09 | 2023-08-15T03:52:09 | 250,445,213 | 1 | 0 | MIT | 2023-03-06T15:34:38 | 2020-03-27T05:05:44 | Objective-C | UTF-8 | Python | false | false | 260 | py | # Adapted from djpubsubhubbub. See License: http://git.participatoryculture.org/djpubsubhubbub/tree/LICENSE
from django.dispatch import Signal
pre_subscribe = Signal(providing_args=['created'])
verified = Signal()
updated = Signal(providing_args=['update'])
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
8c965bc5e529ecf239d5241c6d31618513eb5b69 | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/ibvpy/mesh/fe_domain.py | 77d1550ea5ee3bd2e315cafde023e53e6b2b7379 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 4,549 | py |
from enthought.traits.api import \
Array, Bool, Callable, Enum, Float, HasTraits, Interface, implements, \
Instance, Int, Trait, Str, Enum, Callable, List, TraitDict, Any, \
on_trait_change, Tuple, WeakRef, Delegate, Property, cached_property, \
This, self, TraitError, Button, Event
from enthought.traits.ui.api import \
View, Item, Group
from numpy import array, arange
from ibvpy.core.sdomain import \
SDomain
from ibvpy.core.scontext import \
SContext
from ibvpy.dots.dots_list_eval import \
DOTSListEval
from ibvpy.rtrace.rt_domain_list import \
RTraceDomainList
class FEDomain( SDomain ):
'''Test the state dependencies within the hierarchical domain representation.
'''
changed_structure = Event
subdomains = List( domain_changed = True )
@on_trait_change( 'changed_structure' )
def _validate_subdomains( self ):
for domain in self.subdomains:
domain.validate()
xdomains = List( domain_changed = True )
serialized_subdomains = List
def _append_in_series( self, new_subdomain ):
'''Link the new subdomain at the end of the series.
'''
if self.serialized_subdomains:
last_subdomain = self.serialized_subdomains[-1]
last_subdomain.next_domain = new_subdomain
new_subdomain.previous_domain = last_subdomain
self.serialized_subdomains.append( new_subdomain )
nonempty_subdomains = Property( depends_on = 'changed_structure' )
@cached_property
def _get_nonempty_subdomains( self ):
d_list = []
for d in self.serialized_subdomains:
if d.n_active_elems > 0:
d_list.append( d )
return d_list
n_dofs = Property
def _get_n_dofs( self ):
'''Return the total number of dofs in the domain.
Use the last subdomain's: dof_offset + n_dofs
'''
last_subdomain = self.serialized_subdomains[-1]
return last_subdomain.dof_offset + last_subdomain.n_dofs
dof_offset_arr = Property
def _get_dof_offset_arr( self ):
'''
Return array of the dof offsets
from serialized subdomains
'''
a = array( [domain.dof_offset
for domain in self.serialized_subdomains] )
return a
#----------------------------------------------------------------------------
# Methods for time stepper
#----------------------------------------------------------------------------
dots = Property( depends_on = 'changed_structure' )
@cached_property
def _get_dots( self ):
return DOTSListEval( sdomain = self,
dots_list = [ subdomain.dots
for subdomain
in self.nonempty_subdomains ] )
def new_scontext( self ):
'''
Setup a new spatial context.
'''
sctx = SContext()
sctx.domain_list = self
return sctx
#-----------------------------------------------------------------
# Response tracer background mesh
#-----------------------------------------------------------------
rt_bg_domain = Property( depends_on = 'changed_structure' )
@cached_property
def _get_rt_bg_domain( self ):
return RTraceDomainList( subfields = [ subdomain.rt_bg_domain
for subdomain
in self.nonempty_subdomains ],
sd = self )
def redraw( self ):
self.rt_bg_domain.redraw()
#----------------------------------------------------------------------------
# Methods for extracting ranges from the domain
#----------------------------------------------------------------------------
def get_lset_subdomain( self, lset_function ):
'''@TODO - implement the subdomain selection method
'''
raise NotImplementedError
def get_boundary( self, side = None ):
'''@todo: - implement the boundary extraction
'''
raise NotImplementedError
def get_interior( self ):
'''@todo: - implement the boundary extraction
'''
raise NotImplementedError
def __iter__( self ):
return iter( self.subdomains )
traits_view = View( Group(
),
resizable = True,
scrollable = True,
)
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
fa8815618592d941b28acb1eff9ab0cc10c18098 | ee5040164beb866310c9cf23584002a342b451c0 | /infra/libs-400rc2-20190512/examples/bmp280_simpletest.py | a54217ef21ce3baff21f2e95ebb304d914368ca5 | [
"MIT"
] | permissive | jadudm/feather-isa | cf9a47c627408addbbc84581e5d6dff35a79773e | b7419e6698c3f64be4d8122656eb8124631ca859 | refs/heads/master | 2020-05-22T11:45:33.753573 | 2019-06-11T15:49:42 | 2019-06-11T15:49:42 | 186,329,428 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | import time
import board
# import digitalio # For use with SPI
import busio
import adafruit_bmp280
# Create library object using our Bus I2C port
i2c = busio.I2C(board.SCL, board.SDA)
bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c)
# OR create library object using our Bus SPI port
#spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
#bmp_cs = digitalio.DigitalInOut(board.D10)
#bmp280 = adafruit_bmp280.Adafruit_BMP280_SPI(spi, bmp_cs)
# change this to match the location's pressure (hPa) at sea level
bmp280.sea_level_pressure = 1013.25
while True:
print("\nTemperature: %0.1f C" % bmp280.temperature)
print("Pressure: %0.1f hPa" % bmp280.pressure)
print("Altitude = %0.2f meters" % bmp280.altitude)
time.sleep(2)
| [
"matt@jadud.com"
] | matt@jadud.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.